From 92ac7bc35a7d6f744299a7ccb93ddbc4ff89a225 Mon Sep 17 00:00:00 2001 From: Frank Chen Date: Thu, 22 Sep 2022 16:01:45 -0700 Subject: [PATCH] media: camera: Build tegra-camera as OOT module Port camera drivers below from /kernel/nvidia to /kernel/nvidia-oot as OOT modules: - Fusa-capture driver - Tegra V4L2 framework driver - vi/csi driver - tegra camera platform driver Change-Id: I390af27096425bb11e0934201dd1a90f001bb3fa Signed-off-by: Frank Chen Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2780698 Reviewed-by: FNU Raunak Reviewed-by: Ankur Pawar Reviewed-by: Shiva Dubey GVS: Gerrit_Virtual_Submit --- drivers/media/platform/tegra/Makefile | 3 + drivers/media/platform/tegra/camera/Makefile | 35 + .../platform/tegra/camera/camera_common.c | 1161 +++++++ .../media/platform/tegra/camera/camera_gpio.c | 149 + .../media/platform/tegra/camera/camera_gpio.h | 19 + .../tegra/camera/camera_version_utils.c | 75 + drivers/media/platform/tegra/camera/csi/csi.c | 1159 +++++++ .../camera/fusa-capture/capture-common.c | 676 ++++ .../camera/fusa-capture/capture-isp-channel.c | 627 ++++ .../tegra/camera/fusa-capture/capture-isp.c | 1988 ++++++++++++ .../camera/fusa-capture/capture-vi-channel.c | 767 +++++ .../tegra/camera/fusa-capture/capture-vi.c | 1751 ++++++++++ .../platform/tegra/camera/nvcamera_log.c | 120 + .../platform/tegra/camera/nvcamera_log.h | 23 + .../platform/tegra/camera/nvcsi/csi5_fops.c | 519 +++ .../platform/tegra/camera/nvcsi/csi5_fops.h | 15 + .../media/platform/tegra/camera/regmap_util.c | 169 + .../platform/tegra/camera/sensor_common.c | 864 +++++ .../platform/tegra/camera/tegracam_core.c | 204 ++ .../platform/tegra/camera/tegracam_ctrls.c | 1026 ++++++ .../platform/tegra/camera/tegracam_utils.c | 249 ++ .../platform/tegra/camera/tegracam_v4l2.c | 252 ++ .../media/platform/tegra/camera/vi/channel.c | 2714 ++++++++++++++++ drivers/media/platform/tegra/camera/vi/core.c | 184 ++ .../media/platform/tegra/camera/vi/graph.c | 657 ++++ 
.../platform/tegra/camera/vi/mc_common.c | 362 +++ .../media/platform/tegra/camera/vi/vi5_fops.c | 994 ++++++ .../media/platform/tegra/camera/vi/vi5_fops.h | 13 + .../platform/tegra/camera/vi/vi5_formats.h | 141 + .../media/platform/tegra/mipical/mipi_cal.h | 46 + drivers/video/tegra/Makefile | 1 + drivers/video/tegra/camera/Makefile | 7 + .../video/tegra/camera/tegra_camera_dev_mfi.c | 260 ++ .../tegra/camera/tegra_camera_platform.c | 1189 +++++++ drivers/video/tegra/host/host1x/host1x.h | 245 ++ drivers/video/tegra/host/nvcsi/deskew.h | 160 + drivers/video/tegra/host/nvcsi/nvcsi.h | 31 + include/linux/arm64-barrier.h | 17 + include/linux/platform/tegra/bwmgr_mc.h | 29 + include/linux/platform/tegra/isomgr.h | 204 ++ .../linux/platform/tegra/latency_allowance.h | 330 ++ include/media/camera_common.h | 403 +++ include/media/camera_version_utils.h | 43 + include/media/csi.h | 171 + include/media/csi4_registers.h | 211 ++ include/media/csi5_registers.h | 42 + include/media/fusa-capture/capture-common.h | 236 ++ .../media/fusa-capture/capture-isp-channel.h | 120 + include/media/fusa-capture/capture-isp.h | 361 ++ .../media/fusa-capture/capture-vi-channel.h | 205 ++ include/media/fusa-capture/capture-vi.h | 454 +++ include/media/mc_common.h | 444 +++ include/media/nvc_focus.h | 102 + include/media/sensor_common.h | 42 + include/media/tegra-v4l2-camera.h | 174 + include/media/tegra_camera_core.h | 126 + include/media/tegra_camera_dev_mfi.h | 55 + include/media/tegra_camera_platform.h | 95 + include/media/tegra_v4l2_camera.h | 36 + include/media/tegracam_core.h | 45 + include/media/tegracam_utils.h | 39 + include/media/vi.h | 108 + include/media/vi2_registers.h | 216 ++ include/media/vi4_registers.h | 259 ++ include/soc/tegra/camrtc-capture-messages.h | 1114 +++++++ include/soc/tegra/camrtc-capture.h | 2891 +++++++++++++++++ include/soc/tegra/tegra-i2c-rtcpu.h | 87 + include/trace/events/camera_common.h | 133 + include/uapi/linux/nvhost_events.h | 290 ++ 
include/uapi/linux/nvhost_nvcsi_ioctl.h | 71 + include/uapi/linux/nvhost_vi_ioctl.h | 33 + include/uapi/media/camera_device.h | 119 + include/uapi/media/tegra_camera_platform.h | 21 + include/video/vi4.h | 45 + 74 files changed, 28226 insertions(+) create mode 100644 drivers/media/platform/tegra/camera/Makefile create mode 100644 drivers/media/platform/tegra/camera/camera_common.c create mode 100644 drivers/media/platform/tegra/camera/camera_gpio.c create mode 100644 drivers/media/platform/tegra/camera/camera_gpio.h create mode 100644 drivers/media/platform/tegra/camera/camera_version_utils.c create mode 100644 drivers/media/platform/tegra/camera/csi/csi.c create mode 100644 drivers/media/platform/tegra/camera/fusa-capture/capture-common.c create mode 100644 drivers/media/platform/tegra/camera/fusa-capture/capture-isp-channel.c create mode 100644 drivers/media/platform/tegra/camera/fusa-capture/capture-isp.c create mode 100644 drivers/media/platform/tegra/camera/fusa-capture/capture-vi-channel.c create mode 100644 drivers/media/platform/tegra/camera/fusa-capture/capture-vi.c create mode 100644 drivers/media/platform/tegra/camera/nvcamera_log.c create mode 100644 drivers/media/platform/tegra/camera/nvcamera_log.h create mode 100644 drivers/media/platform/tegra/camera/nvcsi/csi5_fops.c create mode 100644 drivers/media/platform/tegra/camera/nvcsi/csi5_fops.h create mode 100644 drivers/media/platform/tegra/camera/regmap_util.c create mode 100644 drivers/media/platform/tegra/camera/sensor_common.c create mode 100644 drivers/media/platform/tegra/camera/tegracam_core.c create mode 100644 drivers/media/platform/tegra/camera/tegracam_ctrls.c create mode 100644 drivers/media/platform/tegra/camera/tegracam_utils.c create mode 100644 drivers/media/platform/tegra/camera/tegracam_v4l2.c create mode 100644 drivers/media/platform/tegra/camera/vi/channel.c create mode 100644 drivers/media/platform/tegra/camera/vi/core.c create mode 100644 
drivers/media/platform/tegra/camera/vi/graph.c create mode 100644 drivers/media/platform/tegra/camera/vi/mc_common.c create mode 100644 drivers/media/platform/tegra/camera/vi/vi5_fops.c create mode 100644 drivers/media/platform/tegra/camera/vi/vi5_fops.h create mode 100644 drivers/media/platform/tegra/camera/vi/vi5_formats.h create mode 100644 drivers/media/platform/tegra/mipical/mipi_cal.h create mode 100644 drivers/video/tegra/camera/Makefile create mode 100644 drivers/video/tegra/camera/tegra_camera_dev_mfi.c create mode 100644 drivers/video/tegra/camera/tegra_camera_platform.c create mode 100644 drivers/video/tegra/host/host1x/host1x.h create mode 100644 drivers/video/tegra/host/nvcsi/deskew.h create mode 100644 drivers/video/tegra/host/nvcsi/nvcsi.h create mode 100644 include/linux/arm64-barrier.h create mode 100644 include/linux/platform/tegra/bwmgr_mc.h create mode 100644 include/linux/platform/tegra/isomgr.h create mode 100644 include/linux/platform/tegra/latency_allowance.h create mode 100644 include/media/camera_common.h create mode 100644 include/media/camera_version_utils.h create mode 100644 include/media/csi.h create mode 100644 include/media/csi4_registers.h create mode 100644 include/media/csi5_registers.h create mode 100644 include/media/fusa-capture/capture-common.h create mode 100644 include/media/fusa-capture/capture-isp-channel.h create mode 100644 include/media/fusa-capture/capture-isp.h create mode 100644 include/media/fusa-capture/capture-vi-channel.h create mode 100644 include/media/fusa-capture/capture-vi.h create mode 100644 include/media/mc_common.h create mode 100644 include/media/nvc_focus.h create mode 100644 include/media/sensor_common.h create mode 100644 include/media/tegra-v4l2-camera.h create mode 100644 include/media/tegra_camera_core.h create mode 100644 include/media/tegra_camera_dev_mfi.h create mode 100644 include/media/tegra_camera_platform.h create mode 100644 include/media/tegra_v4l2_camera.h create mode 100644 
include/media/tegracam_core.h create mode 100644 include/media/tegracam_utils.h create mode 100644 include/media/vi.h create mode 100644 include/media/vi2_registers.h create mode 100644 include/media/vi4_registers.h create mode 100644 include/soc/tegra/camrtc-capture-messages.h create mode 100644 include/soc/tegra/camrtc-capture.h create mode 100644 include/soc/tegra/tegra-i2c-rtcpu.h create mode 100644 include/trace/events/camera_common.h create mode 100644 include/uapi/linux/nvhost_events.h create mode 100644 include/uapi/linux/nvhost_nvcsi_ioctl.h create mode 100644 include/uapi/linux/nvhost_vi_ioctl.h create mode 100644 include/uapi/media/camera_device.h create mode 100644 include/uapi/media/tegra_camera_platform.h create mode 100644 include/video/vi4.h diff --git a/drivers/media/platform/tegra/Makefile b/drivers/media/platform/tegra/Makefile index 63a246c9..bebe47ff 100644 --- a/drivers/media/platform/tegra/Makefile +++ b/drivers/media/platform/tegra/Makefile @@ -1,5 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 # Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +LINUXINCLUDE += -I$(srctree.nvidia-oot)/include + obj-m += cdi/ obj-m += isc/ +obj-m += camera/ diff --git a/drivers/media/platform/tegra/camera/Makefile b/drivers/media/platform/tegra/camera/Makefile new file mode 100644 index 00000000..0251d084 --- /dev/null +++ b/drivers/media/platform/tegra/camera/Makefile @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
+ +LINUXINCLUDE += -I$(srctree.nvidia-oot)/include +LINUXINCLUDE += -I$(srctree.nvidia-oot)/drivers/video/tegra/host +LINUXINCLUDE += -I$(srctree)/drivers/video/tegra/host +LINUXINCLUDE += -I$(srctree.nvidia-oot)/drivers/video/tegra/camera +LINUXINCLUDE += -I$(srctree.nvidia-oot)/drivers/media/platform/tegra +LINUXINCLUDE += -DCONFIG_TEGRA_HOST1X +LINUXINCLUDE += -Werror + +tegra-camera-objs := regmap_util.o +tegra-camera-objs += camera_common.o +tegra-camera-objs += camera_gpio.o +tegra-camera-objs += sensor_common.o +tegra-camera-objs += camera_version_utils.o +tegra-camera-objs += nvcamera_log.o +tegra-camera-objs += tegracam_v4l2.o +tegra-camera-objs += tegracam_core.o +tegra-camera-objs += tegracam_ctrls.o +tegra-camera-objs += tegracam_utils.o +tegra-camera-objs += vi/vi5_fops.o +tegra-camera-objs += vi/mc_common.o +tegra-camera-objs += vi/graph.o +tegra-camera-objs += vi/channel.o +tegra-camera-objs += vi/core.o +tegra-camera-objs += csi/csi.o +tegra-camera-objs += nvcsi/csi5_fops.o +tegra-camera-objs += fusa-capture/capture-vi.o +tegra-camera-objs += fusa-capture/capture-common.o +tegra-camera-objs += fusa-capture/capture-vi-channel.o +tegra-camera-objs += fusa-capture/capture-isp-channel.o +tegra-camera-objs += fusa-capture/capture-isp.o +obj-m += tegra-camera.o + diff --git a/drivers/media/platform/tegra/camera/camera_common.c b/drivers/media/platform/tegra/camera/camera_common.c new file mode 100644 index 00000000..9dedf3d6 --- /dev/null +++ b/drivers/media/platform/tegra/camera/camera_common.c @@ -0,0 +1,1161 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * camera_common.c - utilities for tegra camera driver + * + * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define has_s_op(master, op) \ + (master->ops && master->ops->op) +#define call_s_op(master, op) \ + (has_s_op(master, op) ? 
\ + master->ops->op(master) : 0) +#define call_s_ops(master, op, ...) \ + (has_s_op(master, op) ? \ + master->ops->op(master, __VA_ARGS__) : 0) + +#define HDR_ENABLE 0x1 + +static const struct camera_common_colorfmt camera_common_color_fmts[] = { + { + MEDIA_BUS_FMT_SRGGB12_1X12, + V4L2_COLORSPACE_SRGB, + V4L2_PIX_FMT_SRGGB12, + }, + { + MEDIA_BUS_FMT_SGRBG12_1X12, + V4L2_COLORSPACE_SRGB, + V4L2_PIX_FMT_SGRBG12, + }, + { + MEDIA_BUS_FMT_SGBRG12_1X12, + V4L2_COLORSPACE_SRGB, + V4L2_PIX_FMT_SGBRG12 + }, + { + MEDIA_BUS_FMT_SRGGB10_1X10, + V4L2_COLORSPACE_SRGB, + V4L2_PIX_FMT_SRGGB10, + }, + { + MEDIA_BUS_FMT_SGRBG10_1X10, + V4L2_COLORSPACE_SRGB, + V4L2_PIX_FMT_SGRBG10, + }, + { + MEDIA_BUS_FMT_SGBRG10_1X10, + V4L2_COLORSPACE_SRGB, + V4L2_PIX_FMT_SGBRG10, + }, + { + MEDIA_BUS_FMT_SBGGR10_1X10, + V4L2_COLORSPACE_SRGB, + V4L2_PIX_FMT_SBGGR10, + }, + { + MEDIA_BUS_FMT_SRGGB8_1X8, + V4L2_COLORSPACE_SRGB, + V4L2_PIX_FMT_SRGGB8, + }, + { + MEDIA_BUS_FMT_YUYV8_1X16, + V4L2_COLORSPACE_SRGB, + V4L2_PIX_FMT_YUYV, + }, + { + MEDIA_BUS_FMT_YVYU8_1X16, + V4L2_COLORSPACE_SRGB, + V4L2_PIX_FMT_YVYU, + }, + { + MEDIA_BUS_FMT_UYVY8_1X16, + V4L2_COLORSPACE_SRGB, + V4L2_PIX_FMT_UYVY, + }, + { + MEDIA_BUS_FMT_VYUY8_1X16, + V4L2_COLORSPACE_SRGB, + V4L2_PIX_FMT_VYUY, + }, + { + MEDIA_BUS_FMT_RGB888_1X24, + V4L2_COLORSPACE_SRGB, + V4L2_PIX_FMT_RGB24, + }, + { + MEDIA_BUS_FMT_YUYV8_2X8, + V4L2_COLORSPACE_SRGB, + V4L2_PIX_FMT_YUYV, + }, + { + MEDIA_BUS_FMT_YVYU8_2X8, + V4L2_COLORSPACE_SRGB, + V4L2_PIX_FMT_YVYU, + }, + { + MEDIA_BUS_FMT_UYVY8_2X8, + V4L2_COLORSPACE_SRGB, + V4L2_PIX_FMT_UYVY, + }, + { + MEDIA_BUS_FMT_VYUY8_2X8, + V4L2_COLORSPACE_SRGB, + V4L2_PIX_FMT_VYUY, + }, +}; + +struct camera_common_csi_io_pad_ctx { + const char *name; + atomic_t ref; +}; + +static struct camera_common_csi_io_pad_ctx camera_common_csi_io_pads[] = { + {"csia", ATOMIC_INIT(0)}, + {"csib", ATOMIC_INIT(0)}, + {"csic", ATOMIC_INIT(0)}, + {"csid", ATOMIC_INIT(0)}, + {"csie", ATOMIC_INIT(0)}, + {"csif", 
ATOMIC_INIT(0)}, + {"csig", ATOMIC_INIT(0)}, + {"csih", ATOMIC_INIT(0)}, +}; + +static bool camera_common_verify_code( + struct tegra_channel *chan, unsigned int code) +{ + int i; + + for (i = 0; i < chan->num_video_formats; i++) { + if (chan->video_formats[i]->code == code) + return true; + } + + return false; +} + +int camera_common_g_ctrl(struct camera_common_data *s_data, + struct v4l2_control *control) +{ + int i; + + for (i = 0; i < s_data->numctrls; i++) { + if (s_data->ctrls[i]->id == control->id) { + control->value = s_data->ctrls[i]->val; + dev_dbg(s_data->dev, + "%s: found control %s\n", __func__, + s_data->ctrls[i]->name); + return 0; + } + } + + spec_bar(); + + return -EFAULT; +} +EXPORT_SYMBOL_GPL(camera_common_g_ctrl); + +int camera_common_regulator_get(struct device *dev, + struct regulator **vreg, const char *vreg_name) +{ + struct regulator *reg = NULL; + int err = 0; + + reg = devm_regulator_get(dev, vreg_name); + if (unlikely(IS_ERR(reg))) { + dev_err(dev, "%s %s ERR: %p\n", + __func__, vreg_name, reg); + err = PTR_ERR(reg); + reg = NULL; + } else + dev_dbg(dev, "%s: %s\n", + __func__, vreg_name); + + *vreg = reg; + return err; +} +EXPORT_SYMBOL_GPL(camera_common_regulator_get); + +int camera_common_parse_clocks(struct device *dev, + struct camera_common_pdata *pdata) +{ + struct device_node *np = dev->of_node; + const char *prop; + int proplen = 0; + int i = 0; + int numclocks = 0; + int mclk_index = 0; + int parentclk_index = -1; + int err = 0; + + + pdata->mclk_name = NULL; + pdata->parentclk_name = NULL; + err = of_property_read_string(np, "mclk", &pdata->mclk_name); + if (!err) { + dev_dbg(dev, "mclk in DT %s\n", pdata->mclk_name); + err = of_property_read_string(np, "parent-clk", + &pdata->parentclk_name); + if (err) { + dev_dbg(dev, "Failed to find parent-clk\n"); + } + return 0; + } + + prop = (const char *)of_get_property(np, "clock-names", &proplen); + if (!prop) + return -ENODATA; + + /* find length of clock-names string array */ + 
for (i = 0; i < proplen; i++) { + if (prop[i] == '\0') + numclocks++; + } + + if (numclocks > 1) { + err = of_property_read_u32(np, "mclk-index", &mclk_index); + if (err) { + dev_err(dev, "Failed to find mclk index\n"); + return err; + } + err = of_property_read_u32(np, "parent-clk-index", + &parentclk_index); + } + + for (i = 0; i < numclocks; i++) { + if (i == mclk_index) { + pdata->mclk_name = prop; + dev_dbg(dev, "%s: mclk_name is %s\n", + __func__, pdata->mclk_name); + } else if (i == parentclk_index) { + pdata->parentclk_name = prop; + dev_dbg(dev, "%s: parentclk_name is %s\n", + __func__, pdata->parentclk_name); + } else + dev_dbg(dev, "%s: %s\n", __func__, prop); + prop += strlen(prop) + 1; + } + + return 0; +} +EXPORT_SYMBOL_GPL(camera_common_parse_clocks); + +int camera_common_parse_ports(struct device *dev, + struct camera_common_data *s_data) +{ + struct device_node *node = dev->of_node; + struct device_node *ep = NULL; + struct device_node *next; + int bus_width = 0; + int err = 0; + int port = 0; + + /* Parse all the remote entities and put them into the list */ + next = of_graph_get_next_endpoint(node, ep); + if (!next) + return -ENODATA; + + of_node_put(ep); + ep = next; + + err = of_property_read_u32(ep, "bus-width", &bus_width); + if (err) { + dev_err(dev, + "Failed to find num of lanes\n"); + return err; + } + s_data->numlanes = bus_width; + + err = of_property_read_u32(ep, "port-index", &port); + if (err) { + dev_err(dev, + "Failed to find port index\n"); + return err; + } + s_data->csi_port = port; + + dev_dbg(dev, "%s: port %d num of lanes %d\n", + __func__, s_data->csi_port, s_data->numlanes); + + return 0; +} +EXPORT_SYMBOL_GPL(camera_common_parse_ports); + +int camera_common_parse_general_properties(struct device *dev, + struct camera_common_data *s_data) +{ + struct device_node *np = dev->of_node; + int err = 0; + const char *str; + + s_data->use_sensor_mode_id = false; + err = of_property_read_string(np, "use_sensor_mode_id", &str); + if 
(!err) { + if (!strcmp(str, "true")) + s_data->use_sensor_mode_id = true; + } + + return 0; +} +EXPORT_SYMBOL_GPL(camera_common_parse_general_properties); + +int camera_common_debugfs_show(struct seq_file *s, void *unused) +{ + struct camera_common_data *s_data = s->private; + + dev_dbg(s_data->dev, "%s: ++\n", __func__); + + return 0; +} + +ssize_t camera_common_debugfs_write( + struct file *file, + char const __user *buf, + size_t count, + loff_t *offset) +{ + struct camera_common_data *s_data = + ((struct seq_file *)file->private_data)->private; + struct device *dev = s_data->dev; + int err = 0; + char buffer[MAX_BUFFER_SIZE]; + u32 address; + u32 data; + u8 readback = 0; + + dev_dbg(dev, "%s: ++\n", __func__); + + if (copy_from_user(&buffer, buf, sizeof(buffer))) + goto debugfs_write_fail; + + if (sscanf(buffer, "0x%x 0x%x", &address, &data) == 2) + goto set_attr; + if (sscanf(buffer, "0X%x 0X%x", &address, &data) == 2) + goto set_attr; + if (sscanf(buffer, "%d %d", &address, &data) == 2) + goto set_attr; + + if (sscanf(buffer, "0x%x 0x%x", &address, &data) == 1) + goto read; + if (sscanf(buffer, "0X%x 0X%x", &address, &data) == 1) + goto read; + if (sscanf(buffer, "%d %d", &address, &data) == 1) + goto read; + + dev_err(dev, "SYNTAX ERROR: %s\n", buf); + return -EFAULT; + +set_attr: + dev_dbg(dev, + "new address = %x, data = %x\n", address, data); + err |= call_s_ops(s_data, write_reg, address, data); +read: + err |= call_s_ops(s_data, read_reg, address, &readback); + dev_dbg(dev, + "wrote to address 0x%x with value 0x%x\n", + address, readback); + + if (err) + goto debugfs_write_fail; + + return count; + +debugfs_write_fail: + dev_err(dev, + "%s: test pattern write failed\n", __func__); + return -EFAULT; +} + +int camera_common_debugfs_open(struct inode *inode, struct file *file) +{ + struct camera_common_data *s_data = inode->i_private; + struct device *dev = s_data->dev; + + dev_dbg(dev, "%s: ++\n", __func__); + + return single_open(file, 
camera_common_debugfs_show, inode->i_private); +} + +static const struct file_operations camera_common_debugfs_fops = { + .open = camera_common_debugfs_open, + .read = seq_read, + .write = camera_common_debugfs_write, + .llseek = seq_lseek, + .release = single_release, +}; + +void camera_common_remove_debugfs( + struct camera_common_data *s_data) +{ + struct device *dev = s_data->dev; + + dev_dbg(dev, "%s: ++\n", __func__); + + debugfs_remove_recursive(s_data->debugdir); + s_data->debugdir = NULL; +} +EXPORT_SYMBOL_GPL(camera_common_remove_debugfs); + +void camera_common_create_debugfs( + struct camera_common_data *s_data, + const char *name) +{ + struct dentry *err; + struct device *dev = s_data->dev; + + dev_dbg(dev, "%s %s\n", __func__, name); + + s_data->debugdir = + debugfs_create_dir(name, NULL); + if (!s_data->debugdir) + goto remove_debugfs; + + err = debugfs_create_file("d", + S_IWUSR | S_IRUGO, + s_data->debugdir, s_data, + &camera_common_debugfs_fops); + if (!err) + goto remove_debugfs; + + return; +remove_debugfs: + dev_err(dev, "couldn't create debugfs\n"); + camera_common_remove_debugfs(s_data); +} +EXPORT_SYMBOL_GPL(camera_common_create_debugfs); + +/* Find a data format by a pixel code in an array */ +const struct camera_common_colorfmt *camera_common_find_datafmt( + unsigned int code) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(camera_common_color_fmts); i++) + if (camera_common_color_fmts[i].code == code) + return camera_common_color_fmts + i; + + return NULL; +} +EXPORT_SYMBOL_GPL(camera_common_find_datafmt); + +/* Find a data format by pixel format in an array*/ +const struct camera_common_colorfmt *camera_common_find_pixelfmt( + unsigned int pix_fmt) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(camera_common_color_fmts); i++) + if (camera_common_color_fmts[i].pix_fmt == pix_fmt) + return camera_common_color_fmts + i; + + return NULL; +} +EXPORT_SYMBOL_GPL(camera_common_find_pixelfmt); + +/* Filters for the sensor's supported colors */ +static 
const struct camera_common_colorfmt *find_matching_color_fmt( + const struct camera_common_data *s_data, + size_t index) +{ + const struct sensor_properties *sensor_props = &s_data->sensor_props; + const size_t num_modes = sensor_props->num_modes; + const size_t common_fmts_size = ARRAY_SIZE(camera_common_color_fmts); + + struct sensor_image_properties *cur_props; + bool matched[ARRAY_SIZE(camera_common_color_fmts)]; + int match_num = -1; + int match_index = -1; + size_t i, j; + + // Clear matched array so no format has been matched + memset(matched, 0, sizeof(matched)); + + // Find and count matching color formats + for (i = 0; i < common_fmts_size; i++) { + for (j = 0; j < num_modes; j++) { + cur_props = &sensor_props->sensor_modes[j]. + image_properties; + if (cur_props->pixel_format == + camera_common_color_fmts[i].pix_fmt && + !matched[i]) { + match_num++; + match_index = i; + // Found index + if (match_num == index) + goto break_loops; + } + } + } +break_loops: + if (match_num < index) + return NULL; + index = array_index_nospec(index, match_num); + return &camera_common_color_fmts[match_index]; +} + +int camera_common_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct camera_common_data *s_data = to_camera_common_data(sd->dev); + struct tegra_channel *chan = v4l2_get_subdev_hostdata(sd); + const struct camera_common_colorfmt *sensor_fmt; + + sensor_fmt = find_matching_color_fmt(s_data, code->index); + + if (sensor_fmt == NULL) + return -EINVAL; + + if (!camera_common_verify_code(chan, sensor_fmt->code)) + return -EINVAL; + + code->code = sensor_fmt->code; + return 0; +} +EXPORT_SYMBOL_GPL(camera_common_enum_mbus_code); + +int camera_common_enum_fmt(struct v4l2_subdev *sd, unsigned int index, + unsigned int *code) +{ + struct camera_common_data *s_data = to_camera_common_data(sd->dev); + const struct camera_common_colorfmt *sensor_fmt; + + sensor_fmt = 
find_matching_color_fmt(s_data, index); + + if (sensor_fmt == NULL) + return -EINVAL; + *code = sensor_fmt->code; + return 0; +} +EXPORT_SYMBOL_GPL(camera_common_enum_fmt); + +static void select_mode(struct camera_common_data *s_data, + struct v4l2_mbus_framefmt *mf, + unsigned int mode_type) +{ + int i; + const struct camera_common_frmfmt *frmfmt = s_data->frmfmt; + bool flag = 0; + + for (i = 0; i < s_data->numfmts; i++) { + if (mode_type & HDR_ENABLE) + flag = !frmfmt[i].hdr_en; + /* Add more flags for different controls as needed */ + + if (flag) + continue; + + if (mf->width == frmfmt[i].size.width && + mf->height == frmfmt[i].size.height) { + s_data->mode = frmfmt[i].mode; + s_data->mode_prop_idx = i; + break; + } + } + + spec_bar(); +} + +int camera_common_try_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) +{ + struct camera_common_data *s_data = to_camera_common_data(sd->dev); + struct tegra_channel *chan = v4l2_get_subdev_hostdata(sd); + struct v4l2_control hdr_control; + const struct camera_common_frmfmt *frmfmt; + unsigned int mode_type = 0; + int err = 0; + int i; + + dev_dbg(sd->dev, "%s: size %i x %i\n", __func__, + mf->width, mf->height); + + if (!s_data) + return -EINVAL; + + frmfmt = s_data->frmfmt; + + /* check hdr enable ctrl */ + hdr_control.id = TEGRA_CAMERA_CID_HDR_EN; + + /* mode_type can be filled in sensor driver */ + if (!(v4l2_g_ctrl(s_data->ctrl_handler, &hdr_control))) + mode_type |= + switch_ctrl_qmenu[hdr_control.value] ? 
HDR_ENABLE : 0; + + s_data->mode = s_data->def_mode; + s_data->mode_prop_idx = 0; + s_data->fmt_width = s_data->def_width; + s_data->fmt_height = s_data->def_height; + + if (s_data->use_sensor_mode_id && + s_data->sensor_mode_id >= 0 && + s_data->sensor_mode_id < s_data->numfmts) { + dev_dbg(sd->dev, "%s: use_sensor_mode_id %d\n", + __func__, s_data->use_sensor_mode_id); + s_data->mode = frmfmt[s_data->sensor_mode_id].mode; + s_data->mode_prop_idx = s_data->sensor_mode_id; + if (mf->width == frmfmt[s_data->sensor_mode_id].size.width && + mf->height == frmfmt[s_data->sensor_mode_id].size.height) { + s_data->fmt_width = mf->width; + s_data->fmt_height = mf->height; + } + else + { + mf->width = s_data->fmt_width; + mf->height = s_data->fmt_height; + dev_dbg(sd->dev, + "%s: invalid resolution %d x %d\n", + __func__, mf->width, mf->height); + goto verify_code; + } + } else { + /* select mode based on format match first */ + for (i = 0; i < s_data->numfmts; i++) { + if (mf->width == frmfmt[i].size.width && + mf->height == frmfmt[i].size.height) { + s_data->mode = frmfmt[i].mode; + s_data->mode_prop_idx = i; + s_data->fmt_width = mf->width; + s_data->fmt_height = mf->height; + break; + } + } + + spec_bar(); + + if (i == s_data->numfmts) { + mf->width = s_data->fmt_width; + mf->height = s_data->fmt_height; + dev_dbg(sd->dev, + "%s: invalid resolution supplied to set mode %d %d\n", + __func__, mf->width, mf->height); + goto verify_code; + } + /* update mode based on special mode types */ + if (mode_type) + select_mode(s_data, mf, mode_type); + } + + if (!camera_common_verify_code(chan, mf->code)) + err = -EINVAL; + +verify_code: + mf->field = V4L2_FIELD_NONE; + mf->colorspace = V4L2_COLORSPACE_SRGB; + mf->xfer_func = V4L2_XFER_FUNC_DEFAULT; + mf->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + mf->quantization = V4L2_QUANTIZATION_DEFAULT; + + return err; +} +EXPORT_SYMBOL_GPL(camera_common_try_fmt); + +int camera_common_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) +{ 
+ struct camera_common_data *s_data = to_camera_common_data(sd->dev); + int ret; + + dev_dbg(sd->dev, "%s(%u) size %i x %i\n", __func__, + mf->code, mf->width, mf->height); + + if (!s_data) + return -EINVAL; + + /* MIPI CSI could have changed the format, double-check */ + if (!camera_common_find_datafmt(mf->code)) + return -EINVAL; + + ret = camera_common_try_fmt(sd, mf); + + s_data->colorfmt = camera_common_find_datafmt(mf->code); + + return ret; +} +EXPORT_SYMBOL_GPL(camera_common_s_fmt); + +int camera_common_g_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) +{ + struct camera_common_data *s_data = to_camera_common_data(sd->dev); + const struct camera_common_colorfmt *fmt; + + dev_dbg(sd->dev, "%s++\n", __func__); + + if (!s_data) + return -EINVAL; + fmt = s_data->colorfmt; + + mf->code = fmt->code; + mf->colorspace = fmt->colorspace; + mf->width = s_data->fmt_width; + mf->height = s_data->fmt_height; + mf->field = V4L2_FIELD_NONE; + mf->xfer_func = fmt->xfer_func; + mf->ycbcr_enc = fmt->ycbcr_enc; + mf->quantization = fmt->quantization; + + return 0; +} +EXPORT_SYMBOL_GPL(camera_common_g_fmt); + +static int camera_common_evaluate_color_format(struct v4l2_subdev *sd, + int code) +{ + struct camera_common_data *s_data = to_camera_common_data(sd->dev); + const size_t common_fmts_size = ARRAY_SIZE(camera_common_color_fmts); + struct sensor_image_properties *cur_props; + struct sensor_properties *sensor_props; + size_t sensor_num_modes; + int i, pixelformat; + + if (!s_data) + return -EINVAL; + + sensor_props = &s_data->sensor_props; + sensor_num_modes = sensor_props->num_modes; + + for (i = 0; i < common_fmts_size; i++) { + if (camera_common_color_fmts[i].code == code) + break; + } + + if (i == common_fmts_size) { + dev_dbg(s_data->dev, + "%s: unsupported color format(%08x) for vi\n" + , __func__, code); + return -EINVAL; + } + + pixelformat = camera_common_color_fmts[i].pix_fmt; + + for (i = 0; i < sensor_num_modes; i++) { + cur_props = 
&sensor_props->sensor_modes[i].image_properties; + if (cur_props->pixel_format == pixelformat) + return 0; + } + + spec_bar(); + + if (i == sensor_num_modes) { + dev_dbg(s_data->dev, + "%s: unsupported color format(%08x) for sensor\n" + , __func__, code); + return -EINVAL; + } + + return 0; +} + +int camera_common_enum_framesizes(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_size_enum *fse) +{ + struct camera_common_data *s_data = to_camera_common_data(sd->dev); + int ret; + + if (!s_data || !s_data->frmfmt) + return -EINVAL; + + if (fse->index >= s_data->numfmts) + return -EINVAL; + fse->index = array_index_nospec(fse->index, s_data->numfmts); + + ret = camera_common_evaluate_color_format(sd, fse->code); + if (ret) + return ret; + + fse->min_width = fse->max_width = + s_data->frmfmt[fse->index].size.width; + fse->min_height = fse->max_height = + s_data->frmfmt[fse->index].size.height; + return 0; +} +EXPORT_SYMBOL_GPL(camera_common_enum_framesizes); + +int camera_common_enum_frameintervals(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval_enum *fie) +{ + struct camera_common_data *s_data = to_camera_common_data(sd->dev); + int i, ret; + + if (!s_data || !s_data->frmfmt) + return -EINVAL; + + /* Check color format */ + ret = camera_common_evaluate_color_format(sd, fie->code); + if (ret) + return ret; + + /* Check resolution sizes */ + for (i = 0; i < s_data->numfmts; i++) { + if (s_data->frmfmt[i].size.width == fie->width && + s_data->frmfmt[i].size.height == fie->height) + break; + } + if (i >= s_data->numfmts) + return -EINVAL; + + /* Check index is in the rage of framerates array index */ + if (fie->index >= s_data->frmfmt[i].num_framerates) + return -EINVAL; + fie->index = array_index_nospec(fie->index, + s_data->frmfmt[i].num_framerates); + + fie->interval.numerator = 1; + fie->interval.denominator = + s_data->frmfmt[i].framerates[fie->index]; + + return 0; +} 
+EXPORT_SYMBOL_GPL(camera_common_enum_frameintervals); + +void camera_common_mclk_disable(struct camera_common_data *s_data) +{ + struct camera_common_power_rail *pw = s_data->power; + + if (!pw) { + dev_err(s_data->dev, "%s: no device power rail\n", + __func__); + return; + } + + dev_dbg(s_data->dev, "%s: disable MCLK\n", __func__); + clk_disable_unprepare(pw->mclk); +} +EXPORT_SYMBOL_GPL(camera_common_mclk_disable); + +int camera_common_mclk_enable(struct camera_common_data *s_data) +{ + int err; + struct camera_common_power_rail *pw = s_data->power; + unsigned long mclk_init_rate = s_data->def_clk_freq; + + if (!pw) { + dev_err(s_data->dev, "%s: no device power rail\n", + __func__); + return -ENODEV; + } + + dev_dbg(s_data->dev, "%s: enable MCLK with %lu Hz\n", + __func__, mclk_init_rate); + + err = clk_set_rate(pw->mclk, mclk_init_rate); + if (!err) + err = clk_prepare_enable(pw->mclk); + + return err; +} +EXPORT_SYMBOL_GPL(camera_common_mclk_enable); + +void camera_common_dpd_disable(struct camera_common_data *s_data) +{ + int i; + int io_idx; + /* 2 lanes per port, divide by two to get numports */ + int numports = (s_data->numlanes + 1) >> 1; + + /* disable CSI IOs DPD mode to turn on camera */ + for (i = 0; i < numports; i++) { + io_idx = s_data->csi_port + i; + if (io_idx < 0) { + spec_bar(); + return; + } + if (atomic_inc_return( + &camera_common_csi_io_pads[io_idx].ref) == 1) + tegra_io_pad_power_enable(TEGRA_IO_PAD_CSIA + io_idx); + + dev_dbg(s_data->dev, + "%s: csi %d\n", __func__, io_idx); + } + + spec_bar(); +} + +void camera_common_dpd_enable(struct camera_common_data *s_data) +{ + int i; + int io_idx; + /* 2 lanes per port, divide by two to get numports */ + int numports = (s_data->numlanes + 1) >> 1; + + /* disable CSI IOs DPD mode to turn on camera */ + for (i = 0; i < numports; i++) { + io_idx = s_data->csi_port + i; + if (io_idx < 0) { + spec_bar(); + return; + } + if (atomic_dec_return( + &camera_common_csi_io_pads[io_idx].ref) == 0) + 
tegra_io_pad_power_disable(TEGRA_IO_PAD_CSIA + io_idx); + + dev_dbg(s_data->dev, + "%s: csi %d\n", __func__, io_idx); + } + + spec_bar(); +} + +int camera_common_s_power(struct v4l2_subdev *sd, int on) +{ + int err = 0; + struct camera_common_data *s_data = to_camera_common_data(sd->dev); + + if (!s_data) + return -EINVAL; + + trace_camera_common_s_power("status", on); + if (on) { + err = call_s_op(s_data, power_on); + if (err) { + dev_err(s_data->dev, "%s: error power on\n", __func__); + return err; + } + } else { + call_s_op(s_data, power_off); + } + + return err; +} +EXPORT_SYMBOL_GPL(camera_common_s_power); + +int camera_common_get_mbus_config(struct v4l2_subdev *sd, + unsigned int pad, + struct v4l2_mbus_config *cfg) +{ + /* + * TODO Bug 200664694: If the sensor type is CPHY + * then return an error + */ + cfg->type = V4L2_MBUS_CSI2_DPHY; +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) + cfg->flags = V4L2_MBUS_CSI2_4_LANE | + V4L2_MBUS_CSI2_CHANNEL_0 | + V4L2_MBUS_CSI2_CONTINUOUS_CLOCK; +#else + cfg->bus.mipi_csi2.num_data_lanes = 4; +#endif + + return 0; +} +EXPORT_SYMBOL_GPL(camera_common_get_mbus_config); + +int camera_common_get_framesync(struct v4l2_subdev *sd, + struct camera_common_framesync *fs) +{ + struct camera_common_data *s_data = to_camera_common_data(sd->dev); + int err = -ENOTSUPP; + + if (!s_data) + return -EINVAL; + + if (has_s_op(s_data, get_framesync)) + err = call_s_ops(s_data, get_framesync, fs); + + return err; +} +EXPORT_SYMBOL_GPL(camera_common_get_framesync); + +int camera_common_focuser_s_power(struct v4l2_subdev *sd, int on) +{ + int err = 0; + struct camera_common_focuser_data *s_data = + to_camera_common_focuser_data(sd->dev); + + if (on) { + err = call_s_op(s_data, power_on); + if (err) + dev_err(s_data->dev, + "%s: error power on\n", __func__); + } else + err = call_s_op(s_data, power_off); + + return err; +} +EXPORT_SYMBOL_GPL(camera_common_focuser_s_power); + +int camera_common_initialize(struct camera_common_data *s_data, + 
const char *dev_name) +{ + int err = 0; + char debugfs_name[35]; + + if (s_data->dev == NULL) + return -EINVAL; + + err = camera_common_parse_ports(s_data->dev, s_data); + if (err) { + dev_err(s_data->dev, "Failed to find port info.\n"); + return err; + } + + err = camera_common_parse_general_properties(s_data->dev, s_data); + if (err) { + dev_err(s_data->dev, "Failed to find general properties.\n"); + return err; + } + + err = sensor_common_init_sensor_properties(s_data->dev, + s_data->dev->of_node, + &s_data->sensor_props); + if (err) { + dev_err(s_data->dev, + "Could not initialize sensor properties.\n"); + return err; + } + + err = sprintf(debugfs_name, "%s_%c", dev_name, s_data->csi_port + 'a'); + if (err < 0) + return -EINVAL; + + dev_dbg(s_data->dev, "%s_probe: name %s\n", dev_name, debugfs_name); + + camera_common_create_debugfs(s_data, debugfs_name); + + return 0; +} +EXPORT_SYMBOL_GPL(camera_common_initialize); + +void camera_common_cleanup(struct camera_common_data *s_data) +{ + camera_common_remove_debugfs(s_data); +} +EXPORT_SYMBOL_GPL(camera_common_cleanup); + +int camera_common_focuser_init(struct camera_common_focuser_data *s_data) +{ + int err = 0; + + /* power on */ + err = call_s_op(s_data, power_on); + if (err) { + dev_err(s_data->dev, + "%s: error power on\n", __func__); + return err; + } + + /* load default configuration */ + err = call_s_op(s_data, load_config); + if (err) { + dev_err(s_data->dev, + "%s: error loading config\n", __func__); + goto fail; + } + + /* set controls */ + err = call_s_op(s_data, ctrls_init); + if (err) + dev_err(s_data->dev, + "%s: error initializing controls\n", __func__); + +fail: + /* power off */ + err |= call_s_op(s_data, power_off); + + return err; +} +EXPORT_SYMBOL_GPL(camera_common_focuser_init); + +/* + * Regmap / RTCPU I2C driver interface + */ + +int camera_common_i2c_init( + struct camera_common_i2c *sensor, + struct i2c_client *client, + struct regmap_config *regmap_config, + const struct 
tegra_i2c_rtcpu_config *rtcpu_config) +{ + sensor->regmap = devm_regmap_init_i2c(client, regmap_config); + if (IS_ERR(sensor->regmap)) { + dev_err(&client->dev, + "regmap init failed: %ld\n", PTR_ERR(sensor->regmap)); + return -ENODEV; + } + + sensor->rt_sensor = tegra_i2c_rtcpu_register_sensor( + client, rtcpu_config); + + return 0; +} +EXPORT_SYMBOL(camera_common_i2c_init); + +int camera_common_i2c_aggregate( + struct camera_common_i2c *sensor, + bool start) +{ + if (sensor->rt_sensor) + return tegra_i2c_rtcpu_aggregate(sensor->rt_sensor, start); + + return 0; +} +EXPORT_SYMBOL(camera_common_i2c_aggregate); + +int camera_common_i2c_set_frame_id( + struct camera_common_i2c *sensor, + int frame_id) +{ + if (sensor->rt_sensor) + return tegra_i2c_rtcpu_set_frame_id( + sensor->rt_sensor, frame_id); + + return 0; +} +EXPORT_SYMBOL(camera_common_i2c_set_frame_id); + +int camera_common_i2c_read_reg8( + struct camera_common_i2c *sensor, + unsigned int addr, + u8 *data, + unsigned int count) +{ + if (sensor->rt_sensor) + return tegra_i2c_rtcpu_read_reg8(sensor->rt_sensor, + addr, data, count); + else + return regmap_bulk_read(sensor->regmap, addr, data, count); +} +EXPORT_SYMBOL(camera_common_i2c_read_reg8); + +int camera_common_i2c_write_reg8( + struct camera_common_i2c *sensor, + unsigned int addr, + const u8 *data, + unsigned int count) +{ + if (sensor->rt_sensor) + return tegra_i2c_rtcpu_write_reg8(sensor->rt_sensor, + addr, data, count); + else + return regmap_bulk_write(sensor->regmap, addr, data, count); +} +EXPORT_SYMBOL(camera_common_i2c_write_reg8); + +int camera_common_i2c_write_table_8( + struct camera_common_i2c *sensor, + const struct reg_8 table[], + const struct reg_8 override_list[], + int num_override_regs, u16 wait_ms_addr, u16 end_addr) +{ + if (sensor->rt_sensor) + return tegra_i2c_rtcpu_write_table_8(sensor->rt_sensor, + table, override_list, num_override_regs, + wait_ms_addr, end_addr); + else + return regmap_util_write_table_8(sensor->regmap, + 
table, override_list, num_override_regs, + wait_ms_addr, end_addr); +} +EXPORT_SYMBOL(camera_common_i2c_write_table_8); diff --git a/drivers/media/platform/tegra/camera/camera_gpio.c b/drivers/media/platform/tegra/camera/camera_gpio.c new file mode 100644 index 00000000..9f982c4d --- /dev/null +++ b/drivers/media/platform/tegra/camera/camera_gpio.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * camera_gpio.c - Camera GPIO driver + * + * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved. + */ + +#include +#include +#include +#include +#include + +#include "camera_gpio.h" + +struct camera_gpio { + struct list_head list; + unsigned gpio_num; + struct mutex mutex; + atomic_t state_cnt; + atomic_t use_cnt; +}; + +static DEFINE_MUTEX(g_mutex); +static LIST_HEAD(cam_gpio_list); + +int cam_gpio_register(struct device *dev, + unsigned pin_num) { + struct camera_gpio *new_gpio; + struct camera_gpio *next_gpio; + + mutex_lock(&g_mutex); + + + list_for_each_entry(next_gpio, &cam_gpio_list, list) { + if (next_gpio->gpio_num == pin_num) { + dev_dbg(dev, + "%s: gpio pin %u already registered.\n", + __func__, pin_num); + + atomic_inc(&next_gpio->use_cnt); + + mutex_unlock(&g_mutex); + return 0; + } + } + + /* gpio is not present in the cam_gpio_list, add it */ + new_gpio = kzalloc(sizeof(*new_gpio), GFP_KERNEL); + if (!new_gpio) { + dev_err(dev, "%s: memory low!\n", __func__); + mutex_unlock(&g_mutex); + return -EFAULT; + } + + dev_dbg(dev, "%s: adding cam gpio %u\n", + __func__, pin_num); + + new_gpio->gpio_num = pin_num; + mutex_init(&new_gpio->mutex); + atomic_inc(&new_gpio->use_cnt); + + list_add(&new_gpio->list, &cam_gpio_list); + + mutex_unlock(&g_mutex); + return 0; +} +EXPORT_SYMBOL(cam_gpio_register); + +void cam_gpio_deregister(struct device *dev, + unsigned pin_num) { + struct camera_gpio *next_gpio; + + mutex_lock(&g_mutex); + + + list_for_each_entry(next_gpio, &cam_gpio_list, list) { + if (next_gpio->gpio_num == pin_num) { 
atomic_dec(&next_gpio->use_cnt); + + if (atomic_read(&next_gpio->use_cnt) == 0) { + list_del(&next_gpio->list); + kfree(next_gpio); + + dev_dbg(dev, + "%s: removing cam gpio %u\n", + __func__, pin_num); + } + + break; + } + } + + mutex_unlock(&g_mutex); + return; +} +EXPORT_SYMBOL(cam_gpio_deregister); + +int cam_gpio_ctrl(struct device *dev, + unsigned pin_num, int val, + bool active_high) /* val: 0=deassert, 1=assert */ +{ + struct camera_gpio *next_gpio; + int err = -EINVAL; + int pin_val; + bool found = false; + + list_for_each_entry(next_gpio, &cam_gpio_list, list) { + mutex_lock(&next_gpio->mutex); + if (next_gpio->gpio_num == pin_num) { + found = true; + + if (!atomic_read(&next_gpio->state_cnt) && + !val) { + dev_err(dev, + "%s: state cnt can't be < 0\n", + __func__); + mutex_unlock(&next_gpio->mutex); + return err; + } + + if (val) + atomic_inc(&next_gpio->state_cnt); + else + atomic_dec(&next_gpio->state_cnt); + + pin_val = active_high ? val : !val; + pin_val &= 1; + err = pin_val; + + /* subtract val allows a 0 check to be + * used to indicate that gpio can be written to*/ + if (atomic_read(&next_gpio->state_cnt) - val == 0) { + gpio_set_value_cansleep(pin_num, pin_val); + dev_dbg(dev, "%s %u %d\n", + __func__, pin_num, pin_val); + } + } + mutex_unlock(&next_gpio->mutex); + } + + if (!found) + dev_dbg(dev, + "WARNING %s: gpio %u not in list\n", + __func__, pin_num); + + return err; /* return value written or error */ +} +EXPORT_SYMBOL(cam_gpio_ctrl); diff --git a/drivers/media/platform/tegra/camera/camera_gpio.h b/drivers/media/platform/tegra/camera/camera_gpio.h new file mode 100644 index 00000000..43e40a77 --- /dev/null +++ b/drivers/media/platform/tegra/camera/camera_gpio.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved. 
+ */ + +#ifndef __CAMERA_GPIO_H__ +#define __CAMERA_GPIO_H__ + +int cam_gpio_register(struct device *dev, + unsigned pin_num); + +void cam_gpio_deregister(struct device *dev, + unsigned pin_num); + +int cam_gpio_ctrl(struct device *dev, + unsigned pin_num, int ref_inc, bool active_high); + +#endif +/* __CAMERA_GPIO_H__ */ diff --git a/drivers/media/platform/tegra/camera/camera_version_utils.c b/drivers/media/platform/tegra/camera/camera_version_utils.c new file mode 100644 index 00000000..40488d18 --- /dev/null +++ b/drivers/media/platform/tegra/camera/camera_version_utils.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * camera_version_utils.c - utilities for different kernel versions + * camera driver supports + * + * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved. + */ +#include + +int tegra_media_entity_init(struct media_entity *entity, u16 num_pads, + struct media_pad *pad, bool is_subdev, bool is_sensor) +{ + if (!is_subdev) { + entity->obj_type = MEDIA_ENTITY_TYPE_VIDEO_DEVICE; + entity->function = MEDIA_ENT_F_IO_V4L; + } else { + entity->obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV; + entity->function = is_sensor ? 
MEDIA_ENT_F_CAM_SENSOR : + MEDIA_ENT_F_OLD_SUBDEV_BASE + 10; + } + return media_entity_pads_init(entity, num_pads, pad); +} +EXPORT_SYMBOL(tegra_media_entity_init); + +bool tegra_is_v4l2_subdev(struct media_entity *entity) +{ + return is_media_entity_v4l2_subdev(entity); +} +EXPORT_SYMBOL(tegra_is_v4l2_subdev); + +int tegra_media_create_link(struct media_entity *source, u16 source_pad, + struct media_entity *sink, u16 sink_pad, u32 flags) +{ + int ret = 0; + + ret = media_create_pad_link(source, source_pad, + sink, sink_pad, flags); + return ret; +} +EXPORT_SYMBOL(tegra_media_create_link); + +bool tegra_v4l2_match_dv_timings(struct v4l2_dv_timings *t1, + struct v4l2_dv_timings *t2, + unsigned pclock_delta, + bool match_reduced_fps) +{ + return v4l2_match_dv_timings(t1, t2, pclock_delta, match_reduced_fps); +} +EXPORT_SYMBOL(tegra_v4l2_match_dv_timings); + +int tegra_vb2_dma_init(struct device *dev, void **alloc_ctx, + unsigned int size, atomic_t *refcount) +{ + int ret = 0; + + if (atomic_inc_return(refcount) > 1) + return 0; + + if (vb2_dma_contig_set_max_seg_size(dev, SZ_64K)) { + dev_err(dev, "failed to init vb2 buffer\n"); + ret = -ENOMEM; + } + return ret; +} +EXPORT_SYMBOL(tegra_vb2_dma_init); + +void tegra_vb2_dma_cleanup(struct device *dev, void *alloc_ctx, + atomic_t *refcount) +{ + if (atomic_dec_return(refcount) < 0) + dev_err(dev, "%s: put to negative references\n", __func__); + /* dont call vb2_dma_contig_clear_max_seg_size as it will */ + /* call kfree dma_parms but dma_parms is static member */ +} +EXPORT_SYMBOL(tegra_vb2_dma_cleanup); diff --git a/drivers/media/platform/tegra/camera/csi/csi.c b/drivers/media/platform/tegra/camera/csi/csi.c new file mode 100644 index 00000000..bbe795b9 --- /dev/null +++ b/drivers/media/platform/tegra/camera/csi/csi.c @@ -0,0 +1,1159 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVIDIA Tegra CSI Device + * + * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include "mipical/mipi_cal.h" +#include +#include +#include "soc/tegra/camrtc-capture.h" +#include +#include "nvcsi/deskew.h" + +#define DEFAULT_NUM_TPG_CHANNELS 6 + +/* + * deskew should be run when the sensor data rate is >= 1.5 gbps + * data is sent on both rising/falling edges of clock, so /2 + */ +#define CLK_HZ_FOR_DESKEW ((1500*1000*1000)/2) + +static struct tegra_csi_device *mc_csi; + +struct tegra_csi_device *tegra_get_mc_csi(void) +{ + return mc_csi; +} +EXPORT_SYMBOL(tegra_get_mc_csi); + +static int set_csi_properties(struct tegra_csi_device *csi, + struct platform_device *pdev) +{ + struct camera_common_data *s_data = &csi->s_data[0]; + + /* + * These values are only used for tpg mode + * With sensor, CSI power and clock info are provided + * by the sensor sub device + */ + s_data->csi_port = 0; + s_data->numlanes = 12; + csi->clk_freq = TEGRA_CLOCK_CSI_PORT_MAX; + + return 0; +} + +static void update_blank_intervals(struct tegra_csi_channel *chan, + int portnum, int fmtindex) +{ + struct tegra_csi_port *port = &chan->ports[portnum]; + const struct tpg_frmfmt *tegra_csi_tpg_frmfmt = + chan->csi->tpg_frmfmt_table; + + port->framerate = tegra_csi_tpg_frmfmt[fmtindex].framerate; + port->h_blank = tegra_csi_tpg_frmfmt[fmtindex].h_blank; + port->v_blank = tegra_csi_tpg_frmfmt[fmtindex].v_blank; +} + +static struct sensor_mode_properties* +read_mode_from_dt(struct camera_common_data *s_data) +{ + struct sensor_mode_properties *mode = NULL; + + if (s_data) { + int idx = s_data->mode_prop_idx; + + if (idx < s_data->sensor_props.num_modes) + mode = &s_data->sensor_props.sensor_modes[idx]; + } + + return mode; +} + +u32 read_settle_time_from_dt(struct tegra_csi_channel *chan) +{ + struct camera_common_data *s_data = chan->s_data; + struct sensor_mode_properties *mode = read_mode_from_dt(s_data); 
+ struct device *dev = chan->csi->dev; + unsigned int cil_settletime = 0; + + if (mode) { + dev_dbg(dev, "settle time reading from props\n"); + cil_settletime = mode->signal_properties.cil_settletime; + } else if (chan->of_node) { + int err = 0; + const char *str; + + dev_dbg(dev, "settle time reading from of_node\n"); + err = of_property_read_string(chan->of_node, "cil_settletime", + &str); + if (!err) { + err = kstrtou32(str, 10, &cil_settletime); + if (err) { + dev_dbg(dev, + "no cil_settletime in of_node"); + cil_settletime = 0; + } + } + } + + return cil_settletime; +} + +u32 read_phy_mode_from_dt(struct tegra_csi_channel *chan) +{ + struct camera_common_data *s_data = chan->s_data; + struct sensor_mode_properties *mode = read_mode_from_dt(s_data); + struct device *dev = chan->csi->dev; + u32 phy_mode = 0; + + if (mode) { + dev_dbg(dev, "settle time reading from props\n"); + phy_mode = mode->signal_properties.phy_mode; + } else { + dev_dbg(dev, "phy mode unavailable in props, use default\n"); + phy_mode = CSI_PHY_MODE_DPHY; + } + + return phy_mode; +} + +u64 read_mipi_clk_from_dt(struct tegra_csi_channel *chan) +{ + struct sensor_signal_properties *sig_props; + struct sensor_properties *props; + u64 mipi_clk = 0; + int mode_idx; + + if (chan && chan->s_data) { + mode_idx = chan->s_data->mode_prop_idx; + props = &chan->s_data->sensor_props; + sig_props = &props->sensor_modes[mode_idx].signal_properties; + mipi_clk = sig_props->mipi_clock.val; + } + + return mipi_clk; +} + +void set_csi_portinfo(struct tegra_csi_device *csi, + unsigned int port, unsigned int numlanes) +{ + struct camera_common_data *s_data = &csi->s_data[port]; + + s_data->csi_port = port; + s_data->numlanes = numlanes; + s_data->def_clk_freq = TEGRA_CLOCK_CSI_PORT_MAX; +} +EXPORT_SYMBOL(set_csi_portinfo); + +int tegra_csi_power(struct tegra_csi_device *csi, + struct tegra_csi_channel *chan, int enable) +{ + int err = 0; + + trace_csi_s_power("enable", enable); + if (enable) { + err = 
csi->fops->csi_power_on(csi); + if (!err) + atomic_inc(&csi->power_ref); + } else { + err = csi->fops->csi_power_off(csi); + if (!err) + atomic_dec(&csi->power_ref); + } + return err; +} +EXPORT_SYMBOL(tegra_csi_power); + +int tegra_csi_error_recovery(struct tegra_channel *chan, + struct tegra_csi_device *csi, struct tegra_csi_channel *csi_chan) +{ + int err = 0; + int i = 0; + struct tegra_csi_port *port = &csi_chan->ports[i]; + const char *rec_err_msg = + "%s: failed to recover pt %u st %u vc %u (pg_mode %d)\n"; + + for (i = 0; i < chan->valid_ports; i++) { + err = csi->fops->csi_error_recover(csi_chan, i); + if (err) { + dev_err(csi->dev, rec_err_msg, __func__, + port->csi_port, port->stream_id, + port->virtual_channel_id, chan->pg_mode); + break; + } + } + + return err; +} +EXPORT_SYMBOL(tegra_csi_error_recovery); + +static int tegra_csi_s_power(struct v4l2_subdev *subdev, int enable) +{ + int err = 0; + struct tegra_csi_device *csi = to_csi(subdev); + struct tegra_csi_channel *chan = to_csi_chan(subdev); + + err = tegra_csi_power(csi, chan, enable); + + return err; +} + +#if 0 /* disable for Canonical kernel */ +static int tegra_csi_sync_event(struct v4l2_subdev *subdev, + unsigned int sync_events) +{ + int err = 0; + struct tegra_channel *chan = v4l2_get_subdev_hostdata(subdev); + struct tegra_csi_device *csi = to_csi(subdev); + struct tegra_csi_channel *csi_chan = to_csi_chan(subdev); + + if (sync_events & V4L2_SYNC_EVENT_SUBDEV_ERROR_RECOVER) + err = tegra_csi_error_recovery(chan, csi, csi_chan); + + return err; +} +#endif + +/* + * ----------------------------------------------------------------------------- + * CSI Subdevice Video Operations + * ----------------------------------------------------------------------------- + */ + +int tegra_csi_start_streaming(struct tegra_csi_channel *chan, int port_idx) +{ + struct tegra_csi_device *csi = chan->csi; + + return csi->fops->csi_start_streaming(chan, port_idx); +} +EXPORT_SYMBOL(tegra_csi_start_streaming); + 
+void tegra_csi_stop_streaming(struct tegra_csi_channel *chan, int port_idx) +{ + struct tegra_csi_device *csi = chan->csi; + + csi->fops->csi_stop_streaming(chan, port_idx); +} +EXPORT_SYMBOL(tegra_csi_stop_streaming); + +int tegra_csi_tpg_set_gain(struct v4l2_subdev *sd, void *arg) +{ + struct tegra_csi_channel *chan = to_csi_chan(sd); + struct tegra_csi_device *csi = to_csi(sd); + int *val = arg; + + if (!chan->pg_mode) { + dev_err(chan->csi->dev, "CSI is not in TPG mode\n"); + return -EINVAL; + } + + return csi->fops->tpg_set_gain(chan, *val); +} +EXPORT_SYMBOL(tegra_csi_tpg_set_gain); + +static int update_video_source(struct tegra_csi_device *csi, int on, int is_tpg) +{ + mutex_lock(&csi->source_update); + if (!on) { + if (is_tpg) + csi->tpg_active--; + else + csi->sensor_active--; + WARN_ON(csi->tpg_active < 0 || csi->sensor_active < 0); + goto stream_okay; + } + if (is_tpg && csi->tpg_active >= 0 && !csi->sensor_active) { + csi->tpg_active++; + goto stream_okay; + } + if (!is_tpg && csi->sensor_active >= 0 && !csi->tpg_active) { + csi->sensor_active++; + goto stream_okay; + } + mutex_unlock(&csi->source_update); + dev_err(csi->dev, "Request rejected for new %s stream\n", + is_tpg ? 
"tpg" : "sensor"); + dev_err(csi->dev, "Active tpg streams %d, active sensor streams %d\n", + csi->tpg_active, csi->sensor_active); + return -EINVAL; +stream_okay: + mutex_unlock(&csi->source_update); + return 0; +} + +static void deskew_setup(struct tegra_csi_channel *chan, + struct nvcsi_deskew_context *deskew_ctx) +{ + struct sensor_signal_properties *sig_props; + struct sensor_properties *props; + int i; + int mode_idx = -1; + u64 pix_clk_hz = 0; + u32 deskew_enable = 0; + unsigned int csi_lane_start = 0; + unsigned int csi_port, csi_lanes; + + if (chan->s_data == NULL) + return; + + mode_idx = chan->s_data->mode_prop_idx; + props = &chan->s_data->sensor_props; + sig_props = &props->sensor_modes[mode_idx].signal_properties; + if (sig_props->serdes_pixel_clock.val != 0ULL) + pix_clk_hz = sig_props->serdes_pixel_clock.val; + else + pix_clk_hz = sig_props->pixel_clock.val; + deskew_enable = sig_props->deskew_initial_enable; + + if (pix_clk_hz >= CLK_HZ_FOR_DESKEW && deskew_enable) { + csi_port = chan->ports[0].csi_port; + csi_lanes = chan->ports[0].lanes; + switch (csi_port) { + case NVCSI_PORT_A: + csi_lane_start = NVCSI_PHY_0_NVCSI_CIL_A_IO0; + break; + case NVCSI_PORT_B: + csi_lane_start = NVCSI_PHY_0_NVCSI_CIL_B_IO0; + break; + case NVCSI_PORT_C: + csi_lane_start = NVCSI_PHY_1_NVCSI_CIL_A_IO0; + break; + case NVCSI_PORT_D: + csi_lane_start = NVCSI_PHY_1_NVCSI_CIL_B_IO0; + break; + case NVCSI_PORT_E: + csi_lane_start = NVCSI_PHY_2_NVCSI_CIL_A_IO0; + break; + case NVCSI_PORT_F: + csi_lane_start = NVCSI_PHY_2_NVCSI_CIL_B_IO0; + break; + case NVCSI_PORT_G: + csi_lane_start = NVCSI_PHY_3_NVCSI_CIL_A_IO0; + break; + case NVCSI_PORT_H: + csi_lane_start = NVCSI_PHY_3_NVCSI_CIL_B_IO0; + break; + default: + break; + } + deskew_ctx->deskew_lanes = 0; + for (i = 0; i < csi_lanes; ++i) + deskew_ctx->deskew_lanes |= csi_lane_start << i; + nvcsi_deskew_setup(deskew_ctx); + } + +} + +static int tegra_csi_s_stream(struct v4l2_subdev *subdev, int enable) +{ + struct 
tegra_csi_device *csi; + struct tegra_csi_channel *chan = to_csi_chan(subdev); + struct tegra_channel *tegra_chan = v4l2_get_subdev_hostdata(subdev); + int i, ret = 0; + + if (atomic_read(&chan->is_streaming) == enable) + return 0; + trace_csi_s_stream("enable", enable); + csi = to_csi(subdev); + if (!csi) + return -EINVAL; + ret = update_video_source(csi, enable, chan->pg_mode); + if (ret) + return ret; + + /* if it is bypass and real sensor, return here + * else let tegra_csi_start_streaming handle it + * depending on bypass and pg_mode flags + */ + if (tegra_chan->bypass && !tegra_chan->pg_mode) { + atomic_set(&chan->is_streaming, enable); + return 0; + } + for (i = 0; i < tegra_chan->valid_ports; i++) { + if (enable) { + ret = tegra_csi_start_streaming(chan, i); + if (ret) + goto start_fail; + if (!tegra_chan->bypass && !tegra_chan->pg_mode) + deskew_setup(chan, + tegra_chan->deskew_ctx); + } else + tegra_csi_stop_streaming(chan, i); + } + atomic_set(&chan->is_streaming, enable); + return ret; +start_fail: + update_video_source(csi, 0, chan->pg_mode); + /* Reverse sequence to stop streaming on all valid_ports + * i is the current failing port, need to stop ports 0 ~ (i-1) + */ + for (i = i - 1; i >= 0; i--) + tegra_csi_stop_streaming(chan, i); + return ret; +} + + +/* Used to calculate the settling time based on the mipi and cil clocks */ +unsigned int tegra_csi_ths_settling_time( + struct tegra_csi_device *csi, + const unsigned int csicil_clk_mhz, + const unsigned int mipi_clk_mhz) +{ + unsigned int cil_settletime; + + cil_settletime = (115 * csicil_clk_mhz + 8000 * csicil_clk_mhz + / (2 * mipi_clk_mhz) - 5500) / 1000; + return cil_settletime; +} +EXPORT_SYMBOL(tegra_csi_ths_settling_time); + +unsigned int tegra_csi_clk_settling_time( + struct tegra_csi_device *csi, + const unsigned int csicil_clk_mhz) +{ + unsigned int clk_settletime; + + clk_settletime = ((95 + 300) * csicil_clk_mhz - 13000) / 2000; + return clk_settletime; +} 
+EXPORT_SYMBOL(tegra_csi_clk_settling_time); + +/* + * Only use this subdevice media bus ops for test pattern generator, + * because CSI device is an separated subdevice which has 6 source + * pads to generate test pattern. + */ +static struct v4l2_mbus_framefmt tegra_csi_tpg_fmts[] = { + { + TEGRA_DEF_WIDTH, + TEGRA_DEF_HEIGHT, + MEDIA_BUS_FMT_SRGGB10_1X10, + V4L2_FIELD_NONE, + V4L2_COLORSPACE_SRGB + }, + { + TEGRA_DEF_WIDTH, + TEGRA_DEF_HEIGHT, + MEDIA_BUS_FMT_RGB888_1X32_PADHI, + V4L2_FIELD_NONE, + V4L2_COLORSPACE_SRGB + }, + { + TEGRA_DEF_WIDTH, + TEGRA_DEF_HEIGHT, + MEDIA_BUS_FMT_UYVY8_1X16, + V4L2_FIELD_NONE, + V4L2_COLORSPACE_SRGB + }, +}; + +static struct v4l2_frmsize_discrete tegra_csi_tpg_sizes[] = { + {320, 240}, + {1280, 720}, + {1920, 1080}, + {3840, 2160} +}; + +static int tegra_csi_enum_framesizes(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_size_enum *fse) +{ + int i; + struct tegra_csi_channel *chan = to_csi_chan(sd); + + if (!chan->pg_mode) + return -ENOIOCTLCMD; + + if (fse->index >= ARRAY_SIZE(tegra_csi_tpg_sizes)) + return -EINVAL; + fse->index = array_index_nospec(fse->index, + ARRAY_SIZE(tegra_csi_tpg_sizes)); + + for (i = 0; i < ARRAY_SIZE(tegra_csi_tpg_fmts); i++) { + if (tegra_csi_tpg_fmts[i].code == fse->code) + break; + } + if (i == ARRAY_SIZE(tegra_csi_tpg_fmts)) + return -EINVAL; + + fse->min_width = fse->max_width = + tegra_csi_tpg_sizes[fse->index].width; + fse->min_height = fse->max_height = + tegra_csi_tpg_sizes[fse->index].height; + return 0; +} + +static int tegra_csi_get_fmtindex(struct tegra_csi_channel *chan, + int width, int height, int pixel_format) +{ + int i; + const struct tpg_frmfmt *tegra_csi_tpg_frmfmt = + chan->csi->tpg_frmfmt_table; + + for (i = 0; i < chan->csi->tpg_frmfmt_table_size; i++) { + if (tegra_csi_tpg_frmfmt[i].frmsize.width == width && + tegra_csi_tpg_frmfmt[i].frmsize.height == height && + tegra_csi_tpg_frmfmt[i].pixel_format == pixel_format) + break; + } + + if 
(i == chan->csi->tpg_frmfmt_table_size) + return -EINVAL; + + return i; +} + +static int tegra_csi_enum_frameintervals(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval_enum *fie) +{ + int index; + struct tegra_csi_channel *chan = to_csi_chan(sd); + const struct tegra_video_format *format; + const struct tpg_frmfmt *tegra_csi_tpg_frmfmt = + chan->csi->tpg_frmfmt_table; + struct tegra_channel *vi_chan = v4l2_get_subdev_hostdata(sd); + + if (!chan->pg_mode) + return -ENOIOCTLCMD; + + /* One resolution just one framerate */ + if (fie->index > 0) + return -EINVAL; + format = tegra_core_get_format_by_code(vi_chan, fie->code, 0); + if (!format) + return -EINVAL; + index = tegra_csi_get_fmtindex(chan, fie->width, fie->height, + format->fourcc); + if (index < 0) + return -EINVAL; + + fie->interval.numerator = 1; + fie->interval.denominator = tegra_csi_tpg_frmfmt[index].framerate; + + return 0; +} + +static int tegra_csi_try_mbus_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf) +{ + int i, j; + struct tegra_csi_channel *chan = to_csi_chan(sd); + const struct v4l2_frmsize_discrete *sizes; + + if (!chan->pg_mode) + return -ENOIOCTLCMD; + + for (i = 0; i < ARRAY_SIZE(tegra_csi_tpg_fmts); i++) { + struct v4l2_mbus_framefmt *fmt = &tegra_csi_tpg_fmts[i]; + + if (mf->code == fmt->code) { + for (j = 0; j < ARRAY_SIZE(tegra_csi_tpg_sizes); j++) { + sizes = &tegra_csi_tpg_sizes[j]; + if (mf->width == sizes->width && + mf->height == sizes->height) + return 0; + } + } + } + + dev_info(chan->csi->dev, "use TEGRA_DEF_WIDTH x TEGRA_DEF_HEIGHT (1920x1080)\n"); + memcpy(mf, tegra_csi_tpg_fmts, sizeof(struct v4l2_mbus_framefmt)); + + return 0; +} + +static int tegra_csi_g_mbus_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *fmt) +{ + struct tegra_csi_channel *chan = to_csi_chan(sd); + struct v4l2_mbus_framefmt *format = &chan->ports[0].format; + + if (!chan->pg_mode) { + dev_err(chan->csi->dev, "CSI is not in TPG mode\n"); + 
return -EINVAL; + } + + mutex_lock(&chan->format_lock); + memcpy(fmt, format, sizeof(struct v4l2_mbus_framefmt)); + mutex_unlock(&chan->format_lock); + + return 0; +} + +static int csi_is_power_on(struct tegra_csi_device *csi) +{ + return atomic_read(&csi->power_ref); +} +static int tegra_csi_g_input_status(struct v4l2_subdev *sd, u32 *status) +{ + struct tegra_csi_device *csi = to_csi(sd); + + /* Set status to 0 if power is on + * Set status to 1 if power is off + */ + *status = !csi_is_power_on(csi); + + return 0; +} + +/* ----------------------------------------------------------------------------- + * V4L2 Subdevice Pad Operations + */ + +static int tegra_csi_get_format(struct v4l2_subdev *subdev, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *fmt) +{ + struct tegra_csi_channel *chan = to_csi_chan(subdev); + struct v4l2_mbus_framefmt *mbus_fmt = &fmt->format; + int ret; + + if (!chan->pg_mode) + return -ENOIOCTLCMD; + ret = tegra_csi_g_mbus_fmt(subdev, mbus_fmt); + if (ret) + return ret; + + return 0; +} + +static int tegra_csi_set_format(struct v4l2_subdev *subdev, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *fmt) +{ + int ret; + struct tegra_csi_channel *chan = to_csi_chan(subdev); + struct v4l2_mbus_framefmt *format = &fmt->format; + const struct tegra_video_format *vf; + struct tegra_channel *vi_chan = v4l2_get_subdev_hostdata(subdev); + int index, i; + + if (!chan->pg_mode) + return -ENOIOCTLCMD; + + ret = tegra_csi_try_mbus_fmt(subdev, format); + if (ret) + return ret; + + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) + return 0; + + vf = tegra_core_get_format_by_code(vi_chan, format->code, 0); + if (!vf) { + dev_err(chan->csi->dev, "Fail to find tegra video fmt"); + mutex_unlock(&chan->format_lock); + return -EINVAL; + } + index = tegra_csi_get_fmtindex(chan, format->width, + format->height, vf->fourcc); + if (index < 0) { + dev_err(chan->csi->dev, "Fail to find matching fmt"); + return -EINVAL; + } + + 
mutex_lock(&chan->format_lock); + for (i = 0; i < vi_chan->valid_ports; i++) { + memcpy(&chan->ports[i].format, + &fmt->format, sizeof(struct v4l2_mbus_framefmt)); + chan->ports[i].core_format = vf; + update_blank_intervals(chan, i, index); + } + mutex_unlock(&chan->format_lock); + + return 0; +} + +static int tegra_csi_g_frame_interval(struct v4l2_subdev *sd, + struct v4l2_subdev_frame_interval *vfi) +{ + struct tegra_csi_channel *chan = to_csi_chan(sd); + struct tegra_csi_port *port = &chan->ports[0]; + + if (!port->framerate) + return -EINVAL; + + vfi->interval.numerator = 1; + vfi->interval.denominator = port->framerate; + + return 0; +} + +static int tegra_csi_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_mbus_code_enum *code) +{ + if (code->index >= ARRAY_SIZE(tegra_csi_tpg_fmts)) + return -EINVAL; + + code->code = tegra_csi_tpg_fmts[code->index].code; + return 0; +} + +/* ----------------------------------------------------------------------------- + * V4L2 Subdevice Operations + */ +static struct v4l2_subdev_video_ops tegra_csi_video_ops = { + .s_stream = tegra_csi_s_stream, + .g_input_status = tegra_csi_g_input_status, + .g_frame_interval = tegra_csi_g_frame_interval, +}; + +static struct v4l2_subdev_pad_ops tegra_csi_pad_ops = { + .get_fmt = tegra_csi_get_format, + .set_fmt = tegra_csi_set_format, + .enum_mbus_code = tegra_csi_enum_mbus_code, + .enum_frame_size = tegra_csi_enum_framesizes, + .enum_frame_interval = tegra_csi_enum_frameintervals, +}; + +static struct v4l2_subdev_core_ops tegra_csi_core_ops = { + .s_power = tegra_csi_s_power, +#if 0 /* disable for Canonical kernel */ + .sync = tegra_csi_sync_event, +#endif +}; + +static struct v4l2_subdev_ops tegra_csi_ops = { + .core = &tegra_csi_core_ops, + .video = &tegra_csi_video_ops, + .pad = &tegra_csi_pad_ops, +}; + +/* ----------------------------------------------------------------------------- + * Media Operations + */ + +static const struct 
media_entity_operations tegra_csi_media_ops = { + .link_validate = v4l2_subdev_link_validate, +}; + +/* ----------------------------------------------------------------------------- + * Platform Device Driver + */ + +static int tegra_csi_get_port_info(struct tegra_csi_channel *chan, + struct device_node *node, unsigned int index) +{ + struct device_node *ep = NULL; + struct device_node *ports; + struct device_node *port; + struct device_node *chan_dt; + + int value = 0xFFFF; + int ret = 0; + u32 i = 0; + + memset(&chan->port[0], INVALID_CSI_PORT, TEGRA_CSI_BLOCKS); + for_each_child_of_node(node, chan_dt) { + if (!chan_dt->name || of_node_cmp(chan_dt->name, "channel")) + continue; + ret = of_property_read_u32(chan_dt, "reg", &value); + if (ret < 0) + return -EINVAL; + chan->of_node = chan_dt; + if (value == index) + break; + } + + chan->subdev.fwnode = of_fwnode_handle(chan_dt); + ports = of_get_child_by_name(chan_dt, "ports"); + if (ports == NULL) + return -EINVAL; + + for_each_child_of_node(ports, port) { + if (!port->name || of_node_cmp(port->name, "port")) + continue; + ret = of_property_read_u32(port, "reg", &value); + if (ret < 0) + continue; + if (value != 0) + continue; + for_each_child_of_node(port, ep) { + if (!ep->name || of_node_cmp(ep->name, "endpoint")) + continue; + ret = of_property_read_u32(ep, "port-index", &value); + if (ret < 0) + dev_err(chan->csi->dev, "No port index info\n"); + chan->port[0] = value; + + ret = of_property_read_u32(ep, "bus-width", &value); + if (ret < 0) + dev_err(chan->csi->dev, "No bus width info\n"); + chan->numlanes = value; + if (value > 12) { + dev_err(chan->csi->dev, "Invalid num lanes\n"); + return -EINVAL; + } + /* + * for numlanes greater than 4 multiple CSI bricks + * are needed to capture the image, the logic below + * checks for numlanes > 4 and add a new CSI brick + * as a valid port. Loops around the three CSI + * bricks to add as many ports necessary. 
+ */ + value -= 4; + for (i = 1; value > 0 && i < TEGRA_CSI_BLOCKS; i++, value -= 4) { + int next_port = chan->port[i-1] + 2; + + next_port = (next_port % (NVCSI_PORT_H + 1)); + chan->port[i] = next_port; + } + } + } + + for (i = 0; csi_port_is_valid(chan->port[i]); i++) + chan->numports++; + + return 0; +} + +int tegra_csi_init(struct tegra_csi_device *csi, + struct platform_device *pdev) +{ + int err = 0; + struct nvhost_device_data *pdata = platform_get_drvdata(pdev); + + csi->dev = &pdev->dev; + err = set_csi_properties(csi, pdev); + if (err) + return err; + + csi->iomem_base = pdata->aperture[0]; + csi->fops->hw_init(csi); + return err; +} + +static int tegra_csi_channel_init_one(struct tegra_csi_channel *chan) +{ + struct v4l2_subdev *sd; + int numlanes = 0; + struct tegra_csi_device *csi = chan->csi; + int i, ret; + const struct tegra_video_format *vf; + + mutex_init(&chan->format_lock); + + vf = tegra_core_get_default_format(); + if (vf == NULL) { + dev_err(csi->dev, "Fail to find tegra video fmt"); + return -EINVAL; + } + + atomic_set(&chan->is_streaming, 0); + sd = &chan->subdev; + /* Initialize V4L2 subdevice and media entity */ + v4l2_subdev_init(sd, &tegra_csi_ops); + sd->dev = chan->csi->dev; + v4l2_set_subdevdata(sd, csi); + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + sd->entity.ops = &tegra_csi_media_ops; + chan->ports = devm_kzalloc(csi->dev, + chan->numports * sizeof(struct tegra_csi_port), + GFP_KERNEL); + if (!chan->ports) + return -ENOMEM; + + /* Initialize the default format */ + for (i = 0; i < chan->numports; i++) { + chan->ports[i].format.code = vf->vf_code; + chan->ports[i].format.field = V4L2_FIELD_NONE; + chan->ports[i].format.colorspace = V4L2_COLORSPACE_SRGB; + chan->ports[i].format.width = TEGRA_DEF_WIDTH; + chan->ports[i].format.height = TEGRA_DEF_HEIGHT; + chan->ports[i].core_format = vf; + } + if (chan->pg_mode) { + /* If CSI has 2 existing channels, chan->id will start + * from 2 for the first TPG channel, which uses PORT_A(0). 
+ * To get the correct PORT number, subtract existing number of + * channels from chan->id. + * when virtual channel is used, tpg0->pp0/vc0, tpg1->pp1/vc0, + * tpg2->pp2/vc0, tpg3->pp3/vc0, tpg4->pp4/vc0, tpg5->pp5/vc0, + * tpg6->pp0/vc1, tpg7->pp1/vc1 etc. + * pp means pixel parser, correspond to port[0] below. + * tpg id correspond to chan->id + */ + chan->port[0] = (chan->id - csi->num_channels) + % NUM_TPG_INSTANCE; + WARN_ON(chan->port[0] > csi->num_tpg_channels); + chan->ports[0].stream_id = chan->port[0]; + chan->ports[0].virtual_channel_id + = (chan->id - csi->num_channels) / NUM_TPG_INSTANCE; + chan->ports->lanes = 2; + chan->pads = devm_kzalloc(csi->dev, sizeof(*chan->pads), + GFP_KERNEL); + if (!chan->pads) + return -ENOMEM; + chan->pads[0].flags = MEDIA_PAD_FL_SOURCE; + } else { + chan->pads = devm_kzalloc(csi->dev, 2 * sizeof(*chan->pads), + GFP_KERNEL); + if (!chan->pads) + return -ENOMEM; + chan->pads[0].flags = MEDIA_PAD_FL_SINK; + chan->pads[1].flags = MEDIA_PAD_FL_SOURCE; + } + ret = snprintf(sd->name, sizeof(sd->name), "%s-%d", + chan->pg_mode ? "tpg" : + (strlen(csi->devname) == 0 ? + dev_name(csi->dev) : csi->devname), + (chan->id - csi->num_channels)); + if (ret < 0) + return -EINVAL; + + /* Initialize media entity */ + ret = tegra_media_entity_init(&sd->entity, chan->pg_mode ? 1 : 2, + chan->pads, true, false); + if (ret < 0) + return ret; + + for (i = 0; i < chan->numports; i++) { + numlanes = chan->numlanes - (i * MAX_CSI_BLOCK_LANES); + WARN_ON(numlanes < 0); + numlanes = numlanes > MAX_CSI_BLOCK_LANES ? 
+ MAX_CSI_BLOCK_LANES : numlanes; + chan->ports[i].lanes = numlanes; + + if (!chan->pg_mode) + chan->ports[i].csi_port = chan->port[i]; + } + + if (!chan->pg_mode) { + ret = v4l2_async_register_subdev(sd); + if (ret < 0) { + dev_err(csi->dev, "failed to register subdev\n"); + media_entity_cleanup(&sd->entity); + } + } + return ret; +} + +static int tegra_csi_channels_init(struct tegra_csi_device *csi) +{ + int ret; + struct tegra_csi_channel *it; + + list_for_each_entry(it, &csi->csi_chans, list) { + ret = tegra_csi_channel_init_one(it); + if (ret) + return ret; + } + + return 0; +} + +static int csi_parse_dt(struct tegra_csi_device *csi, + struct platform_device *pdev) +{ + int err = 0, i; + int num_channels = 0, num_tpg_channels = 0; + struct device_node *node = pdev->dev.of_node; + struct tegra_csi_channel *item; + + if (strncmp(node->name, "nvcsi", 5)) { + node = of_find_node_by_name(node, "nvcsi"); + strncpy(csi->devname, "nvcsi", 6); + } + + if (node) { + err = of_property_read_u32(node, "num-channels", &num_channels); + if (err) { + dev_dbg(csi->dev, " Failed to find num of channels, set to 0\n"); + num_channels = 0; + } + + err = of_property_read_u32(node, "num-tpg-channels", + &num_tpg_channels); + /* Backward compatibility for T210 and T186. + * They both can generate 6 tpg streams, so use 6 + * as default if DT entry is missing. 
+ * For future chips, add this DT entry to + * create correct number of tpg video nodes + */ + if (err) + num_tpg_channels = DEFAULT_NUM_TPG_CHANNELS; + } else { + num_channels = 0; + num_tpg_channels = DEFAULT_NUM_TPG_CHANNELS; + } + + csi->num_tpg_channels = num_tpg_channels; + csi->num_channels = num_channels; + for (i = 0; i < num_channels; i++) { + item = devm_kzalloc(csi->dev, sizeof(*item), GFP_KERNEL); + if (!item) + return -ENOMEM; + list_add_tail(&item->list, &csi->csi_chans); + item->csi = csi; + item->id = i; + err = tegra_csi_get_port_info(item, node, item->id); + if (err) + return err; + } + + return 0; +} + +int tpg_csi_media_controller_init(struct tegra_csi_device *csi, int pg_mode) +{ + int i, err; + struct tegra_csi_channel *item; + + if (!csi) + return -EINVAL; + for (i = 0; i < csi->num_tpg_channels; i++) { + item = devm_kzalloc(csi->dev, sizeof(*item), GFP_KERNEL); + if (!item) { + err = -ENOMEM; + goto channel_init_error; + } + if (i == 0) + csi->tpg_start = item; + list_add_tail(&item->list, &csi->csi_chans); + item->numlanes = 2; + item->numports = 1; + item->csi = csi; + item->pg_mode = pg_mode; + item->id = csi->num_channels + i; + err = tegra_csi_channel_init_one(item); + if (err) + goto channel_init_error; + } + csi->fops->hw_init(csi); + csi->num_channels += csi->num_tpg_channels; + + return err; + +channel_init_error: + if (csi->tpg_start) + tpg_csi_media_controller_cleanup(csi); + dev_err(csi->dev, "%s: Error\n", __func__); + return err; +} +EXPORT_SYMBOL(tpg_csi_media_controller_init); + +void tpg_csi_media_controller_cleanup(struct tegra_csi_device *csi) +{ + struct tegra_csi_channel *item; + struct tegra_csi_channel *itemn; + struct v4l2_subdev *sd; + + list_for_each_entry_safe(item, itemn, &csi->csi_chans, list) { + if (!item->pg_mode) + continue; + sd = &item->subdev; + v4l2_device_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + list_del(&item->list); + devm_kfree(csi->dev, item); + } + csi->num_channels -= 
csi->num_tpg_channels; + csi->tpg_start = NULL; +} +EXPORT_SYMBOL(tpg_csi_media_controller_cleanup); + +int tegra_csi_mipi_calibrate(struct tegra_csi_device *csi, + bool on) +{ + struct tegra_csi_channel *chan; + + if (list_empty(&csi->csi_chans)) + return 0; + + if (!on) { + tegra_mipi_bias_pad_disable(); + return 0; + } + + tegra_mipi_bias_pad_enable(); + + list_for_each_entry(chan, &csi->csi_chans, list) { + int ret = 0; + + if (chan->pg_mode) + continue; + + if (chan->s_data == NULL) + continue; + + ret = csi->fops->mipical(chan); + if (ret) + dev_err(csi->dev, + "calibration failed with %d error\n", ret); + } + + return 0; +} + +int tegra_csi_media_controller_init(struct tegra_csi_device *csi, + struct platform_device *pdev) +{ + int ret; + + if (!csi) + return -EINVAL; + mc_csi = csi; + + csi->dev = &pdev->dev; + csi->pdev = pdev; + csi->tpg_active = 0; + csi->sensor_active = 0; + atomic_set(&csi->power_ref, 0); + mutex_init(&csi->source_update); + INIT_LIST_HEAD(&csi->csi_chans); + ret = csi_parse_dt(csi, pdev); + if (ret < 0) + return ret; + + /* + * if there is no csi channels listed in DT, + * no need to init the channel and graph + */ + if (csi->num_channels > 0) { + ret = tegra_csi_channels_init(csi); + if (ret < 0) + dev_err(&pdev->dev, "Failed to init csi channel\n"); + } + + ret = tegra_csi_init(csi, pdev); + if (ret < 0) + dev_err(&pdev->dev, "Failed to init csi property,clks\n"); + + return 0; +} +EXPORT_SYMBOL(tegra_csi_media_controller_init); + +int tegra_csi_media_controller_remove(struct tegra_csi_device *csi) +{ + struct tegra_csi_channel *chan; + struct v4l2_subdev *sd; + + list_for_each_entry(chan, &csi->csi_chans, list) { + sd = &chan->subdev; + v4l2_async_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + } + return 0; +} +EXPORT_SYMBOL(tegra_csi_media_controller_remove); diff --git a/drivers/media/platform/tegra/camera/fusa-capture/capture-common.c b/drivers/media/platform/tegra/camera/fusa-capture/capture-common.c new file 
mode 100644 index 00000000..a61c3c51 --- /dev/null +++ b/drivers/media/platform/tegra/camera/fusa-capture/capture-common.c @@ -0,0 +1,676 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2017-2022 NVIDIA Corporation. All rights reserved. + +/** + * @file drivers/media/platform/tegra/camera/fusa-capture/capture-common.c + * + * @brief VI/ISP channel common operations for the T186/T194 Camera RTCPU + * platform. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + + +/** + * @brief Capture buffer management table. + */ +struct capture_buffer_table { + struct device *dev; /**< Originating device (VI or ISP) */ + struct kmem_cache *cache; /**< SLAB allocator cache */ + rwlock_t hlock; /**< Reader/writer lock on table contents */ + DECLARE_HASHTABLE(hhead, 4U); /**< Buffer hashtable head */ +}; + +/** + * @brief Capture surface NvRm and IOVA addresses handle. + */ +union capture_surface { + uint64_t raw; /**< Pinned VI or ISP IOVA address */ + struct { + uint32_t offset; /**< NvRm handle (upper 32 bits) */ + uint32_t hmem; + /**< + * Offset of surface or pushbuffer address in descriptor + * (lower 32 bits) [byte] + */ + }; +}; + +/** + * @brief Capture buffer mapping (pinned). + */ +struct capture_mapping { + struct hlist_node hnode; /**< Hash table node struct */ + atomic_t refcnt; /**< Capture mapping reference count */ + struct dma_buf *buf; /** Capture mapping dma_buf */ + struct dma_buf_attachment *atch; + /**< dma_buf attachment (VI or ISP device) */ + struct sg_table *sgt; /**< Scatterlist to dma_buf attachment */ + unsigned int flag; /**< Bitmask access flag */ +}; + +/** + * @brief Determine whether all the bits of @a other are set in @a self. 
 *
 * @param[in]	self	Bitmask flag to be compared
 * @param[in]	other	Bitmask value(s) to compare
 *
 * @retval	true	compatible
 * @retval	false	not compatible
 */
static inline bool flag_compatible(
	unsigned int self,
	unsigned int other)
{
	return (self & other) == other;
}

/**
 * @brief Extract the BUFFER_RDWR access-mode bits of @a flag.
 *
 * @param[in]	flag	Bitmask flag to mask
 *
 * @returns	@a flag masked down to its BUFFER_RDWR (read/write) bits
 */
static inline unsigned int flag_access_mode(
	unsigned int flag)
{
	return flag & BUFFER_RDWR;
}

/**
 * @brief Map capture common buffer access flag to a Linux dma_data_direction.
 *
 * BUFFER_READ (device reads) maps to DMA_TO_DEVICE, BUFFER_WRITE (device
 * writes) to DMA_FROM_DEVICE; neither or both map to DMA_BIDIRECTIONAL.
 *
 * @param[in]	flag	Bitmask access flag of capture common buffer
 *
 * @returns	@ref dma_data_direction mapping
 */
static inline enum dma_data_direction flag_dma_direction(
	unsigned int flag)
{
	static const enum dma_data_direction dir[4U] = {
		[0U] = DMA_BIDIRECTIONAL,
		[BUFFER_READ] = DMA_TO_DEVICE,
		[BUFFER_WRITE] = DMA_FROM_DEVICE,
		[BUFFER_RDWR] = DMA_BIDIRECTIONAL,
	};

	return dir[flag_access_mode(flag)];
}

/**
 * @brief Retrieve the scatterlist IOVA address of the capture surface mapping.
 *
 * Falls back to the physical address of the first scatterlist entry when no
 * DMA address was set by the mapping.
 *
 * @param[in]	pin	The capture_mapping of the buffer
 *
 * @returns	DMA (IOVA) address of the first scatterlist entry, or its
 *		physical address as a fallback
 */
static inline dma_addr_t mapping_iova(
	const struct capture_mapping *pin)
{
	dma_addr_t addr = sg_dma_address(pin->sgt->sgl);

	return (addr != 0) ? addr : sg_phys(pin->sgt->sgl);
}

/**
 * @brief Retrieve the dma_buf pointer of a capture surface mapping.
 *
 * @param[in]	pin	The capture_mapping of the buffer
 *
 * @returns	Pointer to the capture_mapping @ref dma_buf
 */
static inline struct dma_buf *mapping_buf(
	const struct capture_mapping *pin)
{
	return pin->buf;
}

/**
 * @brief Determine whether BUFFER_ADD is set in the capture surface mapping's
 * access flag.
 *
 * @param[in]	pin	The capture_mapping of the buffer
 *
 * @retval	true	BUFFER_ADD set
 * @retval	false	BUFFER_ADD not set
 */
static inline bool mapping_preserved(
	const struct capture_mapping *pin)
{
	return (bool)(pin->flag & BUFFER_ADD);
}

/**
 * @brief Set or unset the BUFFER_ADD bit in the capture surface mapping's
 * access flag, and correspondingly increment or decrement the mapping's refcnt.
 *
 * @param[in]	pin	The capture_mapping of the buffer
 * @param[in]	val	true to preserve the mapping (set BUFFER_ADD and take a
 *			reference), false to release (clear BUFFER_ADD and drop
 *			a reference)
 */
static inline void set_mapping_preservation(
	struct capture_mapping *pin,
	bool val)
{
	if (val) {
		pin->flag |= BUFFER_ADD;
		atomic_inc(&pin->refcnt);
	} else {
		pin->flag &= (~BUFFER_ADD);
		atomic_dec(&pin->refcnt);
	}
}

/**
 * @brief Iteratively search a capture buffer management table to find the entry
 * with @a buf, and @a flag bits set in the capture mapping.
 *
 * On success, the mapping's refcnt is incremented by one; a mapping whose
 * refcnt has already dropped to zero (concurrent teardown) is skipped.
 *
 * @param[in]	tab	The capture buffer management table
 * @param[in]	buf	The mapping dma_buf pointer to match
 * @param[in]	flag	The mapping bitmask access flag to compare
 *
 * @returns	@ref capture_mapping pointer (success), NULL (failure)
 */
static struct capture_mapping *find_mapping(
	struct capture_buffer_table *tab,
	struct dma_buf *buf,
	unsigned int flag)
{
	struct capture_mapping *pin;
	bool success;

	read_lock(&tab->hlock);

	hash_for_each_possible(tab->hhead, pin, hnode, (unsigned long)buf) {
		if (
			(pin->buf == buf) &&
			flag_compatible(pin->flag, flag)
		) {
			success = atomic_inc_not_zero(&pin->refcnt);
			if (success) {
				read_unlock(&tab->hlock);
				return pin;
			}
		}
	}

	read_unlock(&tab->hlock);

	return NULL;
}

/**
 * @brief Add an NvRm buffer to the buffer management table and initialize its
 * refcnt to 1.
+ * + * @param[in] tab The capture buffer management table + * @param[in] fd The NvRm handle + * @param[in] flag The mapping bitmask access flag to set + * + * @returns @ref capture_mapping pointer (success), PTR_ERR (failure) + */ +static struct capture_mapping *get_mapping( + struct capture_buffer_table *tab, + uint32_t fd, + unsigned int flag) +{ + struct capture_mapping *pin; + struct dma_buf *buf; + void *err; + + if (unlikely(tab == NULL)) { + pr_err("%s: invalid buffer table\n", __func__); + return ERR_PTR(-EINVAL); + } + + buf = dma_buf_get((int)fd); + if (IS_ERR(buf)) { + dev_err(tab->dev, "%s:%d: invalid memfd %u; errno %ld \n", + __func__, __LINE__, fd, PTR_ERR(buf)); + return ERR_CAST(buf); + } + + pin = find_mapping(tab, buf, flag); + if (pin != NULL) { + dma_buf_put(buf); + return pin; + } + + pin = kmem_cache_alloc(tab->cache, GFP_KERNEL); + if (unlikely(pin == NULL)) { + err = ERR_PTR(-ENOMEM); + goto err0; + } + + pin->atch = dma_buf_attach(buf, tab->dev); + if (unlikely(IS_ERR(pin->atch))) { + err = pin->atch; + goto err1; + } + + pin->sgt = dma_buf_map_attachment(pin->atch, flag_dma_direction(flag)); + if (unlikely(IS_ERR(pin->sgt))) { + err = pin->sgt; + goto err2; + } + + pin->flag = flag; + pin->buf = buf; + atomic_set(&pin->refcnt, 1U); + INIT_HLIST_NODE(&pin->hnode); + + write_lock(&tab->hlock); + hash_add(tab->hhead, &pin->hnode, (unsigned long)pin->buf); + write_unlock(&tab->hlock); + + return pin; +err2: + dma_buf_detach(buf, pin->atch); +err1: + kmem_cache_free(tab->cache, pin); +err0: + dma_buf_put(buf); + dev_err(tab->dev, "%s:%d: memfd %u, flag %u; errno %ld \n", + __func__, __LINE__,fd, flag, PTR_ERR(buf)); + return err; +} + +struct capture_buffer_table *create_buffer_table( + struct device *dev) +{ + struct capture_buffer_table *tab; + + tab = kmalloc(sizeof(*tab), GFP_KERNEL); + + if (likely(tab != NULL)) { + tab->cache = KMEM_CACHE(capture_mapping, 0U); + + if (likely(tab->cache != NULL)) { + tab->dev = dev; + 
hash_init(tab->hhead); + rwlock_init(&tab->hlock); + } else { + kfree(tab); + tab = NULL; + } + } + + return tab; +} +EXPORT_SYMBOL_GPL(create_buffer_table); + +void destroy_buffer_table( + struct capture_buffer_table *tab) +{ + size_t bkt; + struct hlist_node *next; + struct capture_mapping *pin; + + if (unlikely(tab == NULL)) + return; + + write_lock(&tab->hlock); + + hash_for_each_safe(tab->hhead, bkt, next, pin, hnode) { + hash_del(&pin->hnode); + dma_buf_unmap_attachment( + pin->atch, pin->sgt, flag_dma_direction(pin->flag)); + dma_buf_detach(pin->buf, pin->atch); + dma_buf_put(pin->buf); + kmem_cache_free(tab->cache, pin); + } + + write_unlock(&tab->hlock); + + kmem_cache_destroy(tab->cache); + kfree(tab); +} +EXPORT_SYMBOL_GPL(destroy_buffer_table); + +static DEFINE_MUTEX(req_lock); + +int capture_buffer_request( + struct capture_buffer_table *tab, + uint32_t memfd, + uint32_t flag) +{ + struct capture_mapping *pin; + struct dma_buf *buf; + bool add = (bool)(flag & BUFFER_ADD); + int err = 0; + + if (unlikely(tab == NULL)) { + pr_err("%s: invalid buffer table\n", __func__); + return -EINVAL; + } + + mutex_lock(&req_lock); + + if (add) { + pin = get_mapping(tab, memfd, flag_access_mode(flag)); + if (IS_ERR(pin)) { + err = PTR_ERR_OR_ZERO(pin); + dev_err(tab->dev, "%s:%d: memfd %u, flag %u; errno %d", + __func__, __LINE__, memfd, flag, err); + goto end; + } + + if (mapping_preserved(pin)) { + err = -EEXIST; + dev_err(tab->dev, "%s:%d: memfd %u exists; errno %d", + __func__, __LINE__, memfd, err); + put_mapping(tab, pin); + goto end; + } + } else { + buf = dma_buf_get((int)memfd); + if (IS_ERR(buf)) { + err = PTR_ERR_OR_ZERO(buf); + dev_err(tab->dev, "%s:%d: invalid memfd %u; errno %d", + __func__, __LINE__, memfd, err); + goto end; + } + + pin = find_mapping(tab, buf, BUFFER_ADD); + if (pin == NULL) { + err = -ENOENT; + dev_err(tab->dev, "%s:%d: memfd %u not exists; errno %d", + __func__, __LINE__, memfd, err); + dma_buf_put(buf); + goto end; + } + 
dma_buf_put(buf); + } + + set_mapping_preservation(pin, add); + put_mapping(tab, pin); + +end: + mutex_unlock(&req_lock); + return err; +} +EXPORT_SYMBOL_GPL(capture_buffer_request); + +int capture_buffer_add( + struct capture_buffer_table *t, + uint32_t fd) +{ + return capture_buffer_request(t, fd, BUFFER_ADD | BUFFER_RDWR); +} +EXPORT_SYMBOL_GPL(capture_buffer_add); + +void put_mapping( + struct capture_buffer_table *t, + struct capture_mapping *pin) +{ + bool zero; + + zero = atomic_dec_and_test(&pin->refcnt); + if (zero) { + if (unlikely(mapping_preserved(pin))) { + dev_err(t->dev, "%s:%d: unexpected put for a preserved mapping", + __func__, __LINE__); + atomic_inc(&pin->refcnt); + return; + } + + write_lock(&t->hlock); + hash_del(&pin->hnode); + write_unlock(&t->hlock); + + dma_buf_unmap_attachment( + pin->atch, pin->sgt, flag_dma_direction(pin->flag)); + dma_buf_detach(pin->buf, pin->atch); + dma_buf_put(pin->buf); + kmem_cache_free(t->cache, pin); + } +} +EXPORT_SYMBOL_GPL(put_mapping); + +int capture_common_pin_and_get_iova(struct capture_buffer_table *buf_ctx, + uint32_t mem_handle, uint64_t mem_offset, + uint64_t *meminfo_base_address, uint64_t *meminfo_size, + struct capture_common_unpins *unpins) +{ + struct capture_mapping *map; + struct dma_buf *buf; + uint64_t size; + uint64_t iova; + + /* NULL is a valid unput indicating unused data field */ + if (!mem_handle) { + return 0; + } + + if (unpins->num_unpins >= MAX_PIN_BUFFER_PER_REQUEST) { + pr_err("%s: too many buffers per request\n", __func__); + return -ENOMEM; + } + + map = get_mapping(buf_ctx, mem_handle, BUFFER_RDWR); + + if (IS_ERR(map)) { + pr_err("%s: cannot get mapping\n", __func__); + return -EINVAL; + } + + buf = mapping_buf(map); + size = buf->size; + iova = mapping_iova(map); + + if (mem_offset >= size) { + pr_err("%s: offset is out of bounds\n", __func__); + return -EINVAL; + } + + *meminfo_base_address = iova + mem_offset; + *meminfo_size = size - mem_offset; + + 
unpins->data[unpins->num_unpins] = map; + unpins->num_unpins++; + return 0; +} +EXPORT_SYMBOL_GPL(capture_common_pin_and_get_iova); + +int capture_common_setup_progress_status_notifier( + struct capture_common_status_notifier *status_notifier, + uint32_t mem, + uint32_t buffer_size, + uint32_t mem_offset) +{ + struct dma_buf *dmabuf; +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) + struct dma_buf_map map; +#else + struct iosys_map map; +#endif + void *va; + int err = 0; + + /* take reference for the userctx */ + dmabuf = dma_buf_get(mem); + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); + + if (buffer_size > U32_MAX - mem_offset) { + pr_err("%s: buffer_size or mem_offset too large\n", __func__); + return -EINVAL; + } + + if ((buffer_size + mem_offset) > dmabuf->size) { + dma_buf_put(dmabuf); + pr_err("%s: invalid offset\n", __func__); + return -EINVAL; + } + + /* map handle and clear error notifier struct */ + err = dma_buf_vmap(dmabuf, &map); + va = err ? NULL : map.vaddr; + if (!va) { + dma_buf_put(dmabuf); + pr_err("%s: Cannot map notifier handle\n", __func__); + return -ENOMEM; + } + + memset(va, 0, buffer_size); + + status_notifier->buf = dmabuf; + status_notifier->va = va; + status_notifier->offset = mem_offset; + return 0; +} +EXPORT_SYMBOL_GPL(capture_common_setup_progress_status_notifier); + +int capture_common_release_progress_status_notifier( + struct capture_common_status_notifier *progress_status_notifier) +{ + struct dma_buf *dmabuf = progress_status_notifier->buf; + void *va = progress_status_notifier->va; +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) + struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(va); +#else + struct iosys_map map = IOSYS_MAP_INIT_VADDR(va); +#endif + + if (dmabuf != NULL) { + if (va != NULL) + dma_buf_vunmap(dmabuf, &map); + + dma_buf_put(dmabuf); + } + + progress_status_notifier->buf = NULL; + progress_status_notifier->va = NULL; + progress_status_notifier->offset = 0; + + return 0; +} 
+EXPORT_SYMBOL_GPL(capture_common_release_progress_status_notifier); + +int capture_common_set_progress_status( + struct capture_common_status_notifier *progress_status_notifier, + uint32_t buffer_slot, + uint32_t buffer_depth, + uint8_t new_val) +{ + uint32_t *status_notifier = (uint32_t *) (progress_status_notifier->va + + progress_status_notifier->offset); + + if (buffer_slot >= buffer_depth) { + pr_err("%s: Invalid offset!", __func__); + return -EINVAL; + } + buffer_slot = array_index_nospec(buffer_slot, buffer_depth); + + /* + * Since UMD and KMD can both write to the shared progress status + * notifier buffer, insert memory barrier here to ensure that any + * other store operations to the buffer would be done before the + * write below. + */ + wmb(); + + status_notifier[buffer_slot] = new_val; + + return 0; +} +EXPORT_SYMBOL_GPL(capture_common_set_progress_status); + +int capture_common_pin_memory( + struct device *dev, + uint32_t mem, + struct capture_common_buf *unpin_data) +{ + struct dma_buf *buf; +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) + struct dma_buf_map map; +#else + struct iosys_map map; +#endif + struct dma_buf_attachment *attach; + struct sg_table *sgt; + int err = 0; + + buf = dma_buf_get(mem); + if (IS_ERR(buf)) { + err = PTR_ERR(buf); + goto fail; + } + + attach = dma_buf_attach(buf, dev); + if (IS_ERR(attach)) { + err = PTR_ERR(attach); + goto fail; + } + + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) { + err = PTR_ERR(sgt); + goto fail; + } + + if (sg_dma_address(sgt->sgl) == 0) + sg_dma_address(sgt->sgl) = sg_phys(sgt->sgl); + + err = dma_buf_vmap(buf, &map); + unpin_data->va = err ? 
NULL : map.vaddr; + if (unpin_data->va == NULL) { + pr_err("%s: failed to map pinned memory\n", __func__); + goto fail; + } + + unpin_data->iova = sg_dma_address(sgt->sgl); + unpin_data->buf = buf; + unpin_data->attach = attach; + unpin_data->sgt = sgt; + + return 0; + +fail: + capture_common_unpin_memory(unpin_data); + return err; +} +EXPORT_SYMBOL_GPL(capture_common_pin_memory); + +void capture_common_unpin_memory( + struct capture_common_buf *unpin_data) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) + struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(unpin_data->va); +#else + struct iosys_map map = IOSYS_MAP_INIT_VADDR(unpin_data->va); +#endif + + if (unpin_data->va) + dma_buf_vunmap(unpin_data->buf, &map); + + if (unpin_data->sgt != NULL) + dma_buf_unmap_attachment(unpin_data->attach, unpin_data->sgt, + DMA_BIDIRECTIONAL); + if (unpin_data->attach != NULL) + dma_buf_detach(unpin_data->buf, unpin_data->attach); + if (unpin_data->buf != NULL) + dma_buf_put(unpin_data->buf); + + unpin_data->sgt = NULL; + unpin_data->attach = NULL; + unpin_data->buf = NULL; + unpin_data->iova = 0; + unpin_data->va = NULL; +} +EXPORT_SYMBOL_GPL(capture_common_unpin_memory); diff --git a/drivers/media/platform/tegra/camera/fusa-capture/capture-isp-channel.c b/drivers/media/platform/tegra/camera/fusa-capture/capture-isp-channel.c new file mode 100644 index 00000000..90d16333 --- /dev/null +++ b/drivers/media/platform/tegra/camera/fusa-capture/capture-isp-channel.c @@ -0,0 +1,627 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2017-2022 NVIDIA Corporation. All rights reserved. + +/** + * @file drivers/media/platform/tegra/camera/fusa-capture/capture-isp-channel.c + * + * @brief ISP channel character device driver for the T186/T194 Camera RTCPU + * platform. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * @todo This parameter is platform-dependent and should be retrieved from the + * Device Tree. + */ +#define MAX_ISP_CHANNELS 64 + +/** + * @brief ISP channel character device driver context. + */ +struct isp_channel_drv { + struct device *dev; /**< ISP kernel @em device */ + u8 num_channels; /**< No. of ISP channel character devices */ + struct mutex lock; /**< ISP channel driver context lock. */ + struct platform_device *ndev; /**< ISP kernel @em platform_device */ + const struct isp_channel_drv_ops *ops; + /**< ISP fops for Host1x syncpt/gos allocations */ + struct tegra_isp_channel *channels[]; + /**< Allocated ISP channel contexts */ +}; + +/** + * @defgroup ISP_CHANNEL_IOCTLS + * + * @brief ISP channel character device IOCTL API + * + * Clients in the UMD may open sysfs character devices representing ISP + * channels, and perform configuration, and enqueue buffers in capture and + * program requests to the low-level RCE subsystem via these IOCTLs. + * + * @{ + */ + +/** + * @brief Set up ISP channel resources and request FW channel allocation in RCE. + * + * Initialize the ISP channel context and synchronization primitives, pin memory + * for the capture and program process descriptor queues, set up the buffer + * management table, initialize the capture/capture-control IVC channels and + * request ISP FW channel allocation in RCE. + * + * @param[in] ptr Pointer to a struct @ref isp_capture_setup + * + * @returns 0 (success), neg. errno (failure) + */ +#define ISP_CAPTURE_SETUP \ + _IOW('I', 1, struct isp_capture_setup) + +/** + * @brief Release the ISP FW channel allocation in RCE, and all resources and + * contexts in the KMD. + * + * @param[in] rel uint32_t bitmask of @ref CAPTURE_CHANNEL_RESET_FLAGS + * @returns 0 (success), neg. 
errno (failure) + */ +#define ISP_CAPTURE_RELEASE \ + _IOW('I', 2, __u32) + +/** + * @brief Reset the ISP channel in RCE synchronously w/ the KMD; all pending + * capture/program descriptors in the queue are discarded and syncpoint values + * fast-forwarded to unblock waiting clients. + * + * @param[in] rst uint32_t bitmask of @ref CAPTURE_CHANNEL_RESET_FLAGS + * + * @returns 0 (success), neg. errno (failure) + */ +#define ISP_CAPTURE_RESET \ + _IOW('I', 3, __u32) + +/** + * @brief Retrieve the ids and current values of the progress, stats progress + * syncpoints, and ISP FW channel allocated by RCE. + * + * If successful, the queried values are written back to the input struct. + * + * @param[in,out] ptr Pointer to a struct @ref isp_capture_info + * + * @returns 0 (success), neg. errno (failure) + */ +#define ISP_CAPTURE_GET_INFO \ + _IOR('I', 4, struct isp_capture_info) + +/** + * @brief Enqueue a process capture request to RCE, input and prefences are + * allocated, and the addresses to surface buffers in the descriptor (referenced + * by the buffer_index) are pinned and patched. + * + * @param[in] ptr Pointer to a struct @ref isp_capture_req + * + * @returns 0 (success), neg. errno (failure) + */ +#define ISP_CAPTURE_REQUEST \ + _IOW('I', 5, struct isp_capture_req) + +/** + * @brief Wait on the next completion of an enqueued frame, signalled by RCE. + * The status in the frame's capture descriptor is safe to read when this + * completes w/o a -ETIMEDOUT or other error. + * + * @note This call completes for the frame at the head of the FIFO queue, and is + * not necessarily for the most recently enqueued process capture request. + * + * @param[in] status uint32_t timeout [ms], 0 for indefinite + * + * @returns 0 (success), neg. errno (failure) + */ +#define ISP_CAPTURE_STATUS \ + _IOW('I', 6, __u32) + +/** + * @brief Enqueue a program request to RCE, the addresses to the push buffer in + * the descriptor (referenced by the buffer_index) are pinned and patched. 
+ * + * @param[in] ptr Pointer to a struct @ref isp_program_req + * + * @returns 0 (success), neg. errno (failure) + */ +#define ISP_CAPTURE_PROGRAM_REQUEST \ + _IOW('I', 7, struct isp_program_req) + +/** + * @brief Wait on the next completion of an enqueued program, signalled by RCE. + * The program execution is finished and is safe to free when this call + * completes. + * + * @note This call completes for the program at the head of the FIFO queue, and + * is not necessarily for the most recently enqueued program request. + + * @returns 0 (success), neg. errno (failure) + */ +#define ISP_CAPTURE_PROGRAM_STATUS \ + _IOW('I', 8, __u32) + +/** + * @brief Enqueue a joint capture and program request to RCE; this is equivalent + * to calling @ref ISP_CAPTURE_PROGRAM_REQUEST and @ref ISP_CAPTURE_REQUEST + * sequentially, but the number of KMD-RCE IVC transmissions is reduced to one + * in each direction for every frame. + * + * @param[in] ptr Pointer to a struct @ref isp_capture_req_ex + * + * @returns 0 (success), neg. errno (failure) + */ +#define ISP_CAPTURE_REQUEST_EX \ + _IOW('I', 9, struct isp_capture_req_ex) + +/** + * @brief Set up the combined capture and program process progress status + * notifier array, which is a replacement for the blocking + * @ref ISP_CAPTURE_STATUS and @ref ISP_CAPTURE_PROGRAM_STATUS calls; allowing + * for out-of-order frame process completion notifications. + * + * The values written by the KMD are any of the + * @ref CAPTURE_PROGRESS_NOTIFIER_STATES. + * + * @param[in] ptr Pointer to a struct @ref isp_capture_progress_status_req + * + * @returns 0 (success), neg. errno (failure) + */ +#define ISP_CAPTURE_SET_PROGRESS_STATUS_NOTIFIER \ + _IOW('I', 10, struct isp_capture_progress_status_req) + +/** + * @brief Perform an operation on the surface buffer by setting the bitwise + * @a flag field with @ref CAPTURE_BUFFER_OPS flags. + * + * @param[in] ptr Pointer to a struct @ref isp_buffer_req. + * + * @returns 0 (success), neg. 
errno (failure) + */ +#define ISP_CAPTURE_BUFFER_REQUEST \ + _IOW('I', 11, struct isp_buffer_req) + +/** @} */ + +/** + * @brief Power on ISP via Host1x. The ISP channel is registered as an NvHost + * ISP client and the reference count is incremented by one. + * + * @param[in] chan ISP channel context + * @returns 0 (success), neg. errno (failure) + */ +static int isp_channel_power_on( + struct tegra_isp_channel *chan) +{ + int ret = 0; + + dev_dbg(chan->isp_dev, "isp_channel_power_on\n"); + ret = nvhost_module_add_client(chan->ndev, chan->priv); + if (ret < 0) { + dev_err(chan->isp_dev, + "%s: failed to add isp client\n", __func__); + return ret; + } + + ret = nvhost_module_busy(chan->ndev); + if (ret < 0) { + dev_err(chan->isp_dev, + "%s: failed to power on isp\n", __func__); + return ret; + } + return 0; +} + +/** + * @brief Power off ISP via Host1x. The NvHost module reference count is + * decreased by one and the ISP channel is unregistered as a client. + * + * @param[in] chan ISP channel context + */ +static void isp_channel_power_off( + struct tegra_isp_channel *chan) +{ + dev_dbg(chan->isp_dev, "isp_channel_power_off\n"); + nvhost_module_idle(chan->ndev); + nvhost_module_remove_client(chan->ndev, chan->priv); +} + +static struct isp_channel_drv *chdrv_; +static DEFINE_MUTEX(chdrv_lock); + +/** + * @brief Open an ISP channel character device node, power on the camera + * subsystem and initialize the channel driver context. + * + * The act of opening an ISP channel character device node does not entail the + * reservation of an ISP channel, ISP_CAPTURE_SETUP must be called afterwards + * to request an allocation by RCE. + * + * This is the @a open file operation handler for an ISP channel node. + * + * @param[in] inode ISP channel character device inode struct + * @param[in] file ISP channel character device file struct + * + * @returns 0 (success), neg. 
errno (failure) + */ +static int isp_channel_open( + struct inode *inode, + struct file *file) +{ + struct tegra_isp_channel *chan; + unsigned int channel = iminor(inode); + struct isp_channel_drv *chan_drv; + int err; + + if (mutex_lock_interruptible(&chdrv_lock)) + return -ERESTARTSYS; + + chan_drv = chdrv_; + + if (chan_drv == NULL || channel >= chan_drv->num_channels) { + mutex_unlock(&chdrv_lock); + return -ENODEV; + } + mutex_unlock(&chdrv_lock); + + chan = kzalloc(sizeof(*chan), GFP_KERNEL); + if (unlikely(chan == NULL)) + return -ENOMEM; + + chan->drv = chan_drv; + chan->isp_dev = chan_drv->dev; + chan->ndev = chan_drv->ndev; + chan->ops = chan_drv->ops; + chan->priv = file; + + err = isp_channel_power_on(chan); + if (err < 0) + goto error; + + err = isp_capture_init(chan); + if (err < 0) + goto init_err; + + mutex_lock(&chan_drv->lock); + if (chan_drv->channels[channel] != NULL) { + mutex_unlock(&chan_drv->lock); + err = -EBUSY; + goto chan_err; + } + + chan_drv->channels[channel] = chan; + mutex_unlock(&chan_drv->lock); + + file->private_data = chan; + + return nonseekable_open(inode, file); + +chan_err: + isp_capture_shutdown(chan); +init_err: + isp_channel_power_off(chan); +error: + kfree(chan); + return err; +} + +/** + * @brief Release an ISP channel character device node, power off the camera + * subsystem and free the ISP channel driver context. + * + * Under normal operation, ISP_CAPTURE_RESET followed by ISP_CAPTURE_RELEASE + * should be called before releasing the file handle on the device node. + * + * This is the @a release file operation handler for an ISP channel node. 
+ * + * @param[in] inode ISP channel character device inode struct + * @param[in] file ISP channel character device file struct + * + * @returns 0 + */ +static int isp_channel_release( + struct inode *inode, + struct file *file) +{ + struct tegra_isp_channel *chan = file->private_data; + unsigned int channel = iminor(inode); + struct isp_channel_drv *chan_drv = chan->drv; + + isp_capture_shutdown(chan); + isp_channel_power_off(chan); + + mutex_lock(&chan_drv->lock); + + WARN_ON(chan_drv->channels[channel] != chan); + chan_drv->channels[channel] = NULL; + + mutex_unlock(&chan_drv->lock); + kfree(chan); + + return 0; +} + +/** + * @brief Process an IOCTL call on an ISP channel character device. + * + * Depending on the specific IOCTL, the argument (@a arg) may be a pointer to a + * defined struct payload that is copied from or back to user-space. This memory + * is allocated and mapped from user-space and must be kept available until + * after the IOCTL call completes. + * + * This is the @a ioctl file operation handler for an ISP channel node. + * + * @param[in] file ISP channel character device file struct + * @param[in] cmd ISP channel IOCTL command + * @param[in,out] arg IOCTL argument; numerical value or pointer + * + * @returns 0 (success), neg. 
errno (failure) + */ +static long isp_channel_ioctl( + struct file *file, + unsigned int cmd, + unsigned long arg) +{ + struct tegra_isp_channel *chan = file->private_data; + void __user *ptr = (void __user *)arg; + long err = -EFAULT; + + if (unlikely(chan == NULL)) { + pr_err("%s: invalid channel\n", __func__); + return -EINVAL; + } + + switch (_IOC_NR(cmd)) { + case _IOC_NR(ISP_CAPTURE_SETUP): { + struct isp_capture_setup setup; + + if (copy_from_user(&setup, ptr, sizeof(setup))) + break; + err = isp_capture_setup(chan, &setup); + if (err) + dev_err(chan->isp_dev, "isp capture setup failed\n"); + break; + } + + case _IOC_NR(ISP_CAPTURE_RESET): { + uint32_t rst; + + if (copy_from_user(&rst, ptr, sizeof(rst))) + break; + err = isp_capture_reset(chan, rst); + if (err) + dev_err(chan->isp_dev, "isp capture reset failed\n"); + break; + } + + case _IOC_NR(ISP_CAPTURE_RELEASE): { + uint32_t rel; + + if (copy_from_user(&rel, ptr, sizeof(rel))) + break; + err = isp_capture_release(chan, rel); + if (err) + dev_err(chan->isp_dev, "isp capture release failed\n"); + break; + } + + case _IOC_NR(ISP_CAPTURE_GET_INFO): { + struct isp_capture_info info; + (void)memset(&info, 0, sizeof(info)); + + err = isp_capture_get_info(chan, &info); + if (err) { + dev_err(chan->isp_dev, "isp capture get info failed\n"); + break; + } + if (copy_to_user(ptr, &info, sizeof(info))) + err = -EFAULT; + break; + } + + case _IOC_NR(ISP_CAPTURE_REQUEST): { + struct isp_capture_req req; + + if (copy_from_user(&req, ptr, sizeof(req))) + break; + err = isp_capture_request(chan, &req); + if (err) + dev_err(chan->isp_dev, + "isp process capture request submit failed\n"); + break; + } + + case _IOC_NR(ISP_CAPTURE_STATUS): { + uint32_t status; + + if (copy_from_user(&status, ptr, sizeof(status))) + break; + err = isp_capture_status(chan, status); + if (err) + dev_err(chan->isp_dev, + "isp process get status failed\n"); + break; + } + + case _IOC_NR(ISP_CAPTURE_PROGRAM_REQUEST): { + struct isp_program_req 
program_req; + + if (copy_from_user(&program_req, ptr, sizeof(program_req))) + break; + err = isp_capture_program_request(chan, &program_req); + if (err) + dev_err(chan->isp_dev, + "isp process program request submit failed\n"); + break; + } + + case _IOC_NR(ISP_CAPTURE_PROGRAM_STATUS): { + err = isp_capture_program_status(chan); + + if (err) + dev_err(chan->isp_dev, + "isp process program get status failed\n"); + break; + } + + case _IOC_NR(ISP_CAPTURE_REQUEST_EX): { + struct isp_capture_req_ex req; + + if (copy_from_user(&req, ptr, sizeof(req))) + break; + err = isp_capture_request_ex(chan, &req); + if (err) + dev_err(chan->isp_dev, + "isp process request extended submit failed\n"); + break; + } + case _IOC_NR(ISP_CAPTURE_SET_PROGRESS_STATUS_NOTIFIER): { + struct isp_capture_progress_status_req req; + + if (copy_from_user(&req, ptr, sizeof(req))) + break; + err = isp_capture_set_progress_status_notifier(chan, &req); + if (err) + dev_err(chan->isp_dev, + "isp capture set progress status buffers failed\n"); + break; + } + case _IOC_NR(ISP_CAPTURE_BUFFER_REQUEST): { + struct isp_buffer_req req; + + if (copy_from_user(&req, ptr, sizeof(req)) != 0U) + break; + + err = isp_capture_buffer_request(chan, &req); + if (err < 0) + dev_err(chan->isp_dev, "isp buffer req failed\n"); + break; + } + default: { + dev_err(chan->isp_dev, "%s:Unknown ioctl\n", __func__); + return -ENOIOCTLCMD; + } + } + + return err; +} + +static const struct file_operations isp_channel_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .unlocked_ioctl = isp_channel_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = isp_channel_ioctl, +#endif + .open = isp_channel_open, + .release = isp_channel_release, +}; + +/* Character device */ +static struct class *isp_channel_class; +static int isp_channel_major; + +int isp_channel_drv_register( + struct platform_device *ndev, + const struct isp_channel_drv_ops *ops) +{ + struct isp_channel_drv *chan_drv; + unsigned int i; + + chan_drv = 
kzalloc(offsetof(struct isp_channel_drv, + channels[MAX_ISP_CHANNELS]), GFP_KERNEL); + if (unlikely(chan_drv == NULL)) + return -ENOMEM; + + chan_drv->dev = &ndev->dev; + chan_drv->ndev = ndev; + chan_drv->ops = ops; + chan_drv->num_channels = MAX_ISP_CHANNELS; + mutex_init(&chan_drv->lock); + + mutex_lock(&chdrv_lock); + if (WARN_ON(chdrv_ != NULL)) { + mutex_unlock(&chdrv_lock); + kfree(chan_drv); + return -EBUSY; + } + chdrv_ = chan_drv; + mutex_unlock(&chdrv_lock); + + for (i = 0; i < chan_drv->num_channels; i++) { + dev_t devt = MKDEV(isp_channel_major, i); + + device_create(isp_channel_class, chan_drv->dev, devt, NULL, + "capture-isp-channel%u", i); + } + + return 0; +} +EXPORT_SYMBOL(isp_channel_drv_register); + +void isp_channel_drv_unregister( + struct device *dev) +{ + struct isp_channel_drv *chan_drv; + unsigned int i; + + mutex_lock(&chdrv_lock); + chan_drv = chdrv_; + chdrv_ = NULL; + WARN_ON(chan_drv->dev != dev); + mutex_unlock(&chdrv_lock); + + for (i = 0; i < chan_drv->num_channels; i++) { + dev_t devt = MKDEV(isp_channel_major, i); + + device_destroy(isp_channel_class, devt); + } + + kfree(chan_drv); +} +EXPORT_SYMBOL(isp_channel_drv_unregister); + +/** + * @brief Initialize the ISP channel driver device (major). + * + * @returns 0 (success), PTR_ERR or neg. ISP channel major no. (failuure) + */ +int isp_channel_drv_init(void) +{ + isp_channel_class = class_create(THIS_MODULE, "capture-isp-channel"); + if (IS_ERR(isp_channel_class)) + return PTR_ERR(isp_channel_class); + + isp_channel_major = register_chrdev(0, "capture-isp-channel", + &isp_channel_fops); + if (isp_channel_major < 0) { + class_destroy(isp_channel_class); + return isp_channel_major; + } + + return 0; +} +EXPORT_SYMBOL(isp_channel_drv_init); + +/** + * @brief De-initialize the ISP channel driver device (major). 
+ */ +void isp_channel_drv_exit(void) +{ + unregister_chrdev(isp_channel_major, "capture-isp-channel"); + class_destroy(isp_channel_class); +} +EXPORT_SYMBOL(isp_channel_drv_exit); \ No newline at end of file diff --git a/drivers/media/platform/tegra/camera/fusa-capture/capture-isp.c b/drivers/media/platform/tegra/camera/fusa-capture/capture-isp.c new file mode 100644 index 00000000..aa9abd8b --- /dev/null +++ b/drivers/media/platform/tegra/camera/fusa-capture/capture-isp.c @@ -0,0 +1,1988 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2017-2022 NVIDIA Corporation. All rights reserved. + +/** + * @file drivers/media/platform/tegra/camera/fusa-capture/capture-isp.c + * + * @brief ISP channel operations for the T186/T194 Camera RTCPU platform. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include "soc/tegra/camrtc-capture.h" +#include "soc/tegra/camrtc-capture-messages.h" +#include +#include +#include +#include +#include + +/** + * @brief Invalid ISP channel ID; the channel is not initialized. + */ +#define CAPTURE_CHANNEL_ISP_INVALID_ID U16_C(0xFFFF) + +/** + * @brief ISP channel process descriptor queue context. + */ +struct isp_desc_rec { + struct capture_common_buf requests; /**< Process descriptor queue */ + size_t request_buf_size; /**< Size of process descriptor queue [byte] */ + uint32_t queue_depth; /**< No. of process descriptors in queue */ + uint32_t request_size; + /**< Size of a single process descriptor [byte] */ + void *requests_memoryinfo; + /**< memory info ringbuffer */ + uint64_t requests_memoryinfo_iova; + /**< memory info ringbuffer rtcpu iova */ + + uint32_t progress_status_buffer_depth; + /**< No. of process descriptors. */ + + struct mutex unpins_list_lock; /**< Lock for unpins_list */ + struct capture_common_unpins *unpins_list; + /**< List of process request buffer unpins */ +}; + +/** + * @brief ISP channel capture context. 
+ */ +struct isp_capture { + uint16_t channel_id; /**< RCE-assigned ISP FW channel id */ + struct device *rtcpu_dev; /**< rtcpu device */ + struct tegra_isp_channel *isp_channel; /**< ISP channel context */ + struct capture_buffer_table *buffer_ctx; + /**< Surface buffer management table */ + + struct isp_desc_rec capture_desc_ctx; + /**< Capture process descriptor queue context */ + struct isp_desc_rec program_desc_ctx; + /**< Program process descriptor queue context */ + + struct capture_common_status_notifier progress_status_notifier; + /**< Process progress status notifier context */ + bool is_progress_status_notifier_set; + /**< Whether progress_status_notifer has been initialized */ + +#ifdef HAVE_ISP_GOS_TABLES + uint32_t num_gos_tables; /**< No. of cv devices in gos_tables */ + const dma_addr_t *gos_tables; /**< IOVA addresses of all GoS devices */ +#endif + + struct syncpoint_info progress_sp; /**< Syncpoint for frame progress */ + struct syncpoint_info stats_progress_sp; + /**< Syncpoint for stats progress */ + + struct completion control_resp; + /**< Completion for capture-control IVC response */ + struct completion capture_resp; + /**< + * Completion for capture process requests (frame), if progress + * status notifier is not in use + */ + struct completion capture_program_resp; + /**< + * Completion for program process requests (frame), if progress + * status notifier is not in use + */ + + struct mutex control_msg_lock; + /**< Lock for capture-control IVC control_resp_msg */ + struct CAPTURE_CONTROL_MSG control_resp_msg; + /**< capture-control IVC resp msg written to by callback */ + + struct mutex reset_lock; + /**< Channel lock for reset/abort support (via RCE) */ + bool reset_capture_program_flag; + /**< Reset flag to drain pending program process requests */ + bool reset_capture_flag; + /**< Reset flag to drain pending capture process requests */ +}; + +/** + * @brief Initialize an ISP syncpoint and get its GoS backing. 
 *
 * @param[in]	chan	ISP channel context
 * @param[in]	name	Syncpoint name
 * @param[in]	enable	Whether to initialize or just clear @a sp
 * @param[out]	sp	Syncpoint handle
 *
 * @returns	0 (success), neg. errno (failure)
 */
static int isp_capture_setup_syncpt(
	struct tegra_isp_channel *chan,
	const char *name,
	bool enable,
	struct syncpoint_info *sp)
{
	struct platform_device *pdev = chan->ndev;
	uint32_t gos_index = GOS_INDEX_INVALID;
	uint32_t gos_offset = 0;
	int err;

	/* Clear the handle first so a disabled syncpoint reads as id 0 */
	memset(sp, 0, sizeof(*sp));

	if (!enable)
		return 0;


	err = chan->ops->alloc_syncpt(pdev, name, &sp->id);
	if (err)
		return err;

	/* Capture the current value as the initial wait threshold */
	err = nvhost_syncpt_read_ext_check(pdev, sp->id, &sp->threshold);
	if (err)
		goto cleanup;

	err = chan->ops->get_syncpt_gos_backing(pdev, sp->id, &sp->shim_addr,
			&gos_index, &gos_offset);
	if (err)
		goto cleanup;

	sp->gos_index = gos_index;
	sp->gos_offset = gos_offset;

	return 0;

cleanup:
	/* Release the allocated syncpoint and leave the handle zeroed */
	chan->ops->release_syncpt(pdev, sp->id);
	memset(sp, 0, sizeof(*sp));

	return err;
}

/**
 * @brief Release an ISP syncpoint and clear its handle.
 *
 * A zero id means the syncpoint was never allocated, so only the handle is
 * cleared in that case.
 *
 * @param[in]	chan	ISP channel context
 * @param[out]	sp	Syncpoint handle
 */
static void isp_capture_release_syncpt(
	struct tegra_isp_channel *chan,
	struct syncpoint_info *sp)
{
	if (sp->id)
		chan->ops->release_syncpt(chan->ndev, sp->id);

	memset(sp, 0, sizeof(*sp));
}

/**
 * @brief Release the ISP channel progress and stats progress syncpoints.
 *
 * @param[in]	chan	ISP channel context
 */
static void isp_capture_release_syncpts(
	struct tegra_isp_channel *chan)
{
	struct isp_capture *capture = chan->capture_data;

	isp_capture_release_syncpt(chan, &capture->progress_sp);
	isp_capture_release_syncpt(chan, &capture->stats_progress_sp);
}

/**
 * @brief Set up the ISP channel progress and stats progress syncpoints.
+ * + * @param[in] chan ISP channel context + * + * @returns 0 (success), neg. errno (failure) + */ +static int isp_capture_setup_syncpts( + struct tegra_isp_channel *chan) +{ + struct isp_capture *capture = chan->capture_data; + int err = 0; + +#ifdef HAVE_ISP_GOS_TABLES + capture->num_gos_tables = chan->ops->get_gos_table(chan->ndev, + &capture->gos_tables); +#endif + + err = isp_capture_setup_syncpt(chan, "progress", true, + &capture->progress_sp); + if (err < 0) + goto fail; + + err = isp_capture_setup_syncpt(chan, "stats_progress", + true, + &capture->stats_progress_sp); + if (err < 0) + goto fail; + + return 0; + +fail: + isp_capture_release_syncpts(chan); + return err; +} + +/** + * @brief Read the value of an ISP channel syncpoint. + * + * @param[in] chan ISP channel context + * @param[in] sp Syncpoint handle + * @param[out] val Syncpoint value + * + * @returns 0 (success), neg. errno (failure) + */ +static int isp_capture_read_syncpt( + struct tegra_isp_channel *chan, + struct syncpoint_info *sp, + uint32_t *val) +{ + int err; + + if (sp->id) { + err = nvhost_syncpt_read_ext_check(chan->ndev, + sp->id, val); + if (err < 0) { + dev_err(chan->isp_dev, + "%s: get syncpt %i val failed\n", __func__, + sp->id); + return -EINVAL; + } + } + + return 0; +} + +/** + * @brief Patch the descriptor GoS SID (@a gos_relative) and syncpoint shim + * address (@a sp_relative) with the ISP IOVA-mapped addresses of a syncpoint + * (@a fence_offset). + * + * @param[in] chan ISP channel context + * @param[in] fence_offset Syncpoint offset from process descriptor queue + * [byte] + * @param[in] gos_relative GoS SID offset from @a fence_offset [byte] + * @param[in] sp_relative Shim address from @a fence_offset [byte] + * + * @returns 0 (success), neg. 
errno (failure) + */ +static int isp_capture_populate_fence_info( + struct tegra_isp_channel *chan, + int fence_offset, + uint32_t gos_relative, + uint32_t sp_relative, + void *reloc_page_addr) +{ + int err = 0; + uint64_t sp_raw; + uint32_t sp_id; + dma_addr_t syncpt_addr; + uint32_t gos_index; + uint32_t gos_offset; + uint64_t gos_info = 0; + reloc_page_addr += fence_offset & PAGE_MASK; + + if (unlikely(reloc_page_addr == NULL)) { + dev_err(chan->isp_dev, "%s: couldn't map request\n", __func__); + return -ENOMEM; + } + + sp_raw = __raw_readq( + (void __iomem *)(reloc_page_addr + + (fence_offset & ~PAGE_MASK))); + sp_id = sp_raw & 0xFFFFFFFF; + + err = chan->ops->get_syncpt_gos_backing(chan->ndev, sp_id, &syncpt_addr, + &gos_index, &gos_offset); + if (err) { + dev_err(chan->isp_dev, + "%s: get GoS backing failed\n", __func__); + goto ret; + } + + gos_info = ((((uint16_t)gos_offset << 16) | ((uint8_t)gos_index) << 8) + & 0xFFFFFFFF); + + __raw_writeq(gos_info, (void __iomem *)(reloc_page_addr + + ((fence_offset + gos_relative) & ~PAGE_MASK))); + + __raw_writeq((uint64_t)syncpt_addr, (void __iomem *)(reloc_page_addr + + ((fence_offset + sp_relative) & ~PAGE_MASK))); + +ret: + return err; +} + +/** + * @brief Patch the inputfence syncpoints of a process descriptor w/ ISP + * IOVA-mapped addresses. + * + * @param[in] chan ISP channel context + * @param[in] req ISP process request + * @param[in] request_offset Descriptor offset from process descriptor queue + * [byte] + * + * @returns 0 (success), neg. 
errno (failure) + */ +static int isp_capture_setup_inputfences( + struct tegra_isp_channel *chan, + struct isp_capture_req *req, + int request_offset) +{ + uint32_t __user *inpfences_reloc_user; + uint32_t *inpfences_relocs = NULL; + uint32_t inputfences_offset = 0; + void *reloc_page_addr = NULL; + struct isp_capture *capture = chan->capture_data; + void *vmap_base = NULL; +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) + struct dma_buf_map map; +#else + struct iosys_map map; +#endif + int i = 0; + int err = 0; + + /* It is valid not to have inputfences for given frame capture */ + if (!req->inputfences_relocs.num_relocs) + return 0; + + inpfences_reloc_user = (uint32_t __user *) + (uintptr_t)req->inputfences_relocs.reloc_relatives; + + inpfences_relocs = kcalloc(req->inputfences_relocs.num_relocs, + sizeof(uint32_t), GFP_KERNEL); + if (unlikely(inpfences_relocs == NULL)) { + dev_err(chan->isp_dev, + "failed to allocate inputfences reloc array\n"); + return -ENOMEM; + } + + err = copy_from_user(inpfences_relocs, inpfences_reloc_user, + req->inputfences_relocs.num_relocs * sizeof(uint32_t)) ? + -EFAULT : 0; + if (err < 0) { + dev_err(chan->isp_dev, "failed to copy inputfences relocs\n"); + goto fail; + } + + err = dma_buf_vmap(capture->capture_desc_ctx.requests.buf, &map); + vmap_base = err ? 
NULL : map.vaddr; + if (!vmap_base) { + pr_err("%s: Cannot map capture descriptor request\n", __func__); + err = -ENOMEM; + goto fail; + } + reloc_page_addr = vmap_base; + + for (i = 0; i < req->inputfences_relocs.num_relocs; i++) { + inputfences_offset = request_offset + + inpfences_relocs[i]; + err = isp_capture_populate_fence_info(chan, inputfences_offset, + req->gos_relative, req->sp_relative, reloc_page_addr); + if (err < 0) { + dev_err(chan->isp_dev, + "Populate inputfences info failed\n"); + goto fail; + } + } + + spec_bar(); + +fail: + if (vmap_base != NULL) + dma_buf_vunmap(capture->capture_desc_ctx.requests.buf, + &map); + kfree(inpfences_relocs); + return err; +} + +/** + * @brief Patch the prefence syncpoints of a process descriptor w/ ISP + * IOVA-mapped addresses. + * + * @param[in] chan ISP channel context + * @param[in] req ISP process request + * @param[in] request_offset Descriptor offset from process descriptor queue + * [byte] + * + * @returns 0 (success), neg. errno (failure) + */ +static int isp_capture_setup_prefences( + struct tegra_isp_channel *chan, + struct isp_capture_req *req, + int request_offset) +{ + uint32_t __user *prefence_reloc_user; + uint32_t *prefence_relocs = NULL; + uint32_t prefence_offset = 0; + int i = 0; + int err = 0; + void *reloc_page_addr = NULL; + struct isp_capture *capture = chan->capture_data; + void *vmap_base = NULL; +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) + struct dma_buf_map map; +#else + struct iosys_map map; +#endif + + /* It is valid not to have prefences for given frame capture */ + if (!req->prefences_relocs.num_relocs) + return 0; + + prefence_reloc_user = (uint32_t __user *) + (uintptr_t)req->prefences_relocs.reloc_relatives; + + prefence_relocs = kcalloc(req->prefences_relocs.num_relocs, + sizeof(uint32_t), GFP_KERNEL); + if (unlikely(prefence_relocs == NULL)) { + dev_err(chan->isp_dev, + "failed to allocate prefences reloc array\n"); + return -ENOMEM; + } + + err = 
copy_from_user(prefence_relocs, prefence_reloc_user, + req->prefences_relocs.num_relocs * sizeof(uint32_t)) ? + -EFAULT : 0; + if (err < 0) { + dev_err(chan->isp_dev, "failed to copy prefences relocs\n"); + goto fail; + } + + err = dma_buf_vmap(capture->capture_desc_ctx.requests.buf, &map); + vmap_base = err ? NULL : map.vaddr; + if (!vmap_base) { + pr_err("%s: Cannot map capture descriptor request\n", __func__); + err = -ENOMEM; + goto fail; + } + reloc_page_addr = vmap_base; + + for (i = 0; i < req->prefences_relocs.num_relocs; i++) { + prefence_offset = request_offset + + prefence_relocs[i]; + err = isp_capture_populate_fence_info(chan, prefence_offset, + req->gos_relative, req->sp_relative, reloc_page_addr); + if (err < 0) { + dev_err(chan->isp_dev, "Populate prefences info failed\n"); + goto fail; + } + } + + spec_bar(); + +fail: + if (vmap_base != NULL) + dma_buf_vunmap(capture->capture_desc_ctx.requests.buf, + &map); + kfree(prefence_relocs); + return err; +} + +/** + * @brief Unpin and free the list of pinned capture_mapping's associated with an + * ISP process request. + * + * @param[in] chan ISP channel context + * @param[in] buffer_index Process descriptor queue index + */ +static void isp_capture_request_unpin( + struct tegra_isp_channel *chan, + uint32_t buffer_index) +{ + struct isp_capture *capture = chan->capture_data; + struct capture_common_unpins *unpins; + int i = 0; + + mutex_lock(&capture->capture_desc_ctx.unpins_list_lock); + unpins = &capture->capture_desc_ctx.unpins_list[buffer_index]; + if (unpins->num_unpins != 0U) { + for (i = 0; i < unpins->num_unpins; i++) + put_mapping(capture->buffer_ctx, unpins->data[i]); + (void)memset(unpins, 0U, sizeof(*unpins)); + } + mutex_unlock(&capture->capture_desc_ctx.unpins_list_lock); +} + +/** + * @brief Unpin and free the list of pinned capture_mapping's associated with an + * ISP program request. 
+ * + * @param[in] chan ISP channel context + * @param[in] buffer_index Program descriptor queue index + */ +static void isp_capture_program_request_unpin( + struct tegra_isp_channel *chan, + uint32_t buffer_index) +{ + struct isp_capture *capture = chan->capture_data; + struct capture_common_unpins *unpins; + int i = 0; + + mutex_lock(&capture->program_desc_ctx.unpins_list_lock); + unpins = &capture->program_desc_ctx.unpins_list[buffer_index]; + if (unpins->num_unpins != 0U) { + for (i = 0; i < unpins->num_unpins; i++) + put_mapping(capture->buffer_ctx, unpins->data[i]); + (void)memset(unpins, 0U, sizeof(*unpins)); + } + mutex_unlock(&capture->program_desc_ctx.unpins_list_lock); +} + +/** + * @brief Prepare and submit a pin and relocation request for a program + * descriptor, the resultant mappings are added to the channel program + * descriptor queue's @em unpins_list. + * + * @param[in] chan ISP channel context + * @param[in] req ISP program request + * + * @returns 0 (success), neg. 
errno (failure) + */ +static int isp_capture_program_prepare( + struct tegra_isp_channel *chan, + struct isp_program_req *req) +{ + struct isp_capture *capture = chan->capture_data; + int err = 0; + struct memoryinfo_surface *meminfo; + struct isp_program_descriptor *desc; + uint32_t request_offset; + + if (capture == NULL) { + dev_err(chan->isp_dev, + "%s: isp capture uninitialized\n", __func__); + return -ENODEV; + } + + if (capture->channel_id == CAPTURE_CHANNEL_ISP_INVALID_ID) { + dev_err(chan->isp_dev, + "%s: setup channel first\n", __func__); + return -ENODEV; + } + + if (req == NULL) { + dev_err(chan->isp_dev, + "%s: Invalid program req\n", __func__); + return -EINVAL; + } + + if (capture->program_desc_ctx.unpins_list == NULL) { + dev_err(chan->isp_dev, "Channel setup incomplete\n"); + return -EINVAL; + } + + if (req->buffer_index >= capture->program_desc_ctx.queue_depth) { + dev_err(chan->isp_dev, "buffer index is out of bound\n"); + return -EINVAL; + } + + spec_bar(); + + mutex_lock(&capture->reset_lock); + if (capture->reset_capture_program_flag) { + /* consume any pending completions when coming out of reset */ + while (try_wait_for_completion(&capture->capture_program_resp)) + ; /* do nothing */ + } + capture->reset_capture_program_flag = false; + mutex_unlock(&capture->reset_lock); + + mutex_lock(&capture->program_desc_ctx.unpins_list_lock); + + if (capture->program_desc_ctx.unpins_list[req->buffer_index].num_unpins != 0) { + dev_err(chan->isp_dev, + "%s: program request is still in use by rtcpu\n", + __func__); + mutex_unlock(&capture->program_desc_ctx.unpins_list_lock); + return -EBUSY; + } + + meminfo = &((struct memoryinfo_surface *) + capture->program_desc_ctx.requests_memoryinfo) + [req->buffer_index]; + + desc = (struct isp_program_descriptor *) + (capture->program_desc_ctx.requests.va + req->buffer_index * + capture->program_desc_ctx.request_size); + + /* Pushbuffer 1 is located after program desc in same ringbuffer */ + request_offset = 
req->buffer_index * + capture->program_desc_ctx.request_size; + + err = capture_common_pin_and_get_iova(chan->capture_data->buffer_ctx, + (uint32_t)(desc->isp_pb1_mem >> 32U), /* mem handle */ + ((uint32_t)desc->isp_pb1_mem) + request_offset, /* offset */ + &meminfo->base_address, + &meminfo->size, + &capture->program_desc_ctx.unpins_list[req->buffer_index]); + + mutex_unlock(&capture->program_desc_ctx.unpins_list_lock); + + return err; +} + +/** + * @brief Unpin an ISP process request and flush the memory. + * + * @param[in] capture ISP channel capture context + * @param[in] buffer_index Process descriptor queue index + */ +static inline void isp_capture_ivc_capture_cleanup( + struct isp_capture *capture, + uint32_t buffer_index) +{ + struct tegra_isp_channel *chan = capture->isp_channel; + + isp_capture_request_unpin(chan, buffer_index); + dma_sync_single_range_for_cpu(capture->rtcpu_dev, + capture->capture_desc_ctx.requests.iova, + buffer_index * capture->capture_desc_ctx.request_size, + capture->capture_desc_ctx.request_size, + DMA_FROM_DEVICE); +} + +/** + * @brief Signal completion or write progress status to notifier for ISP capture + * indication from RCE. + * + * If the ISP channel's progress status notifier is not set, the capture + * completion will be signalled. + * + * @param[in] capture ISP channel capture context + * @param[in] buffer_index Process descriptor queue index + */ +static inline void isp_capture_ivc_capture_signal( + struct isp_capture *capture, + uint32_t buffer_index) +{ + if (capture->is_progress_status_notifier_set) { + (void)capture_common_set_progress_status( + &capture->progress_status_notifier, + buffer_index, + capture->capture_desc_ctx.progress_status_buffer_depth, + PROGRESS_STATUS_DONE); + } else { + /* + * Only fire completions if not using + * the new progress status buffer mechanism + */ + complete(&capture->capture_resp); + } +} + +/** + * @brief Unpin an ISP program request and flush the memory. 
+ * + * @param[in] capture ISP channel capture context + * @param[in] buffer_index Program descriptor queue index + */ +static inline void isp_capture_ivc_program_cleanup( + struct isp_capture *capture, + uint32_t buffer_index) +{ + struct tegra_isp_channel *chan = capture->isp_channel; + + isp_capture_program_request_unpin(chan, buffer_index); + dma_sync_single_range_for_cpu(capture->rtcpu_dev, + capture->program_desc_ctx.requests.iova, + buffer_index * capture->program_desc_ctx.request_size, + capture->program_desc_ctx.request_size, + DMA_FROM_DEVICE); +} + +/** + * @brief Signal completion or write progress status to notifier for ISP program + * indication from RCE. + * + * If the ISP channel's progress status notifier is not set, the program + * completion will be signalled. + * + * @param[in] capture ISP channel capture context + * @param[in] buffer_index Program descriptor queue index + */ +static inline void isp_capture_ivc_program_signal( + struct isp_capture *capture, + uint32_t buffer_index) +{ + if (capture->is_progress_status_notifier_set) { + /* + * Program status notifiers are after the process status + * notifiers; add the process status buffer depth as an offset. + */ + (void)capture_common_set_progress_status( + &capture->progress_status_notifier, + buffer_index + + capture->capture_desc_ctx.progress_status_buffer_depth, + capture->program_desc_ctx.progress_status_buffer_depth + + capture->capture_desc_ctx.progress_status_buffer_depth, + PROGRESS_STATUS_DONE); + } else { + /* + * Only fire completions if not using + * the new progress status buffer mechanism + */ + complete(&capture->capture_program_resp); + } +} + +/** + * @brief ISP channel callback function for @em capture IVC messages. 
+ * + * @param[in] ivc_resp IVC @ref CAPTURE_MSG from RCE + * @param[in] pcontext ISP channel capture context + */ +static void isp_capture_ivc_status_callback( + const void *ivc_resp, + const void *pcontext) +{ + struct CAPTURE_MSG *status_msg = (struct CAPTURE_MSG *)ivc_resp; + struct isp_capture *capture = (struct isp_capture *)pcontext; + struct tegra_isp_channel *chan = capture->isp_channel; + uint32_t buffer_index; + + if (unlikely(capture == NULL)) { + dev_err(chan->isp_dev, "%s: invalid context", __func__); + return; + } + + if (unlikely(status_msg == NULL)) { + dev_err(chan->isp_dev, "%s: invalid response", __func__); + return; + } + + switch (status_msg->header.msg_id) { + case CAPTURE_ISP_STATUS_IND: + buffer_index = status_msg->capture_isp_status_ind.buffer_index; + isp_capture_ivc_capture_cleanup(capture, buffer_index); + isp_capture_ivc_capture_signal(capture, buffer_index); + dev_dbg(chan->isp_dev, "%s: status chan_id %u msg_id %u\n", + __func__, status_msg->header.channel_id, + status_msg->header.msg_id); + break; + case CAPTURE_ISP_PROGRAM_STATUS_IND: + buffer_index = + status_msg->capture_isp_program_status_ind.buffer_index; + isp_capture_ivc_program_cleanup(capture, buffer_index); + isp_capture_ivc_program_signal(capture, buffer_index); + dev_dbg(chan->isp_dev, + "%s: isp_ program status chan_id %u msg_id %u\n", + __func__, status_msg->header.channel_id, + status_msg->header.msg_id); + break; + case CAPTURE_ISP_EX_STATUS_IND: + buffer_index = + status_msg->capture_isp_ex_status_ind + .process_buffer_index; + isp_capture_ivc_program_cleanup(capture, + status_msg->capture_isp_ex_status_ind + .program_buffer_index); + isp_capture_ivc_capture_cleanup(capture, buffer_index); + isp_capture_ivc_capture_signal(capture, buffer_index); + + dev_dbg(chan->isp_dev, + "%s: isp extended status chan_id %u msg_id %u\n", + __func__, status_msg->header.channel_id, + status_msg->header.msg_id); + break; + default: + dev_err(chan->isp_dev, + "%s: unknown capture 
resp", __func__); + break; + } +} + +/** + * @brief Send a @em capture-control IVC message to RCE on an ISP channel, and + * block w/ timeout, waiting for the RCE response. + * + * @param[in] chan ISP channel context + * @param[in] msg IVC message payload + * @param[in] size Size of @a msg [byte] + * @param[in] resp_id IVC message identifier, see @CAPTURE_MSG_IDS + * + * @returns 0 (success), neg. errno (failure) + */ +static int isp_capture_ivc_send_control(struct tegra_isp_channel *chan, + const struct CAPTURE_CONTROL_MSG *msg, size_t size, + uint32_t resp_id) +{ + struct isp_capture *capture = chan->capture_data; + struct CAPTURE_MSG_HEADER resp_header = msg->header; + uint32_t timeout = HZ; + int err = 0; + + dev_dbg(chan->isp_dev, "%s: sending chan_id %u msg_id %u\n", + __func__, resp_header.channel_id, resp_header.msg_id); + + resp_header.msg_id = resp_id; + + /* Send capture control IVC message */ + mutex_lock(&capture->control_msg_lock); + err = tegra_capture_ivc_control_submit(msg, size); + if (err < 0) { + dev_err(chan->isp_dev, "IVC control submit failed\n"); + goto fail; + } + + timeout = wait_for_completion_timeout(&capture->control_resp, timeout); + if (timeout <= 0) { + dev_err(chan->isp_dev, + "isp capture control message timed out\n"); + err = -ETIMEDOUT; + goto fail; + } + + if (memcmp(&resp_header, &capture->control_resp_msg.header, + sizeof(resp_header)) != 0) { + dev_err(chan->isp_dev, + "unexpected response from camera processor\n"); + err = -EINVAL; + goto fail; + } + mutex_unlock(&capture->control_msg_lock); + + dev_dbg(chan->isp_dev, "%s: response chan_id %u msg_id %u\n", + __func__, capture->control_resp_msg.header.channel_id, + capture->control_resp_msg.header.msg_id); + return 0; + +fail: + mutex_unlock(&capture->control_msg_lock); + return err; +} + +/** + * @brief ISP channel callback function for @em capture-control IVC messages, + * this unblocks the channel's @em capture-control completion. 
+ * + * @param[in] ivc_resp IVC @ref CAPTURE_CONTROL_MSG from RCE + * @param[in] pcontext ISP channel capture context + */ +static void isp_capture_ivc_control_callback( + const void *ivc_resp, + const void *pcontext) +{ + const struct CAPTURE_CONTROL_MSG *control_msg = ivc_resp; + struct isp_capture *capture = (struct isp_capture *)pcontext; + struct tegra_isp_channel *chan = capture->isp_channel; + + if (unlikely(capture == NULL)) { + dev_err(chan->isp_dev, "%s: invalid context", __func__); + return; + } + + if (unlikely(control_msg == NULL)) { + dev_err(chan->isp_dev, "%s: invalid response", __func__); + return; + } + + switch (control_msg->header.msg_id) { + case CAPTURE_CHANNEL_ISP_SETUP_RESP: + case CAPTURE_CHANNEL_ISP_RESET_RESP: + case CAPTURE_CHANNEL_ISP_RELEASE_RESP: + memcpy(&capture->control_resp_msg, control_msg, + sizeof(*control_msg)); + complete(&capture->control_resp); + break; + default: + dev_err(chan->isp_dev, + "%s: unknown capture isp control resp", __func__); + break; + } +} + +int isp_capture_init( + struct tegra_isp_channel *chan) +{ + struct isp_capture *capture; + struct device_node *dn; + struct platform_device *rtc_pdev; + + dev_dbg(chan->isp_dev, "%s++\n", __func__); + dn = of_find_node_by_path("tegra-camera-rtcpu"); + if (of_device_is_available(dn) == 0) { + dev_err(chan->isp_dev, "failed to find rtcpu device node\n"); + return -ENODEV; + } + rtc_pdev = of_find_device_by_node(dn); + if (rtc_pdev == NULL) { + dev_err(chan->isp_dev, "failed to find rtcpu platform\n"); + return -ENODEV; + } + + capture = kzalloc(sizeof(*capture), GFP_KERNEL); + if (unlikely(capture == NULL)) { + dev_err(chan->isp_dev, "failed to allocate capture channel\n"); + return -ENOMEM; + } + + capture->rtcpu_dev = &rtc_pdev->dev; + + init_completion(&capture->control_resp); + init_completion(&capture->capture_resp); + init_completion(&capture->capture_program_resp); + + mutex_init(&capture->control_msg_lock); + 
mutex_init(&capture->capture_desc_ctx.unpins_list_lock); + mutex_init(&capture->program_desc_ctx.unpins_list_lock); + mutex_init(&capture->reset_lock); + + capture->isp_channel = chan; + chan->capture_data = capture; + + capture->channel_id = CAPTURE_CHANNEL_ISP_INVALID_ID; + + capture->reset_capture_program_flag = false; + capture->reset_capture_flag = false; + + return 0; +} + +void isp_capture_shutdown( + struct tegra_isp_channel *chan) +{ + struct isp_capture *capture = chan->capture_data; + + dev_dbg(chan->isp_dev, "%s--\n", __func__); + if (capture == NULL) + return; + + if (capture->channel_id != CAPTURE_CHANNEL_ISP_INVALID_ID) { + /* No valid ISP reset flags defined now, use zero */ + isp_capture_reset(chan, 0); + isp_capture_release(chan, 0); + } + + kfree(capture); + chan->capture_data = NULL; +} + +int isp_capture_setup( + struct tegra_isp_channel *chan, + struct isp_capture_setup *setup) +{ + struct capture_buffer_table *buffer_ctx; + struct isp_capture *capture = chan->capture_data; + uint32_t transaction; + struct CAPTURE_CONTROL_MSG control_msg; + struct CAPTURE_CONTROL_MSG *resp_msg = &capture->control_resp_msg; + struct capture_channel_isp_config *config = + &control_msg.channel_isp_setup_req.channel_config; + int err = 0; +#ifdef HAVE_ISP_GOS_TABLES + int i; +#endif + + nv_camera_log(chan->ndev, + __arch_counter_get_cntvct(), + NVHOST_CAMERA_ISP_CAPTURE_SETUP); + + if (capture == NULL) { + dev_err(chan->isp_dev, + "%s: isp capture uninitialized\n", __func__); + return -ENODEV; + } + + if (capture->channel_id != CAPTURE_CHANNEL_ISP_INVALID_ID) { + dev_err(chan->isp_dev, + "%s: already setup, release first\n", __func__); + return -EEXIST; + } + + dev_dbg(chan->isp_dev, "chan flags %u\n", setup->channel_flags); + dev_dbg(chan->isp_dev, "queue depth %u\n", setup->queue_depth); + dev_dbg(chan->isp_dev, "request size %u\n", setup->request_size); + + if (setup->channel_flags == 0 || + setup->queue_depth == 0 || + setup->request_size == 0) + return 
-EINVAL; + + buffer_ctx = create_buffer_table(chan->isp_dev); + if (unlikely(buffer_ctx == NULL)) { + dev_err(chan->isp_dev, "cannot setup buffer context"); + return -ENOMEM; + } + + /* pin the process descriptor ring buffer to RTCPU */ + dev_dbg(chan->isp_dev, "%s: descr buffer handle 0x%x\n", + __func__, setup->mem); + err = capture_common_pin_memory(capture->rtcpu_dev, + setup->mem, &capture->capture_desc_ctx.requests); + if (err < 0) { + dev_err(chan->isp_dev, "%s: memory setup failed\n", __func__); + goto pin_fail; + } + + /* pin the process descriptor ring buffer to ISP */ + err = capture_buffer_add(buffer_ctx, setup->mem); + if (err < 0) { + dev_err(chan->isp_dev, "%s: memory setup failed\n", __func__); + goto pin_fail; + } + + /* cache isp capture desc ring buffer details */ + capture->capture_desc_ctx.queue_depth = setup->queue_depth; + capture->capture_desc_ctx.request_size = setup->request_size; + capture->capture_desc_ctx.request_buf_size = setup->request_size * + setup->queue_depth; + + /* allocate isp capture desc unpin list based on queue depth */ + capture->capture_desc_ctx.unpins_list = vzalloc( + capture->capture_desc_ctx.queue_depth * + sizeof(*capture->capture_desc_ctx.unpins_list)); + if (unlikely(capture->capture_desc_ctx.unpins_list == NULL)) { + dev_err(chan->isp_dev, "failed to allocate unpins array\n"); + goto unpins_list_fail; + } + + /* Allocate memory info ring buffer for isp capture descriptors */ + capture->capture_desc_ctx.requests_memoryinfo = + dma_alloc_coherent(capture->rtcpu_dev, + capture->capture_desc_ctx.queue_depth * + sizeof(struct isp_capture_descriptor_memoryinfo), + &capture->capture_desc_ctx.requests_memoryinfo_iova, + GFP_KERNEL); + + if (!capture->capture_desc_ctx.requests_memoryinfo) { + dev_err(chan->isp_dev, + "%s: capture_desc_ctx meminfo alloc failed\n", + __func__); + goto capture_meminfo_alloc_fail; + } + + + /* pin the isp program descriptor ring buffer */ + dev_dbg(chan->isp_dev, "%s: descr buffer handle 
%u\n", + __func__, setup->isp_program_mem); + err = capture_common_pin_memory(capture->rtcpu_dev, + setup->isp_program_mem, + &capture->program_desc_ctx.requests); + if (err < 0) { + dev_err(chan->isp_dev, + "%s: isp_program memory setup failed\n", __func__); + goto prog_pin_fail; + } + + /* pin the isp program descriptor ring buffer to ISP */ + err = capture_buffer_add(buffer_ctx, setup->isp_program_mem); + if (err < 0) { + dev_err(chan->isp_dev, + "%s: isp_program memory setup failed\n", __func__); + goto prog_pin_fail; + } + + /* cache isp program desc ring buffer details */ + capture->program_desc_ctx.queue_depth = setup->isp_program_queue_depth; + capture->program_desc_ctx.request_size = + setup->isp_program_request_size; + capture->program_desc_ctx.request_buf_size = + setup->isp_program_request_size * + setup->isp_program_queue_depth; + + /* allocate isp program unpin list based on queue depth */ + capture->program_desc_ctx.unpins_list = vzalloc( + capture->program_desc_ctx.queue_depth * + sizeof(*capture->program_desc_ctx.unpins_list)); + if (unlikely(capture->program_desc_ctx.unpins_list == NULL)) { + dev_err(chan->isp_dev, + "failed to allocate isp program unpins array\n"); + goto prog_unpins_list_fail; + } + + /* Allocate memory info ring buffer for program descriptors */ + capture->program_desc_ctx.requests_memoryinfo = + dma_alloc_coherent(capture->rtcpu_dev, + capture->program_desc_ctx.queue_depth * + sizeof(struct memoryinfo_surface), + &capture->program_desc_ctx.requests_memoryinfo_iova, + GFP_KERNEL); + + if (!capture->program_desc_ctx.requests_memoryinfo) { + dev_err(chan->isp_dev, + "%s: program_desc_ctx meminfo alloc failed\n", + __func__); + goto program_meminfo_alloc_fail; + } + + err = isp_capture_setup_syncpts(chan); + if (err < 0) { + dev_err(chan->isp_dev, "%s: syncpt setup failed\n", __func__); + goto syncpt_fail; + } + + err = tegra_capture_ivc_register_control_cb( + &isp_capture_ivc_control_callback, + &transaction, capture); + if (err 
< 0) { + dev_err(chan->isp_dev, "failed to register control callback\n"); + goto control_cb_fail; + } + + /* Fill in control config msg to be sent over ctrl ivc chan to RTCPU */ + memset(&control_msg, 0, sizeof(control_msg)); + + control_msg.header.msg_id = CAPTURE_CHANNEL_ISP_SETUP_REQ; + control_msg.header.transaction = transaction; + + config->channel_flags = setup->channel_flags; + + config->request_queue_depth = setup->queue_depth; + config->request_size = setup->request_size; + config->requests = capture->capture_desc_ctx.requests.iova; + config->requests_memoryinfo = + capture->capture_desc_ctx.requests_memoryinfo_iova; + config->request_memoryinfo_size = + sizeof(struct isp_capture_descriptor_memoryinfo); + + config->program_queue_depth = setup->isp_program_queue_depth; + config->program_size = setup->isp_program_request_size; + config->programs = capture->program_desc_ctx.requests.iova; + config->programs_memoryinfo = + capture->program_desc_ctx.requests_memoryinfo_iova; + config->program_memoryinfo_size = + sizeof(struct memoryinfo_surface); + + config->progress_sp = capture->progress_sp; + config->stats_progress_sp = capture->stats_progress_sp; + +#ifdef HAVE_ISP_GOS_TABLES + dev_dbg(chan->isp_dev, "%u GoS tables configured.\n", + capture->num_gos_tables); + for (i = 0; i < capture->num_gos_tables; i++) { + config->isp_gos_tables[i] = (iova_t)capture->gos_tables[i]; + dev_dbg(chan->isp_dev, "gos[%d] = 0x%08llx\n", + i, (u64)capture->gos_tables[i]); + } + config->num_isp_gos_tables = capture->num_gos_tables; +#endif + + err = isp_capture_ivc_send_control(chan, &control_msg, + sizeof(control_msg), CAPTURE_CHANNEL_ISP_SETUP_RESP); + if (err < 0) + goto submit_fail; + + if (resp_msg->channel_isp_setup_resp.result != CAPTURE_OK) { + dev_err(chan->isp_dev, "%s: control failed, errno %d", __func__, + resp_msg->channel_setup_resp.result); + err = -EIO; + goto submit_fail; + } + + capture->channel_id = resp_msg->channel_isp_setup_resp.channel_id; + + err = 
tegra_capture_ivc_notify_chan_id(capture->channel_id, + transaction); + if (err < 0) { + dev_err(chan->isp_dev, "failed to update control callback\n"); + goto cb_fail; + } + + err = tegra_capture_ivc_register_capture_cb( + &isp_capture_ivc_status_callback, + capture->channel_id, capture); + if (err < 0) { + dev_err(chan->isp_dev, "failed to register capture callback\n"); + goto cb_fail; + } + + capture->buffer_ctx = buffer_ctx; + + return 0; + +cb_fail: + if (isp_capture_release(chan, CAPTURE_CHANNEL_RESET_FLAG_IMMEDIATE)) + destroy_buffer_table(buffer_ctx); + return err; +submit_fail: + tegra_capture_ivc_unregister_control_cb(transaction); +control_cb_fail: + isp_capture_release_syncpts(chan); +syncpt_fail: + dma_free_coherent(capture->rtcpu_dev, + capture->program_desc_ctx.queue_depth * + sizeof(struct memoryinfo_surface), + capture->program_desc_ctx.requests_memoryinfo, + capture->program_desc_ctx.requests_memoryinfo_iova); +program_meminfo_alloc_fail: + vfree(capture->program_desc_ctx.unpins_list); +prog_unpins_list_fail: + capture_common_unpin_memory(&capture->program_desc_ctx.requests); +prog_pin_fail: + dma_free_coherent(capture->rtcpu_dev, + capture->capture_desc_ctx.queue_depth * + sizeof(struct isp_capture_descriptor_memoryinfo), + capture->capture_desc_ctx.requests_memoryinfo, + capture->capture_desc_ctx.requests_memoryinfo_iova); +capture_meminfo_alloc_fail: + vfree(capture->capture_desc_ctx.unpins_list); +unpins_list_fail: + capture_common_unpin_memory(&capture->capture_desc_ctx.requests); +pin_fail: + destroy_buffer_table(buffer_ctx); + return err; +} + +int isp_capture_release( + struct tegra_isp_channel *chan, + uint32_t reset_flags) +{ + struct isp_capture *capture = chan->capture_data; + struct CAPTURE_CONTROL_MSG control_msg; + struct CAPTURE_CONTROL_MSG *resp_msg = &capture->control_resp_msg; + int i; + int err = 0; + + nv_camera_log(chan->ndev, + __arch_counter_get_cntvct(), + NVHOST_CAMERA_ISP_CAPTURE_RELEASE); + + if (capture == NULL) { + 
dev_err(chan->isp_dev, + "%s: isp capture uninitialized\n", __func__); + return -ENODEV; + } + + if (capture->channel_id == CAPTURE_CHANNEL_ISP_INVALID_ID) { + dev_err(chan->isp_dev, + "%s: setup channel first\n", __func__); + return -ENODEV; + } + + memset(&control_msg, 0, sizeof(control_msg)); + + control_msg.header.msg_id = CAPTURE_CHANNEL_ISP_RELEASE_REQ; + control_msg.header.channel_id = capture->channel_id; + control_msg.channel_release_req.reset_flags = reset_flags; + + err = isp_capture_ivc_send_control(chan, &control_msg, + sizeof(control_msg), CAPTURE_CHANNEL_ISP_RELEASE_RESP); + if (err < 0) + goto error; + + if (resp_msg->channel_isp_release_resp.result != CAPTURE_OK) { + dev_err(chan->isp_dev, "%s: control failed, errno %d", __func__, + resp_msg->channel_release_resp.result); + err = -EINVAL; + goto error; + } + + err = tegra_capture_ivc_unregister_capture_cb(capture->channel_id); + if (err < 0) { + dev_err(chan->isp_dev, + "failed to unregister capture callback\n"); + goto error; + } + + err = tegra_capture_ivc_unregister_control_cb(capture->channel_id); + if (err < 0) { + dev_err(chan->isp_dev, + "failed to unregister control callback\n"); + goto error; + } + + for (i = 0; i < capture->program_desc_ctx.queue_depth; i++) { + complete(&capture->capture_program_resp); + isp_capture_program_request_unpin(chan, i); + } + + capture_common_unpin_memory(&capture->program_desc_ctx.requests); + + for (i = 0; i < capture->capture_desc_ctx.queue_depth; i++) { + complete(&capture->capture_resp); + isp_capture_request_unpin(chan, i); + } + + spec_bar(); + + isp_capture_release_syncpts(chan); + + capture_common_unpin_memory(&capture->capture_desc_ctx.requests); + + vfree(capture->program_desc_ctx.unpins_list); + capture->program_desc_ctx.unpins_list = NULL; + vfree(capture->capture_desc_ctx.unpins_list); + capture->capture_desc_ctx.unpins_list = NULL; + + dma_free_coherent(capture->rtcpu_dev, + capture->program_desc_ctx.queue_depth * + sizeof(struct 
memoryinfo_surface), + capture->program_desc_ctx.requests_memoryinfo, + capture->program_desc_ctx.requests_memoryinfo_iova); + + dma_free_coherent(capture->rtcpu_dev, + capture->capture_desc_ctx.queue_depth * + sizeof(struct isp_capture_descriptor_memoryinfo), + capture->capture_desc_ctx.requests_memoryinfo, + capture->capture_desc_ctx.requests_memoryinfo_iova); + + if (capture->is_progress_status_notifier_set) + capture_common_release_progress_status_notifier( + &capture->progress_status_notifier); + + destroy_buffer_table(capture->buffer_ctx); + capture->buffer_ctx = NULL; + + capture->channel_id = CAPTURE_CHANNEL_ISP_INVALID_ID; + + return 0; + +error: + return err; +} + +int isp_capture_reset( + struct tegra_isp_channel *chan, + uint32_t reset_flags) +{ + struct isp_capture *capture = chan->capture_data; +#ifdef CAPTURE_ISP_RESET_BARRIER_IND + struct CAPTURE_MSG capture_msg; +#endif + struct CAPTURE_CONTROL_MSG control_msg; + struct CAPTURE_CONTROL_MSG *resp_msg = &capture->control_resp_msg; + int i; + int err = 0; + + nv_camera_log(chan->ndev, + __arch_counter_get_cntvct(), + NVHOST_CAMERA_ISP_CAPTURE_RESET); + + if (capture == NULL) { + dev_err(chan->isp_dev, + "%s: isp capture uninitialized\n", __func__); + return -ENODEV; + } + + if (capture->channel_id == CAPTURE_CHANNEL_ISP_INVALID_ID) { + dev_err(chan->isp_dev, + "%s: setup channel first\n", __func__); + return -ENODEV; + } + + mutex_lock(&capture->reset_lock); + capture->reset_capture_program_flag = true; + capture->reset_capture_flag = true; + +#ifdef CAPTURE_ISP_RESET_BARRIER_IND + memset(&capture_msg, 0, sizeof(capture_msg)); + capture_msg.header.msg_id = CAPTURE_ISP_RESET_BARRIER_IND; + capture_msg.header.channel_id = capture->channel_id; + + err = tegra_capture_ivc_capture_submit(&capture_msg, + sizeof(capture_msg)); + if (err < 0) { + dev_err(chan->isp_dev, "IVC capture submit failed\n"); + goto error; + } +#endif + + memset(&control_msg, 0, sizeof(control_msg)); + control_msg.header.msg_id = 
CAPTURE_CHANNEL_ISP_RESET_REQ; + control_msg.header.channel_id = capture->channel_id; + control_msg.channel_isp_reset_req.reset_flags = reset_flags; + + err = isp_capture_ivc_send_control(chan, &control_msg, + sizeof(control_msg), CAPTURE_CHANNEL_ISP_RESET_RESP); + if (err < 0) + goto error; + +#ifdef CAPTURE_ISP_RESET_BARRIER_IND + if (resp_msg->channel_isp_reset_resp.result == CAPTURE_ERROR_TIMEOUT) { + dev_dbg(chan->isp_dev, "%s: isp reset timedout\n", __func__); + err = -EAGAIN; + goto error; + } +#endif + + if (resp_msg->channel_isp_reset_resp.result != CAPTURE_OK) { + dev_err(chan->isp_dev, "%s: control failed, errno %d", __func__, + resp_msg->channel_isp_reset_resp.result); + err = -EINVAL; + goto error; + } + + for (i = 0; i < capture->program_desc_ctx.queue_depth; i++) { + isp_capture_program_request_unpin(chan, i); + complete(&capture->capture_program_resp); + } + spec_bar(); + + for (i = 0; i < capture->capture_desc_ctx.queue_depth; i++) { + isp_capture_request_unpin(chan, i); + complete(&capture->capture_resp); + } + + spec_bar(); + + err = 0; + +error: + mutex_unlock(&capture->reset_lock); + return err; +} + +int isp_capture_get_info( + struct tegra_isp_channel *chan, + struct isp_capture_info *info) +{ + struct isp_capture *capture = chan->capture_data; + int err; + + nv_camera_log(chan->ndev, + __arch_counter_get_cntvct(), + NVHOST_CAMERA_ISP_CAPTURE_GET_INFO); + + if (capture == NULL) { + dev_err(chan->isp_dev, + "%s: isp capture uninitialized\n", __func__); + return -ENODEV; + } + + if (capture->channel_id == CAPTURE_CHANNEL_ISP_INVALID_ID) { + dev_err(chan->isp_dev, + "%s: setup channel first\n", __func__); + return -ENODEV; + } + + if (info == NULL) { + dev_err(chan->isp_dev, + "%s: Invalid user parameter\n", __func__); + return -EINVAL; + } + + info->channel_id = capture->channel_id; + + info->syncpts.progress_syncpt = capture->progress_sp.id; + info->syncpts.stats_progress_syncpt = + capture->stats_progress_sp.id; + + err = 
isp_capture_read_syncpt(chan, &capture->progress_sp, + &info->syncpts.progress_syncpt_val); + if (err < 0) + return err; + + err = isp_capture_read_syncpt(chan, &capture->stats_progress_sp, + &info->syncpts.stats_progress_syncpt_val); + if (err < 0) + return err; + + return 0; +} + +/** + * Pin/map buffers and save iova boundaries into corresponding + * memoryinfo struct. + */ +static int pin_isp_capture_request_buffers_locked( + struct tegra_isp_channel *chan, + struct isp_capture_req *req, + struct capture_common_unpins *request_unpins) +{ + struct isp_desc_rec *capture_desc_ctx = + &chan->capture_data->capture_desc_ctx; + struct isp_capture_descriptor *desc = (struct isp_capture_descriptor *) + (capture_desc_ctx->requests.va + + req->buffer_index * capture_desc_ctx->request_size); + + struct isp_capture_descriptor_memoryinfo *desc_mem = + &((struct isp_capture_descriptor_memoryinfo *) + capture_desc_ctx->requests_memoryinfo) + [req->buffer_index]; + + struct capture_buffer_table *buffer_ctx = + chan->capture_data->buffer_ctx; + int i, j; + int err = 0; + + /* Pushbuffer 2 is located after isp desc, in same ringbuffer */ + uint32_t request_offset = req->buffer_index * + capture_desc_ctx->request_size; + + err = capture_common_pin_and_get_iova(buffer_ctx, + (uint32_t)(desc->isp_pb2_mem >> 32U), + ((uint32_t)desc->isp_pb2_mem) + request_offset, + &desc_mem->isp_pb2_mem.base_address, + &desc_mem->isp_pb2_mem.size, + request_unpins); + + if (err) { + dev_err(chan->isp_dev, "%s: get pushbuffer2 iova failed\n", + __func__); + goto fail; + } + + for (i = 0; i < ISP_MAX_INPUT_SURFACES; i++) { + err = capture_common_pin_and_get_iova(buffer_ctx, + desc->input_mr_surfaces[i].offset_hi, + desc->input_mr_surfaces[i].offset, + &desc_mem->input_mr_surfaces[i].base_address, + &desc_mem->input_mr_surfaces[i].size, + request_unpins); + + if (err) { + dev_err(chan->isp_dev, + "%s: get input_mr_surfaces iova failed\n", + __func__); + goto fail; + } + } + + for (i = 0; i < 
ISP_MAX_OUTPUTS; i++) { + for (j = 0; j < ISP_MAX_OUTPUT_SURFACES; j++) { + err = capture_common_pin_and_get_iova(buffer_ctx, + desc->outputs_mw[i].surfaces[j].offset_hi, + desc->outputs_mw[i].surfaces[j].offset, + &desc_mem->outputs_mw[i].surfaces[j].base_address, + &desc_mem->outputs_mw[i].surfaces[j].size, + request_unpins); + + if (err) { + dev_err(chan->isp_dev, + "%s: get outputs_mw iova failed\n", + __func__); + goto fail; + } + } + } + + /* Pin stats surfaces */ + { + struct stats_surface *stats_surfaces[] = { + &desc->fb_surface, &desc->fm_surface, + &desc->afm_surface, &desc->lac0_surface, + &desc->lac1_surface, &desc->h0_surface, + &desc->h1_surface, &desc->hist_raw24_surface, + &desc->pru_bad_surface, &desc->ltm_surface, + }; + + struct memoryinfo_surface *meminfo_surfaces[] = { + &desc_mem->fb_surface, &desc_mem->fm_surface, + &desc_mem->afm_surface, &desc_mem->lac0_surface, + &desc_mem->lac1_surface, &desc_mem->h0_surface, + &desc_mem->h1_surface, &desc_mem->hist_raw24_surface, + &desc_mem->pru_bad_surface, &desc_mem->ltm_surface, + }; + + BUILD_BUG_ON(ARRAY_SIZE(stats_surfaces) != + ARRAY_SIZE(meminfo_surfaces)); + + for (i = 0; i < ARRAY_SIZE(stats_surfaces); i++) { + err = capture_common_pin_and_get_iova(buffer_ctx, + stats_surfaces[i]->offset_hi, + stats_surfaces[i]->offset, + &meminfo_surfaces[i]->base_address, + &meminfo_surfaces[i]->size, + request_unpins); + if (err) + goto fail; + } + } + + /* pin engine status surface */ + err = capture_common_pin_and_get_iova(buffer_ctx, + desc->engine_status.offset_hi, + desc->engine_status.offset, + &desc_mem->engine_status.base_address, + &desc_mem->engine_status.size, + request_unpins); +fail: + /* Unpin cleanup is done in isp_capture_request_unpin() */ + return err; +} + +int isp_capture_request( + struct tegra_isp_channel *chan, + struct isp_capture_req *req) +{ + struct isp_capture *capture = chan->capture_data; + struct CAPTURE_MSG capture_msg; + uint32_t request_offset; + int err = 0; + + if 
(capture == NULL) { + dev_err(chan->isp_dev, + "%s: isp capture uninitialized\n", __func__); + return -ENODEV; + } + + if (capture->channel_id == CAPTURE_CHANNEL_ISP_INVALID_ID) { + dev_err(chan->isp_dev, + "%s: setup channel first\n", __func__); + return -ENODEV; + } + + if (req == NULL) { + dev_err(chan->isp_dev, + "%s: Invalid req\n", __func__); + return -EINVAL; + } + + if (capture->capture_desc_ctx.unpins_list == NULL) { + dev_err(chan->isp_dev, "Channel setup incomplete\n"); + return -EINVAL; + } + + if (req->buffer_index >= capture->capture_desc_ctx.queue_depth) { + dev_err(chan->isp_dev, "buffer index is out of bound\n"); + return -EINVAL; + } + + spec_bar(); + + mutex_lock(&capture->reset_lock); + if (capture->reset_capture_flag) { + /* consume any pending completions when coming out of reset */ + while (try_wait_for_completion(&capture->capture_resp)) + ; /* do nothing */ + } + capture->reset_capture_flag = false; + mutex_unlock(&capture->reset_lock); + + memset(&capture_msg, 0, sizeof(capture_msg)); + capture_msg.header.msg_id = CAPTURE_ISP_REQUEST_REQ; + capture_msg.header.channel_id = capture->channel_id; + capture_msg.capture_isp_request_req.buffer_index = req->buffer_index; + + request_offset = req->buffer_index * + capture->capture_desc_ctx.request_size; + + err = isp_capture_setup_inputfences(chan, req, request_offset); + if (err < 0) { + dev_err(chan->isp_dev, "failed to setup inputfences\n"); + goto fail; + } + + err = isp_capture_setup_prefences(chan, req, request_offset); + if (err < 0) { + dev_err(chan->isp_dev, "failed to setup prefences\n"); + goto fail; + } + + mutex_lock(&capture->capture_desc_ctx.unpins_list_lock); + + if (capture->capture_desc_ctx.unpins_list[req->buffer_index].num_unpins != 0U) { + dev_err(chan->isp_dev, + "%s: descriptor is still in use by rtcpu\n", + __func__); + mutex_unlock(&capture->capture_desc_ctx.unpins_list_lock); + return -EBUSY; + } + + err = pin_isp_capture_request_buffers_locked(chan, req, + 
&capture->capture_desc_ctx.unpins_list[req->buffer_index]); + + mutex_unlock(&capture->capture_desc_ctx.unpins_list_lock); + + if (err < 0) { + dev_err(chan->isp_dev, "%s failed to pin request buffers\n", + __func__); + goto fail; + } + + nv_camera_log_submit( + chan->ndev, + capture->progress_sp.id, + capture->progress_sp.threshold, + capture_msg.header.channel_id, + __arch_counter_get_cntvct()); + + dev_dbg(chan->isp_dev, "%s: sending chan_id %u msg_id %u buf:%u\n", + __func__, capture_msg.header.channel_id, + capture_msg.header.msg_id, req->buffer_index); + + + err = tegra_capture_ivc_capture_submit(&capture_msg, + sizeof(capture_msg)); + if (err < 0) { + dev_err(chan->isp_dev, "IVC capture submit failed\n"); + goto fail; + } + + return 0; + +fail: + isp_capture_request_unpin(chan, req->buffer_index); + return err; +} + +int isp_capture_status( + struct tegra_isp_channel *chan, + int32_t timeout_ms) +{ + struct isp_capture *capture = chan->capture_data; + int err = 0; + + nv_camera_log(chan->ndev, + __arch_counter_get_cntvct(), + NVHOST_CAMERA_ISP_CAPTURE_STATUS); + + if (capture == NULL) { + dev_err(chan->isp_dev, + "%s: isp capture uninitialized\n", __func__); + return -ENODEV; + } + + if (capture->channel_id == CAPTURE_CHANNEL_ISP_INVALID_ID) { + dev_err(chan->isp_dev, + "%s: setup channel first\n", __func__); + return -ENODEV; + } + + /* negative timeout means wait forever */ + if (timeout_ms < 0) { + err = wait_for_completion_killable(&capture->capture_resp); + } else { + err = wait_for_completion_killable_timeout( + &capture->capture_resp, + msecs_to_jiffies(timeout_ms)); + if (err == 0) { + dev_dbg(chan->isp_dev, + "isp capture status timed out\n"); + return -ETIMEDOUT; + } + } + + if (err < 0) { + dev_err(chan->isp_dev, + "wait for capture status failed\n"); + return err; + } + + mutex_lock(&capture->reset_lock); + if (capture->reset_capture_flag) { + mutex_unlock(&capture->reset_lock); + return -EIO; + } + mutex_unlock(&capture->reset_lock); + + return 
0; +} + +int isp_capture_program_request( + struct tegra_isp_channel *chan, + struct isp_program_req *req) +{ + struct isp_capture *capture = chan->capture_data; + struct CAPTURE_MSG capture_msg; + int err = 0; + + nv_camera_log(chan->ndev, + __arch_counter_get_cntvct(), + NVHOST_CAMERA_ISP_CAPTURE_PROGRAM_REQUEST); + + err = isp_capture_program_prepare(chan, req); + if (err < 0) { + /* no cleanup needed */ + return err; + } + + memset(&capture_msg, 0, sizeof(capture_msg)); + capture_msg.header.msg_id = CAPTURE_ISP_PROGRAM_REQUEST_REQ; + capture_msg.header.channel_id = capture->channel_id; + capture_msg.capture_isp_program_request_req.buffer_index = + req->buffer_index; + + dev_dbg(chan->isp_dev, "%s: sending chan_id %u msg_id %u buf:%u\n", + __func__, capture_msg.header.channel_id, + capture_msg.header.msg_id, req->buffer_index); + + err = tegra_capture_ivc_capture_submit(&capture_msg, + sizeof(capture_msg)); + if (err < 0) { + dev_err(chan->isp_dev, "IVC program submit failed\n"); + isp_capture_program_request_unpin(chan, req->buffer_index); + return err; + } + + return 0; +} + +int isp_capture_program_status( + struct tegra_isp_channel *chan) +{ + struct isp_capture *capture = chan->capture_data; + int err = 0; + + nv_camera_log(chan->ndev, + __arch_counter_get_cntvct(), + NVHOST_CAMERA_ISP_CAPTURE_PROGRAM_STATUS); + + if (capture == NULL) { + dev_err(chan->isp_dev, + "%s: isp capture uninitialized\n", __func__); + return -ENODEV; + } + + if (capture->channel_id == CAPTURE_CHANNEL_ISP_INVALID_ID) { + dev_err(chan->isp_dev, + "%s: setup channel first\n", __func__); + return -ENODEV; + } + + dev_dbg(chan->isp_dev, "%s: waiting for isp program status\n", + __func__); + + /* no timeout as an isp_program may get used for mutliple frames */ + err = wait_for_completion_killable(&capture->capture_program_resp); + if (err < 0) { + dev_err(chan->isp_dev, + "isp program status wait failed\n"); + return err; + } + + mutex_lock(&capture->reset_lock); + if 
(capture->reset_capture_program_flag) { + mutex_unlock(&capture->reset_lock); + return -EIO; + } + mutex_unlock(&capture->reset_lock); + + return 0; +} + +int isp_capture_request_ex( + struct tegra_isp_channel *chan, + struct isp_capture_req_ex *req) +{ + int err; + + nv_camera_log(chan->ndev, + __arch_counter_get_cntvct(), + NVHOST_CAMERA_ISP_CAPTURE_REQUEST_EX); + + if (req->program_req.buffer_index == U32_MAX) { + /* forward to process request */ + return isp_capture_request(chan, &req->capture_req); + } + + err = isp_capture_program_prepare(chan, &req->program_req); + + if (err < 0) { + /* no cleanup required */ + return err; + } + + err = isp_capture_request(chan, &req->capture_req); + + if (err < 0) { + /* unpin prepared program */ + isp_capture_program_request_unpin( + chan, req->program_req.buffer_index); + } + + return err; +} + +int isp_capture_set_progress_status_notifier( + struct tegra_isp_channel *chan, + struct isp_capture_progress_status_req *req) +{ + int err = 0; + struct isp_capture *capture = chan->capture_data; + + nv_camera_log(chan->ndev, + __arch_counter_get_cntvct(), + NVHOST_CAMERA_ISP_CAPTURE_SET_PROGRESS_STATUS); + + if (req->mem == 0 || + req->process_buffer_depth == 0) { + dev_err(chan->isp_dev, + "%s: process request buffer is invalid\n", + __func__); + return -EINVAL; + } + + if (req->mem == 0 || + req->program_buffer_depth == 0) { + dev_err(chan->isp_dev, + "%s: program request buffer is invalid\n", + __func__); + return -EINVAL; + } + + if (capture == NULL) { + dev_err(chan->isp_dev, + "%s: isp capture uninitialized\n", __func__); + return -ENODEV; + } + + if (req->process_buffer_depth < capture->capture_desc_ctx.queue_depth) { + dev_err(chan->isp_dev, + "%s: Process progress status buffer smaller than queue depth\n", + __func__); + return -EINVAL; + } + + if (req->program_buffer_depth < capture->program_desc_ctx.queue_depth) { + dev_err(chan->isp_dev, + "%s: Program progress status buffer smaller than queue depth\n", + __func__); 
+ return -EINVAL; + } + + if (req->process_buffer_depth > U32_MAX - req->program_buffer_depth) { + dev_err(chan->isp_dev, + "%s: Process and Program status buffer larger than expected\n", + __func__); + return -EINVAL; + } + + if ((req->process_buffer_depth + req->program_buffer_depth) > + (U32_MAX / sizeof(uint32_t))) { + dev_err(chan->isp_dev, + "%s: Process and Program status buffer larger than expected\n", + __func__); + return -EINVAL; + } + + /* Setup the progress status buffer */ + err = capture_common_setup_progress_status_notifier( + &capture->progress_status_notifier, + req->mem, + (req->process_buffer_depth + req->program_buffer_depth) * + sizeof(uint32_t), + req->mem_offset); + + if (err < 0) { + dev_err(chan->isp_dev, + "%s: Process progress status setup failed\n", + __func__); + return -EFAULT; + } + + dev_dbg(chan->isp_dev, "Progress status mem offset %u\n", + req->mem_offset); + dev_dbg(chan->isp_dev, "Process buffer depth %u\n", + req->process_buffer_depth); + dev_dbg(chan->isp_dev, "Program buffer depth %u\n", + req->program_buffer_depth); + + capture->capture_desc_ctx.progress_status_buffer_depth = + req->process_buffer_depth; + capture->program_desc_ctx.progress_status_buffer_depth = + req->program_buffer_depth; + + capture->is_progress_status_notifier_set = true; + return err; +} + +int isp_capture_buffer_request( + struct tegra_isp_channel *chan, + struct isp_buffer_req *req) +{ + struct isp_capture *capture = chan->capture_data; + int err; + + err = capture_buffer_request( + capture->buffer_ctx, req->mem, req->flag); + return err; +} diff --git a/drivers/media/platform/tegra/camera/fusa-capture/capture-vi-channel.c b/drivers/media/platform/tegra/camera/fusa-capture/capture-vi-channel.c new file mode 100644 index 00000000..6b91d390 --- /dev/null +++ b/drivers/media/platform/tegra/camera/fusa-capture/capture-vi-channel.c @@ -0,0 +1,767 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2017-2022 NVIDIA Corporation. All rights reserved. 

/**
 * @file drivers/media/platform/tegra/camera/fusa-capture/capture-vi-channel.c
 *
 * @brief VI channel character device driver for the T186/T194 Camera RTCPU
 * platform.
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/**
 * @defgroup VI_CHANNEL_IOCTLS
 *
 * @brief VI channel character device IOCTL API
 *
 * Clients in the UMD may open sysfs character devices representing VI channels,
 * and perform configuration and enqueue buffers in capture requests to the
 * low-level RCE subsystem via these IOCTLs.
 *
 * @{
 */

/**
 * @brief Set up VI channel resources and request FW channel allocation in RCE.
 *
 * Initialize the VI channel context and synchronization primitives, pin memory
 * for the capture descriptor queue, set up the buffer management table,
 * initialize the capture/capture-control IVC channels and request VI FW channel
 * allocation in RCE.
 *
 * @param[in]	ptr	Pointer to a struct @ref vi_capture_setup
 *
 * @returns	0 (success), neg. errno (failure)
 */
#define VI_CAPTURE_SETUP \
	_IOW('I', 1, struct vi_capture_setup)

/**
 * @brief Release the VI FW channel allocation in RCE, and all resources and
 * contexts in the KMD.
 *
 * @param[in]	reset_flags	uint32_t bitmask of
 *				@ref CAPTURE_CHANNEL_RESET_FLAGS
 *
 * @returns	0 (success), neg. errno (failure)
 */
#define VI_CAPTURE_RELEASE \
	_IOW('I', 2, __u32)

/**
 * @brief Execute a blocking capture-control IVC request to RCE.
 *
 * @param[in]	ptr	Pointer to a struct @ref vi_capture_control_msg
 *
 * @returns	0 (success), neg. errno (failure)
 */
#define VI_CAPTURE_SET_CONFIG \
	_IOW('I', 3, struct vi_capture_control_msg)

/**
 * @brief Reset the VI channel in RCE synchronously w/ the KMD; all pending
 * capture descriptors in the queue are discarded and syncpoint values
 * fast-forwarded to unblock waiting clients.
+ * + * @param[in] reset_flags uint32_t bitmask of + * @ref CAPTURE_CHANNEL_RESET_FLAGS + + * @returns 0 (success), neg. errno (failure) + */ +#define VI_CAPTURE_RESET \ + _IOW('I', 4, __u32) + +/** + * @brief Retrieve the ids and current values of the progress, embedded data and + * line timer syncpoints, and VI HW channel(s) allocated by RCE. + * + * If successful, the queried values are written back to the input struct. + * + * @param[in,out] ptr Pointer to a struct @ref vi_capture_info + * + * @returns 0 (success), neg. errno (failure) + */ +#define VI_CAPTURE_GET_INFO \ + _IOR('I', 5, struct vi_capture_info) + +/** + * @brief Enqueue a capture request to RCE, the addresses to surface buffers in + * the descriptor (referenced by the buffer_index) are pinned and patched. + * + * The payload shall be a pointer to a struct @ref vi_capture_req. + * + * @param[in] ptr Pointer to a struct @ref vi_capture_compand + * + * @returns 0 (success), neg. errno (failure) + */ +#define VI_CAPTURE_REQUEST \ + _IOW('I', 6, struct vi_capture_req) + +/** + * Wait on the next completion of an enqueued frame, signalled by RCE. The + * status in the frame's capture descriptor is safe to read when this completes + * w/o a -ETIMEDOUT or other error. + * + * @note This call completes for the frame at the head of the FIFO queue, and is + * not necessarily for the most recently enqueued capture request. + * + * @param[in] timeout_ms uint32_t timeout [ms], 0 for indefinite + * + * @returns 0 (success), neg. errno (failure) + */ +#define VI_CAPTURE_STATUS \ + _IOW('I', 7, __u32) + +/** + * @brief Set up the capture progress status notifier array, which is a + * replacement for the blocking @ref VI_CAPTURE_STATUS call; allowing for + * out-of-order frame completion notifications. + * + * The values written by the KMD are any of the + * @ref CAPTURE_PROGRESS_NOTIFIER_STATES. + * + * @param[in] ptr Pointer to a struct @ref vi_capture_progress_status_req + * + * @returns 0 (success), neg. 
errno (failure) + */ +#define VI_CAPTURE_SET_PROGRESS_STATUS_NOTIFIER \ + _IOW('I', 9, struct vi_capture_progress_status_req) + +/** + * @brief Perform an operation on the surface buffer by setting the bitwise + * @a flag field with @ref CAPTURE_BUFFER_OPS flags. + * + * @param[in] ptr Pointer to a struct @ref vi_buffer_req + * @returns 0 (success), neg. errno (failure) + */ +#define VI_CAPTURE_BUFFER_REQUEST \ + _IOW('I', 10, struct vi_buffer_req) + +/** @} */ + +void vi_capture_request_unpin( + struct tegra_vi_channel *chan, + uint32_t buffer_index) +{ + struct vi_capture *capture = chan->capture_data; + struct capture_common_unpins *unpins; + int i = 0; + + mutex_lock(&capture->unpins_list_lock); + unpins = &capture->unpins_list[buffer_index]; + + if (unpins->num_unpins != 0) { + for (i = 0; i < unpins->num_unpins; i++) { + if (capture->buf_ctx != NULL && unpins->data[i] != NULL) + put_mapping(capture->buf_ctx, unpins->data[i]); + } + (void)memset(unpins, 0U,sizeof(*unpins)); + } + mutex_unlock(&capture->unpins_list_lock); +} +EXPORT_SYMBOL(vi_capture_request_unpin); + +static struct vi_channel_drv *chdrv_; +static DEFINE_MUTEX(chdrv_lock); + +struct tegra_vi_channel *vi_channel_open_ex( + unsigned int channel, + bool is_mem_pinned) +{ + struct tegra_vi_channel *chan; + struct vi_channel_drv *chan_drv; + int err; + + if (mutex_lock_interruptible(&chdrv_lock)) + return ERR_PTR(-ERESTARTSYS); + + chan_drv = chdrv_; + + if (chan_drv == NULL || channel >= chan_drv->num_channels) { + mutex_unlock(&chdrv_lock); + return ERR_PTR(-ENODEV); + } + mutex_unlock(&chdrv_lock); + + chan = kzalloc(sizeof(*chan), GFP_KERNEL); + if (unlikely(chan == NULL)) + return ERR_PTR(-ENOMEM); + + chan->drv = chan_drv; + if (chan_drv->use_legacy_path) { + chan->dev = chan_drv->dev; + chan->ndev = chan_drv->ndev; + } else + chan->vi_capture_pdev = chan_drv->vi_capture_pdev; + + chan->ops = chan_drv->ops; + + err = vi_capture_init(chan, is_mem_pinned); + if (err < 0) + goto error; + + 
mutex_lock(&chan_drv->lock); + if (rcu_access_pointer(chan_drv->channels[channel]) != NULL) { + mutex_unlock(&chan_drv->lock); + err = -EBUSY; + goto rcu_err; + } + + rcu_assign_pointer(chan_drv->channels[channel], chan); + mutex_unlock(&chan_drv->lock); + + return chan; + +rcu_err: + vi_capture_shutdown(chan); +error: + kfree(chan); + return ERR_PTR(err); +} +EXPORT_SYMBOL(vi_channel_open_ex); + +int vi_channel_close_ex( + unsigned int channel, + struct tegra_vi_channel *chan) +{ + struct vi_channel_drv *chan_drv = chan->drv; + + vi_capture_shutdown(chan); + + mutex_lock(&chan_drv->lock); + + WARN_ON(rcu_access_pointer(chan_drv->channels[channel]) != chan); + RCU_INIT_POINTER(chan_drv->channels[channel], NULL); + + mutex_unlock(&chan_drv->lock); + kfree_rcu(chan, rcu); + + return 0; +} +EXPORT_SYMBOL(vi_channel_close_ex); + +/** + * @brief Open a VI channel character device node; pass parameters to + * @ref vi_channel_open_ex subroutine to complete initialization. + * + * This is the @a open file operation handler for a VI channel node. + * + * @param[in] inode VI channel character device inode struct + * @param[in] file VI channel character device file struct + * + * @returns 0 (success), neg. errno (failure) + */ +static int vi_channel_open( + struct inode *inode, + struct file *file) +{ + unsigned int channel = iminor(inode); + struct tegra_vi_channel *chan; + + chan = vi_channel_open_ex(channel, true); + if (IS_ERR(chan)) + return PTR_ERR(chan); + + file->private_data = chan; + + return nonseekable_open(inode, file); +} + +/** + * @brief Release a VI channel character device node; pass parameters to + * @ref vi_channel_close_ex subroutine to complete release. + * + * This is the @a release file operation handler for a VI channel node. 
+ * + * @param[in] inode VI channel character device inode struct + * @param[in] file VI channel character device file struct + * + * @returns 0 + */ +static int vi_channel_release( + struct inode *inode, + struct file *file) +{ + struct tegra_vi_channel *chan = file->private_data; + unsigned int channel = iminor(inode); + + vi_channel_close_ex(channel, chan); + + return 0; +} + +/** + * Pin/map buffers and save iova boundaries into corresponding + * memoryinfo struct. + */ +static int pin_vi_capture_request_buffers_locked(struct tegra_vi_channel *chan, + struct vi_capture_req *req, + struct capture_common_unpins *request_unpins) +{ + struct vi_capture *capture = chan->capture_data; + struct capture_descriptor* desc = (struct capture_descriptor*) + (capture->requests.va + + req->buffer_index * capture->request_size); + + struct capture_descriptor_memoryinfo* desc_mem = + &capture->requests_memoryinfo[req->buffer_index]; + int i; + int err = 0; + + /* Buffer count: ATOMP surfaces + engine_surface */ + BUG_ON(VI_NUM_ATOMP_SURFACES + 1U >= MAX_PIN_BUFFER_PER_REQUEST); + + for (i = 0; i < VI_NUM_ATOMP_SURFACES; i++) { + err = capture_common_pin_and_get_iova(capture->buf_ctx, + desc->ch_cfg.atomp.surface[i].offset_hi, + desc->ch_cfg.atomp.surface[i].offset, + &desc_mem->surface[i].base_address, &desc_mem->surface[i].size, + request_unpins); + + if (err) { + dev_err(chan->dev, "%s: get atomp iova failed\n", __func__); + goto fail; + } + } + + err = capture_common_pin_and_get_iova(capture->buf_ctx, + desc->engine_status.offset_hi, + desc->engine_status.offset, + &desc_mem->engine_status_surface_base_address, + &desc_mem->engine_status_surface_size, + request_unpins); + + if (err) { + dev_err(chan->dev, "%s: get engine surf iova failed\n", __func__); + goto fail; + } + +fail: + /* Unpin cleanup is done in vi_capture_request_unpin() */ + return err; +} + +/** + * @brief Process an IOCTL call on a VI channel character device. 
+ * + * Depending on the specific IOCTL, the argument (@a arg) may be a pointer to a + * defined struct payload that is copied from or back to user-space. This memory + * is allocated and mapped from user-space and must be kept available until + * after the IOCTL call completes. + * + * This is the @a ioctl file operation handler for a VI channel node. + * + * @param[in] file VI channel character device file struct + * @param[in] cmd VI channel IOCTL command + * @param[in,out] arg IOCTL argument; numerical value or pointer + * + * @returns 0 (success), neg. errno (failure) + */ +static long vi_channel_ioctl( + struct file *file, + unsigned int cmd, + unsigned long arg) +{ + struct tegra_vi_channel *chan = file->private_data; + struct vi_capture *capture = chan->capture_data; + void __user *ptr = (void __user *)arg; + int err = -EFAULT; + + switch (_IOC_NR(cmd)) { + case _IOC_NR(VI_CAPTURE_SETUP): { + struct vi_capture_setup setup; + + if (copy_from_user(&setup, ptr, sizeof(setup))) + break; + + if (chan->drv->use_legacy_path == false) { + vi_get_nvhost_device(chan, &setup); + if (chan->dev == NULL) { + dev_err(&chan->vi_capture_pdev->dev, + "%s: channel device is NULL", + __func__); + return -EINVAL; + } + } + + + if (setup.request_size < sizeof(struct capture_descriptor)) { + dev_err(chan->dev, + "request size is too small to fit capture descriptor\n"); + return -EINVAL; + } + + if (capture->buf_ctx) { + dev_err(chan->dev, "vi buffer setup already done"); + return -EFAULT; + } + + capture->buf_ctx = create_buffer_table(chan->dev); + if (capture->buf_ctx == NULL) { + dev_err(chan->dev, "vi buffer setup failed"); + break; + } + + /* pin the capture descriptor ring buffer */ + err = capture_common_pin_memory(capture->rtcpu_dev, + setup.mem, &capture->requests); + if (err < 0) { + dev_err(chan->dev, + "%s: memory setup failed\n", __func__); + destroy_buffer_table(capture->buf_ctx); + capture->buf_ctx = NULL; + return -EFAULT; + } + + /* Check that buffer size matches 
queue depth */ + if ((capture->requests.buf->size / setup.request_size) < + setup.queue_depth) { + dev_err(chan->dev, + "%s: descriptor buffer is too small for given queue depth\n", + __func__); + capture_common_unpin_memory(&capture->requests); + destroy_buffer_table(capture->buf_ctx); + capture->buf_ctx = NULL; + return -ENOMEM; + } + + setup.iova = capture->requests.iova; + err = vi_capture_setup(chan, &setup); + if (err < 0) { + dev_err(chan->dev, "vi capture setup failed\n"); + capture_common_unpin_memory(&capture->requests); + destroy_buffer_table(capture->buf_ctx); + capture->buf_ctx = NULL; + return err; + } + break; + } + + case _IOC_NR(VI_CAPTURE_RESET): { + uint32_t reset_flags; + int i; + + if (copy_from_user(&reset_flags, ptr, sizeof(reset_flags))) + break; + + err = vi_capture_reset(chan, reset_flags); + if (err < 0) + dev_err(chan->dev, "vi capture reset failed\n"); + else { + for (i = 0; i < capture->queue_depth; i++) + vi_capture_request_unpin(chan, i); + } + + break; + } + + case _IOC_NR(VI_CAPTURE_RELEASE): { + uint32_t reset_flags; + int i; + + if (copy_from_user(&reset_flags, ptr, sizeof(reset_flags))) + break; + + err = vi_capture_release(chan, reset_flags); + if (err < 0) + dev_err(chan->dev, "vi capture release failed\n"); + else { + for (i = 0; i < capture->queue_depth; i++) + vi_capture_request_unpin(chan, i); + capture_common_unpin_memory(&capture->requests); + destroy_buffer_table(capture->buf_ctx); + capture->buf_ctx = NULL; + vfree(capture->unpins_list); + capture->unpins_list = NULL; + } + + break; + } + + case _IOC_NR(VI_CAPTURE_GET_INFO): { + struct vi_capture_info info; + (void)memset(&info, 0, sizeof(info)); + + err = vi_capture_get_info(chan, &info); + if (err < 0) { + dev_err(chan->dev, "vi capture get info failed\n"); + break; + } + if (copy_to_user(ptr, &info, sizeof(info))) + err = -EFAULT; + break; + } + + case _IOC_NR(VI_CAPTURE_SET_CONFIG): { + struct vi_capture_control_msg msg; + + if (copy_from_user(&msg, ptr, 
sizeof(msg))) + break; + err = vi_capture_control_message_from_user(chan, &msg); + if (err < 0) + dev_err(chan->dev, "vi capture set config failed\n"); + break; + } + + case _IOC_NR(VI_CAPTURE_REQUEST): { + struct vi_capture_req req; + struct capture_common_unpins *request_unpins; + + if (copy_from_user(&req, ptr, sizeof(req))) + break; + + if (req.num_relocs == 0) { + dev_err(chan->dev, "request must have non-zero relocs\n"); + return -EINVAL; + } + + if (req.buffer_index >= capture->queue_depth) { + dev_err(chan->dev, "buffer index is out of bound\n"); + return -EINVAL; + } + + /* Don't let to speculate with invalid buffer_index value */ + spec_bar(); + + if (capture->unpins_list == NULL) { + dev_err(chan->dev, "Channel setup incomplete\n"); + return -EINVAL; + } + + mutex_lock(&capture->unpins_list_lock); + + request_unpins = &capture->unpins_list[req.buffer_index]; + + if (request_unpins->num_unpins != 0U) { + dev_err(chan->dev, "Descriptor is still in use by rtcpu\n"); + mutex_unlock(&capture->unpins_list_lock); + return -EBUSY; + } + err = pin_vi_capture_request_buffers_locked(chan, &req, + request_unpins); + + mutex_unlock(&capture->unpins_list_lock); + + if (err < 0) { + dev_err(chan->dev, + "pin request failed\n"); + vi_capture_request_unpin(chan, req.buffer_index); + break; + } + + err = vi_capture_request(chan, &req); + if (err < 0) { + dev_err(chan->dev, + "vi capture request submit failed\n"); + vi_capture_request_unpin(chan, req.buffer_index); + } + + break; + } + + case _IOC_NR(VI_CAPTURE_STATUS): { + uint32_t timeout_ms; + + if (copy_from_user(&timeout_ms, ptr, sizeof(timeout_ms))) + break; + err = vi_capture_status(chan, timeout_ms); + if (err < 0) + dev_err(chan->dev, + "vi capture get status failed\n"); + break; + } + + case _IOC_NR(VI_CAPTURE_SET_PROGRESS_STATUS_NOTIFIER): { + struct vi_capture_progress_status_req req; + + if (copy_from_user(&req, ptr, sizeof(req))) + break; + err = vi_capture_set_progress_status_notifier(chan, &req); + if (err 
< 0) + dev_err(chan->dev, + "setting progress status buffer failed\n"); + break; + } + + case _IOC_NR(VI_CAPTURE_BUFFER_REQUEST): { + struct vi_buffer_req req; + + if (copy_from_user(&req, ptr, sizeof(req)) != 0U) + break; + + err = capture_buffer_request( + capture->buf_ctx, req.mem, req.flag); + if (err < 0) + dev_err(chan->dev, "vi buffer request failed\n"); + break; + } + + default: { + dev_err(chan->dev, "%s:Unknown ioctl\n", __func__); + return -ENOIOCTLCMD; + } + } + + return err; +} + +static const struct file_operations vi_channel_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .unlocked_ioctl = vi_channel_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = vi_channel_ioctl, +#endif + .open = vi_channel_open, + .release = vi_channel_release, +}; + +/* Character device */ +static struct class *vi_channel_class; +static int vi_channel_major; + +int vi_channel_drv_register( + struct platform_device *ndev, + unsigned int max_vi_channels) +{ + struct vi_channel_drv *chan_drv; + int err = 0; + unsigned int i; + + chan_drv = devm_kzalloc(&ndev->dev, sizeof(*chan_drv) + + max_vi_channels * sizeof(struct tegra_vi_channel *), + GFP_KERNEL); + if (unlikely(chan_drv == NULL)) + return -ENOMEM; + + if (strstr(ndev->name, "tegra-capture-vi") == NULL) { + chan_drv->use_legacy_path = true; + chan_drv->dev = &ndev->dev; + chan_drv->ndev = ndev; + } else { + chan_drv->use_legacy_path = false; + chan_drv->dev = NULL; + chan_drv->ndev = NULL; + chan_drv->vi_capture_pdev = ndev; + } + chan_drv->num_channels = max_vi_channels; + mutex_init(&chan_drv->lock); + + mutex_lock(&chdrv_lock); + if (chdrv_ != NULL) { + mutex_unlock(&chdrv_lock); + WARN_ON(1); + err = -EBUSY; + goto error; + } + chdrv_ = chan_drv; + mutex_unlock(&chdrv_lock); + + for (i = 0; i < chan_drv->num_channels; i++) { + dev_t devt = MKDEV(vi_channel_major, i); + + struct device *dev = + (chan_drv->use_legacy_path)?chan_drv->dev : + &chan_drv->vi_capture_pdev->dev; + device_create(vi_channel_class, dev, 
devt, NULL, + "capture-vi-channel%u", i); + } + + return 0; + +error: + return err; +} +EXPORT_SYMBOL(vi_channel_drv_register); + +int vi_channel_drv_fops_register( + const struct vi_channel_drv_ops *ops) +{ + int err = 0; + struct vi_channel_drv *chan_drv; + + chan_drv = chdrv_; + if (chan_drv == NULL) { + err = -EPROBE_DEFER; + goto error; + } + + mutex_lock(&chdrv_lock); + if (chan_drv->ops == NULL) + chan_drv->ops = ops; + else + dev_warn(chan_drv->dev, "fops function table already registered\n"); + mutex_unlock(&chdrv_lock); + + return 0; + +error: + return err; +} +EXPORT_SYMBOL(vi_channel_drv_fops_register); + +void vi_channel_drv_unregister( + struct device *dev) +{ + struct vi_channel_drv *chan_drv; + unsigned int i; + + mutex_lock(&chdrv_lock); + chan_drv = chdrv_; + chdrv_ = NULL; + WARN_ON(chan_drv->dev != dev); + mutex_unlock(&chdrv_lock); + + for (i = 0; i < chan_drv->num_channels; i++) { + dev_t devt = MKDEV(vi_channel_major, i); + + device_destroy(vi_channel_class, devt); + } + + devm_kfree(chan_drv->dev, chan_drv); +} +EXPORT_SYMBOL(vi_channel_drv_unregister); + +/** + * @brief Initialize the VI channel driver device (major). + * + * @returns 0 (success), PTR_ERR or neg. VI channel major no. (failure) + */ +int vi_channel_drv_init(void) +{ + vi_channel_class = class_create(THIS_MODULE, "capture-vi-channel"); + if (IS_ERR(vi_channel_class)) + return PTR_ERR(vi_channel_class); + + vi_channel_major = register_chrdev(0, "capture-vi-channel", + &vi_channel_fops); + if (vi_channel_major < 0) { + class_destroy(vi_channel_class); + return vi_channel_major; + } + + return 0; +} + +/** + * @brief De-initialize the VI channel driver device (major). 
+ */ +void vi_channel_drv_exit(void) +{ + unregister_chrdev(vi_channel_major, "capture-vi-channel"); + class_destroy(vi_channel_class); +} diff --git a/drivers/media/platform/tegra/camera/fusa-capture/capture-vi.c b/drivers/media/platform/tegra/camera/fusa-capture/capture-vi.c new file mode 100644 index 00000000..35883e4e --- /dev/null +++ b/drivers/media/platform/tegra/camera/fusa-capture/capture-vi.c @@ -0,0 +1,1751 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2017-2022 NVIDIA Corporation. All rights reserved. + +/** + * @file drivers/media/platform/tegra/camera/fusa-capture/capture-vi.c + * + * @brief VI channel operations for the T186/T194 Camera RTCPU platform. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include "soc/tegra/camrtc-capture.h" +#include "soc/tegra/camrtc-capture-messages.h" +#include +#include + +#include +#include +#include +#include +#include "camera/vi/vi5_fops.h" + +/** + * @brief Invalid VI channel ID; the channel is not initialized. + */ +#define CAPTURE_CHANNEL_INVALID_ID U16_C(0xFFFF) + +/** + * @brief Invalid VI channel mask; no channels are allocated. + */ +#define CAPTURE_CHANNEL_INVALID_MASK U64_C(0x0) + +/** + * @brief Invalid NVCSI stream ID; the stream is not initialized. + */ +#define NVCSI_STREAM_INVALID_ID U32_C(0xFFFF) + +/** + * @brief INVALID NVCSI TPG virtual channel ID; the TPG stream is not enabled. + */ +#define NVCSI_STREAM_INVALID_TPG_VC_ID U32_C(0xFFFF) + +/** + * @brief The default number of VI channels to be used if not specified in + * the device tree. + */ +#define DEFAULT_VI_CHANNELS U32_C(64) + +/** + * @brief Maximum number of VI devices supported. + */ +#define MAX_VI_UNITS U32_C(0x2) + +/** + * @brief Invalid VI unit ID, to initialize vi-mapping table before parsing DT. + */ +#define INVALID_VI_UNIT_ID U32_C(0xFFFF) + +/** + * @brief Maximum number of NVCSI streams supported. 
+ */ +#define MAX_NVCSI_STREAM_IDS U32_C(0x6) + +/** + * @brief Maximum number of virtual channel supported per stream. + */ +#define MAX_VIRTUAL_CHANNEL_PER_STREAM U32_C(16) + +/** + * @brief A 2-D array for storing all possible tegra_vi_channel struct pointers. + */ +static struct tegra_vi_channel *channels[MAX_NVCSI_STREAM_IDS][MAX_VIRTUAL_CHANNEL_PER_STREAM]; +/** + * @brief Names of VI-unit and CSI-stream mapping elements in device-tree node + */ +static const char * const vi_mapping_elements[] = { + "csi-stream-id", + "vi-unit-id" +}; + +/** + * @brief The Capture-VI standalone driver context. + */ +struct tegra_capture_vi_data { + struct vi vi_common; /**< VI device context */ + uint32_t num_vi_devices; /**< Number of available VI devices */ + struct platform_device *vi_pdevices[MAX_VI_UNITS]; + /**< VI nvhost client platform device for each VI instance */ + uint32_t max_vi_channels; + /**< Maximum number of VI capture channel devices */ + uint32_t num_csi_vi_maps; + /**< Number of NVCSI to VI mapping elements in the table */ + uint32_t vi_instance_table[MAX_NVCSI_STREAM_IDS]; + /**< NVCSI stream-id & VI instance mapping, read from the DT */ +}; + +/** + * @brief Initialize a VI syncpoint and get its GoS backing. + * + * @param[in] chan VI channel context + * @param[in] name Syncpoint name + * @param[in] enable Whether to initialize or just clear @a sp + * @param[out] sp Syncpoint handle + * + * @returns 0 (success), neg. 
errno (failure) + */ +static int vi_capture_setup_syncpt( + struct tegra_vi_channel *chan, + const char *name, + bool enable, + struct syncpoint_info *sp) +{ + struct platform_device *pdev = chan->ndev; + uint32_t gos_index, gos_offset; + int err; + + memset(sp, 0, sizeof(*sp)); + sp->gos_index = GOS_INDEX_INVALID; + + if (!enable) + return 0; + + err = chan->ops->alloc_syncpt(pdev, name, &sp->id); + if (err) + return err; + + err = nvhost_syncpt_read_ext_check(pdev, sp->id, &sp->threshold); + if (err) + goto cleanup; + + err = chan->ops->get_syncpt_gos_backing(pdev, sp->id, &sp->shim_addr, + &gos_index, &gos_offset); + if (err) + goto cleanup; + + sp->gos_index = gos_index; + sp->gos_offset = gos_offset; + + return 0; + +cleanup: + chan->ops->release_syncpt(pdev, sp->id); + memset(sp, 0, sizeof(*sp)); + + return err; +} + +/** + * @brief Release a VI syncpoint and clear its handle. + * + * @param[in] chan VI channel context + * @param[out] sp Syncpoint handle + */ +static void vi_capture_release_syncpt( + struct tegra_vi_channel *chan, + struct syncpoint_info *sp) +{ + if (sp->id) + chan->ops->release_syncpt(chan->ndev, sp->id); + + memset(sp, 0, sizeof(*sp)); +} + +/** + * @brief Release the VI channel progress, embedded data and line timer + * syncpoints. + * + * @param[in] chan VI channel context + */ +static void vi_capture_release_syncpts( + struct tegra_vi_channel *chan) +{ + struct vi_capture *capture = chan->capture_data; + + vi_capture_release_syncpt(chan, &capture->progress_sp); + vi_capture_release_syncpt(chan, &capture->embdata_sp); + vi_capture_release_syncpt(chan, &capture->linetimer_sp); +} + +/** + * @brief Set up the VI channel progress, embedded data and line timer + * syncpoints. + * + * @param[in] chan VI channel context + * @param[in] flags Bitmask for channel flags, see + * @ref CAPTURE_CHANNEL_FLAGS + * + * @returns 0 (success), neg. 
errno (failure) + */ +static int vi_capture_setup_syncpts( + struct tegra_vi_channel *chan, + uint32_t flags) +{ + struct vi_capture *capture = chan->capture_data; + int err = 0; + + chan->ops->get_gos_table(chan->ndev, + &capture->num_gos_tables, + &capture->gos_tables); + + err = vi_capture_setup_syncpt(chan, "progress", true, + &capture->progress_sp); + if (err < 0) + goto fail; + + err = vi_capture_setup_syncpt(chan, "embdata", + (flags & CAPTURE_CHANNEL_FLAG_EMBDATA) != 0, + &capture->embdata_sp); + if (err < 0) + goto fail; + + err = vi_capture_setup_syncpt(chan, "linetimer", + (flags & CAPTURE_CHANNEL_FLAG_LINETIMER) != 0, + &capture->linetimer_sp); + if (err < 0) + goto fail; + + return 0; + +fail: + vi_capture_release_syncpts(chan); + return err; +} + +/** + * @brief Read the value of a VI channel syncpoint. + * + * @param[in] chan VI channel context + * @param[in] sp Syncpoint handle + * @param[out] val Syncpoint value + * + * @returns 0 (success), neg. errno (failure) + */ +static int vi_capture_read_syncpt( + struct tegra_vi_channel *chan, + struct syncpoint_info *sp, + uint32_t *val) +{ + int err; + + if (sp->id) { + err = nvhost_syncpt_read_ext_check(chan->ndev, + sp->id, val); + if (err < 0) { + dev_err(chan->dev, + "%s: get syncpt %i val failed\n", __func__, + sp->id); + return -EINVAL; + } + } + + return 0; +} + +/** + * @brief VI channel callback function for @em capture IVC messages. 
+ * + * @param[in] ivc_resp IVC @ref CAPTURE_MSG from RCE + * @param[in] pcontext VI channel capture context + */ +static void vi_capture_ivc_status_callback( + const void *ivc_resp, + const void *pcontext) +{ + struct CAPTURE_MSG *status_msg = (struct CAPTURE_MSG *)ivc_resp; + struct vi_capture *capture = (struct vi_capture *)pcontext; + struct tegra_vi_channel *chan = capture->vi_channel; + uint32_t buffer_index; + + if (unlikely(capture == NULL)) { + dev_err(chan->dev, "%s: invalid context", __func__); + return; + } + + if (unlikely(status_msg == NULL)) { + dev_err(chan->dev, "%s: invalid response", __func__); + return; + } + + switch (status_msg->header.msg_id) { + case CAPTURE_STATUS_IND: + buffer_index = status_msg->capture_status_ind.buffer_index; + if (capture->is_mem_pinned) + vi_capture_request_unpin(chan, buffer_index); + dma_sync_single_range_for_cpu(capture->rtcpu_dev, + capture->requests.iova, + buffer_index * capture->request_size, + capture->request_size, DMA_FROM_DEVICE); + + if (capture->is_progress_status_notifier_set) { + capture_common_set_progress_status( + &capture->progress_status_notifier, + buffer_index, + capture->progress_status_buffer_depth, + PROGRESS_STATUS_DONE); + } else { + /* + * Only fire completions if not using + * the new progress status buffer mechanism + */ + complete(&capture->capture_resp); + } + dev_dbg(chan->dev, "%s: status chan_id %u msg_id %u\n", + __func__, status_msg->header.channel_id, + status_msg->header.msg_id); + break; + default: + dev_err(chan->dev, + "%s: unknown capture resp", __func__); + break; + } +} + +/** + * @brief Send a @em capture-control IVC message to RCE on a VI channel, and + * block w/ timeout, waiting for the RCE response. + * + * @param[in] chan VI channel context + * @param[in] msg IVC message payload + * @param[in] size Size of @a msg [byte] + * @param[in] resp_id IVC message identifier, see @CAPTURE_MSG_IDS + * + * @returns 0 (success), neg. 
errno (failure) + */ +static int vi_capture_ivc_send_control( + struct tegra_vi_channel *chan, + const struct CAPTURE_CONTROL_MSG *msg, + size_t size, + uint32_t resp_id) +{ + struct vi_capture *capture = chan->capture_data; + struct CAPTURE_MSG_HEADER resp_header = msg->header; + uint32_t timeout = HZ; + int err = 0; + + dev_dbg(chan->dev, "%s: sending chan_id %u msg_id %u\n", + __func__, resp_header.channel_id, resp_header.msg_id); + resp_header.msg_id = resp_id; + /* Send capture control IVC message */ + mutex_lock(&capture->control_msg_lock); + err = tegra_capture_ivc_control_submit(msg, size); + if (err < 0) { + dev_err(chan->dev, "IVC control submit failed\n"); + goto fail; + } + + timeout = wait_for_completion_timeout( + &capture->control_resp, timeout); + if (timeout <= 0) { + dev_err(chan->dev, + "capture control message timed out\n"); + err = -ETIMEDOUT; + goto fail; + } + + if (memcmp(&resp_header, &capture->control_resp_msg.header, + sizeof(resp_header)) != 0) { + dev_err(chan->dev, + "unexpected response from camera processor\n"); + err = -EINVAL; + goto fail; + } + + mutex_unlock(&capture->control_msg_lock); + dev_dbg(chan->dev, "%s: response chan_id %u msg_id %u\n", + __func__, capture->control_resp_msg.header.channel_id, + capture->control_resp_msg.header.msg_id); + return 0; + +fail: + mutex_unlock(&capture->control_msg_lock); + return err; +} + +/** + * @brief VI channel callback function for @em capture-control IVC messages, + * this unblocks the channel's @em capture-control completion. 
+ *
+ * @param[in]	ivc_resp	IVC @ref CAPTURE_CONTROL_MSG from RCE
+ * @param[in]	pcontext	VI channel capture context
+ */
+static void vi_capture_ivc_control_callback(
+	const void *ivc_resp,
+	const void *pcontext)
+{
+	const struct CAPTURE_CONTROL_MSG *control_msg = ivc_resp;
+	struct vi_capture *capture = (struct vi_capture *)pcontext;
+	struct tegra_vi_channel *chan;
+
+	/*
+	 * Validate the context before dereferencing it. The original code
+	 * read capture->vi_channel before the NULL check, so the check could
+	 * never run ahead of a potential NULL-pointer dereference. Use
+	 * pr_err() here since no device pointer is available yet.
+	 */
+	if (unlikely(capture == NULL)) {
+		pr_err("%s: invalid context", __func__);
+		return;
+	}
+	chan = capture->vi_channel;
+
+	if (unlikely(control_msg == NULL)) {
+		dev_err(chan->dev, "%s: invalid response", __func__);
+		return;
+	}
+
+	switch (control_msg->header.msg_id) {
+	case CAPTURE_CHANNEL_SETUP_RESP:
+	case CAPTURE_CHANNEL_RESET_RESP:
+	case CAPTURE_CHANNEL_RELEASE_RESP:
+	case CAPTURE_COMPAND_CONFIG_RESP:
+	case CAPTURE_PDAF_CONFIG_RESP:
+	case CAPTURE_SYNCGEN_ENABLE_RESP:
+	case CAPTURE_SYNCGEN_DISABLE_RESP:
+	case CAPTURE_PHY_STREAM_OPEN_RESP:
+	case CAPTURE_PHY_STREAM_CLOSE_RESP:
+	case CAPTURE_PHY_STREAM_DUMPREGS_RESP:
+	case CAPTURE_CSI_STREAM_SET_CONFIG_RESP:
+	case CAPTURE_CSI_STREAM_SET_PARAM_RESP:
+	case CAPTURE_CSI_STREAM_TPG_SET_CONFIG_RESP:
+	case CAPTURE_CSI_STREAM_TPG_START_RESP:
+	case CAPTURE_CSI_STREAM_TPG_START_RATE_RESP:
+	case CAPTURE_CSI_STREAM_TPG_APPLY_GAIN_RESP:
+	case CAPTURE_CSI_STREAM_TPG_STOP_RESP:
+	case CAPTURE_CHANNEL_EI_RESP:
+	case CAPTURE_HSM_CHANSEL_ERROR_MASK_RESP:
+		/* Stash the response and wake the blocked control sender */
+		memcpy(&capture->control_resp_msg, control_msg,
+			sizeof(*control_msg));
+		complete(&capture->control_resp);
+		break;
+	default:
+		dev_err(chan->dev,
+			"%s: unknown capture control resp 0x%x", __func__,
+			control_msg->header.msg_id);
+		break;
+	}
+}
+
+int vi_capture_init(
+	struct tegra_vi_channel *chan,
+	bool is_mem_pinned)
+{
+	struct vi_capture *capture;
+	struct device_node *dn;
+	struct platform_device *rtc_pdev;
+	struct device *dev;
+
+	if (chan->drv->use_legacy_path)
+		dev = chan->dev;
+	else
+		dev = &chan->vi_capture_pdev->dev;
+
+	dev_dbg(dev, "%s++\n", __func__);
+	dn = 
of_find_node_by_path("tegra-camera-rtcpu"); + if (of_device_is_available(dn) == 0) { + dev_err(dev, "failed to find rtcpu device node\n"); + return -ENODEV; + } + rtc_pdev = of_find_device_by_node(dn); + if (rtc_pdev == NULL) { + dev_err(dev, "failed to find rtcpu platform\n"); + return -ENODEV; + } + + capture = kzalloc(sizeof(*capture), GFP_KERNEL); + if (unlikely(capture == NULL)) { + dev_err(dev, "failed to allocate capture channel\n"); + return -ENOMEM; + } + + capture->rtcpu_dev = &rtc_pdev->dev; + + init_completion(&capture->control_resp); + init_completion(&capture->capture_resp); + + mutex_init(&capture->reset_lock); + mutex_init(&capture->control_msg_lock); + mutex_init(&capture->unpins_list_lock); + + capture->vi_channel = chan; + chan->capture_data = capture; + chan->rtcpu_dev = capture->rtcpu_dev; + + capture->is_mem_pinned = is_mem_pinned; + capture->channel_id = CAPTURE_CHANNEL_INVALID_ID; + + capture->stream_id = NVCSI_STREAM_INVALID_ID; + capture->csi_port = NVCSI_PORT_UNSPECIFIED; + capture->virtual_channel_id = NVCSI_STREAM_INVALID_TPG_VC_ID; + + return 0; +} +EXPORT_SYMBOL_GPL(vi_capture_init); + +void vi_capture_shutdown( + struct tegra_vi_channel *chan) +{ + struct vi_capture *capture = chan->capture_data; + + dev_dbg(chan->dev, "%s--\n", __func__); + if (capture == NULL) + return; + + if (capture->channel_id != CAPTURE_CHANNEL_INVALID_ID) + vi_capture_reset(chan, + CAPTURE_CHANNEL_RESET_FLAG_IMMEDIATE); + + if (capture->stream_id != NVCSI_STREAM_INVALID_ID) + csi_stream_release(chan); + + if (capture->channel_id != CAPTURE_CHANNEL_INVALID_ID) { + int i; + + vi_capture_release(chan, + CAPTURE_CHANNEL_RESET_FLAG_IMMEDIATE); + + if (capture->is_mem_pinned) { + for (i = 0; i < capture->queue_depth; i++) + vi_capture_request_unpin(chan, i); + } + capture_common_unpin_memory(&capture->requests); + if (capture->buf_ctx != NULL) { + destroy_buffer_table(capture->buf_ctx); + capture->buf_ctx = NULL; + } + + vfree(capture->unpins_list); + 
capture->unpins_list = NULL; + } + kfree(capture); + chan->capture_data = NULL; +} +EXPORT_SYMBOL_GPL(vi_capture_shutdown); + +void vi_get_nvhost_device( + struct tegra_vi_channel *chan, + struct vi_capture_setup *setup) +{ + uint32_t vi_inst = 0; + + struct tegra_capture_vi_data *info = + platform_get_drvdata(chan->vi_capture_pdev); + + vi_inst = info->vi_instance_table[setup->csi_stream_id]; + + if (vi_inst >= MAX_VI_UNITS) { + dev_err(&chan->vi_capture_pdev->dev, "Invalid VI device Id\n"); + chan->dev = NULL; + chan->ndev = NULL; + return; + } + vi_inst = array_index_nospec(vi_inst, MAX_VI_UNITS); + + chan->dev = &info->vi_pdevices[vi_inst]->dev; + chan->ndev = info->vi_pdevices[vi_inst]; +} +EXPORT_SYMBOL_GPL(vi_get_nvhost_device); + +struct device *vi_csi_stream_to_nvhost_device( + struct platform_device *pdev, + uint32_t csi_stream_id) +{ + struct tegra_capture_vi_data *info = platform_get_drvdata(pdev); + uint32_t vi_inst_id = 0; + + if (csi_stream_id >= MAX_NVCSI_STREAM_IDS) { + dev_err(&pdev->dev, "Invalid NVCSI stream Id\n"); + return NULL; + } + + vi_inst_id = info->vi_instance_table[csi_stream_id]; + return &info->vi_pdevices[vi_inst_id]->dev; +} +EXPORT_SYMBOL(vi_csi_stream_to_nvhost_device); + +int vi_capture_setup( + struct tegra_vi_channel *chan, + struct vi_capture_setup *setup) +{ + struct vi_capture *capture = chan->capture_data; + struct tegra_capture_vi_data *info; + uint32_t transaction; + struct CAPTURE_CONTROL_MSG control_desc; + struct CAPTURE_CONTROL_MSG *resp_msg = &capture->control_resp_msg; + struct capture_channel_config *config = + &control_desc.channel_setup_req.channel_config; + int err = 0; +#ifdef HAVE_VI_GOS_TABLES + int i; +#endif + + uint32_t vi_inst = 0; + struct device *dev; + + if (chan->drv->use_legacy_path) + dev = chan->dev; + else + dev = &chan->vi_capture_pdev->dev; + + if (setup->csi_stream_id >= MAX_NVCSI_STREAM_IDS || + setup->virtual_channel_id >= MAX_VIRTUAL_CHANNEL_PER_STREAM) { + dev_err(dev, "Invalid stream id 
or virtual channel id\n"); + return -EINVAL; + } + + if (chan->vi_capture_pdev == NULL) { + dev_err(dev, + "%s: channel capture device is NULL", __func__); + return -EINVAL; + } + + info = platform_get_drvdata(chan->vi_capture_pdev); + vi_inst = info->vi_instance_table[setup->csi_stream_id]; + + /* V4L2 directly calls this function. So need to make sure the + * correct VI5 instance is associated with the VI capture channel. + */ + if (chan->dev == NULL) { + vi_get_nvhost_device(chan, setup); + if (chan->dev == NULL) { + dev_err(&chan->vi_capture_pdev->dev, + "%s: channel device is NULL", __func__); + return -EINVAL; + } + } + + nv_camera_log(chan->ndev, + __arch_counter_get_cntvct(), + NVHOST_CAMERA_VI_CAPTURE_SETUP); + + if (setup->mem == 0 && setup->iova == 0) { + dev_err(chan->dev, + "%s: request buffer is NULL\n", __func__); + return -EINVAL; + } + + if (capture == NULL) { + dev_err(chan->dev, + "%s: vi capture uninitialized\n", __func__); + return -ENODEV; + } + + if (capture->channel_id != CAPTURE_CHANNEL_INVALID_ID) { + dev_err(chan->dev, + "%s: already setup, release first\n", __func__); + return -EEXIST; + } + + dev_dbg(chan->dev, "chan flags %u\n", setup->channel_flags); + dev_dbg(chan->dev, "chan mask %llx\n", setup->vi_channel_mask); + dev_dbg(chan->dev, "queue depth %u\n", setup->queue_depth); + dev_dbg(chan->dev, "request size %u\n", setup->request_size); + dev_dbg(chan->dev, "csi_stream_id %u\n", setup->csi_stream_id); + dev_dbg(chan->dev, "vi unit id %u\n", vi_inst); + dev_dbg(chan->dev, "vi2 chan mask %llx\n", setup->vi2_channel_mask); + + if (WARN_ON(vi_inst == VI_UNIT_VI && + setup->vi_channel_mask == CAPTURE_CHANNEL_INVALID_MASK) || + WARN_ON(vi_inst == VI_UNIT_VI2 && + setup->vi2_channel_mask == CAPTURE_CHANNEL_INVALID_MASK) || + WARN_ON(setup->channel_flags == 0) || + WARN_ON(setup->queue_depth == 0) || + WARN_ON(setup->request_size == 0) || + WARN_ON(setup->csi_stream_id == NVCSI_STREAM_INVALID_ID)) { + + dev_err(chan->dev, "%s: invalid setup 
parameters\n", __func__); + return -EINVAL; + } + + capture->queue_depth = setup->queue_depth; + capture->request_size = setup->request_size; + capture->request_buf_size = setup->request_size * setup->queue_depth; + + capture->stream_id = setup->csi_stream_id; + capture->csi_port = setup->csi_port; + capture->virtual_channel_id = setup->virtual_channel_id; + + err = vi_capture_setup_syncpts(chan, setup->channel_flags); + if (err < 0) { + dev_err(chan->dev, "failed to setup syncpts\n"); + goto syncpt_fail; + } + + err = tegra_capture_ivc_register_control_cb( + &vi_capture_ivc_control_callback, + &transaction, capture); + if (err < 0) { + dev_err(chan->dev, "failed to register control callback\n"); + goto control_cb_fail; + } + + memset(&control_desc, 0, sizeof(control_desc)); + control_desc.header.msg_id = CAPTURE_CHANNEL_SETUP_REQ; + control_desc.header.transaction = transaction; + + /* Allocate memoryinfo ringbuffer */ + capture->requests_memoryinfo = dma_alloc_coherent(capture->rtcpu_dev, + setup->queue_depth * sizeof(*capture->requests_memoryinfo), + &capture->requests_memoryinfo_iova, GFP_KERNEL); + + if (!capture->requests_memoryinfo) { + dev_err(chan->dev, + "%s: memoryinfo ringbuffer alloc failed\n", __func__); + goto memoryinfo_alloc_fail; + } + + WARN_ON(capture->unpins_list != NULL); + + capture->unpins_list = + vzalloc(setup->queue_depth * sizeof(*capture->unpins_list)); + + if (!capture->unpins_list) { + dev_err(chan->dev, + "%s: channel_unpins alloc failed\n", __func__); + goto unpin_alloc_fail; + } + + config->requests_memoryinfo = capture->requests_memoryinfo_iova; + config->request_memoryinfo_size = + sizeof(struct capture_descriptor_memoryinfo); + + config->channel_flags = setup->channel_flags; + config->vi_channel_mask = setup->vi_channel_mask; + config->vi2_channel_mask = setup->vi2_channel_mask; + config->slvsec_stream_main = setup->slvsec_stream_main; + config->slvsec_stream_sub = setup->slvsec_stream_sub; + + config->vi_unit_id = vi_inst; + + 
config->csi_stream.stream_id = setup->csi_stream_id; + config->csi_stream.csi_port = setup->csi_port; + config->csi_stream.virtual_channel = setup->virtual_channel_id; + + config->queue_depth = setup->queue_depth; + config->request_size = setup->request_size; + config->requests = setup->iova; + + config->error_mask_correctable = setup->error_mask_correctable; + config->error_mask_uncorrectable = setup->error_mask_uncorrectable; + config->stop_on_error_notify_bits = setup->stop_on_error_notify_bits; + +#ifdef HAVE_VI_GOS_TABLES + dev_dbg(chan->dev, "%u GoS tables configured.\n", + capture->num_gos_tables); + for (i = 0; i < capture->num_gos_tables; i++) { + config->vi_gos_tables[i] = (iova_t)capture->gos_tables[i]; + dev_dbg(chan->dev, "gos[%d] = 0x%08llx\n", + i, (u64)capture->gos_tables[i]); + } + config->num_vi_gos_tables = capture->num_gos_tables; +#endif + + config->progress_sp = capture->progress_sp; + config->embdata_sp = capture->embdata_sp; + config->linetimer_sp = capture->linetimer_sp; + + err = vi_capture_ivc_send_control(chan, &control_desc, + sizeof(control_desc), CAPTURE_CHANNEL_SETUP_RESP); + if (err < 0) + goto submit_fail; + + if (resp_msg->channel_setup_resp.result != CAPTURE_OK) { + dev_err(chan->dev, "%s: control failed, errno %d", __func__, + resp_msg->channel_setup_resp.result); + err = -EINVAL; + goto resp_fail; + } + + capture->channel_id = resp_msg->channel_setup_resp.channel_id; + + if (vi_inst == VI_UNIT_VI) + capture->vi_channel_mask = + resp_msg->channel_setup_resp.vi_channel_mask; + else if (vi_inst == VI_UNIT_VI2) + capture->vi2_channel_mask = + resp_msg->channel_setup_resp.vi_channel_mask; + else { + dev_err(chan->dev, "failed response for vi:%u\n", vi_inst); + err = -EINVAL; + goto resp_fail; + } + + + err = tegra_capture_ivc_notify_chan_id(capture->channel_id, + transaction); + if (err < 0) { + dev_err(chan->dev, "failed to update control callback\n"); + goto cb_fail; + } + + err = tegra_capture_ivc_register_capture_cb( + 
&vi_capture_ivc_status_callback, + capture->channel_id, capture); + if (err < 0) { + dev_err(chan->dev, "failed to register capture callback\n"); + goto cb_fail; + } + + channels[setup->csi_stream_id][setup->virtual_channel_id] = chan; + + return 0; + +cb_fail: +resp_fail: +submit_fail: + vfree(capture->unpins_list); + capture->unpins_list = NULL; +unpin_alloc_fail: + /* Release memoryinfo ringbuffer */ + dma_free_coherent(capture->rtcpu_dev, + capture->queue_depth * + sizeof(struct capture_descriptor_memoryinfo), + capture->requests_memoryinfo, + capture->requests_memoryinfo_iova); + capture->requests_memoryinfo = NULL; +memoryinfo_alloc_fail: + tegra_capture_ivc_unregister_control_cb(transaction); +control_cb_fail: + vi_capture_release_syncpts(chan); +syncpt_fail: + return err; +} +EXPORT_SYMBOL_GPL(vi_capture_setup); + +struct tegra_vi_channel *get_tegra_vi_channel( + unsigned int stream_id, + unsigned int virtual_channel_id) +{ + if (stream_id >= MAX_NVCSI_STREAM_IDS || virtual_channel_id >= MAX_VIRTUAL_CHANNEL_PER_STREAM) + return NULL; + + return channels[stream_id][virtual_channel_id]; +} + +int vi_capture_reset( + struct tegra_vi_channel *chan, + uint32_t reset_flags) +{ + struct vi_capture *capture = chan->capture_data; + struct CAPTURE_CONTROL_MSG control_desc; +#ifdef CAPTURE_RESET_BARRIER_IND + struct CAPTURE_MSG capture_desc; +#endif + struct CAPTURE_CONTROL_MSG *resp_msg = &capture->control_resp_msg; + int err = 0; + + nv_camera_log(chan->ndev, + __arch_counter_get_cntvct(), + NVHOST_CAMERA_VI_CAPTURE_RESET); + + if (capture == NULL) { + dev_err(chan->dev, + "%s: vi capture uninitialized\n", __func__); + return -ENODEV; + } + + if (capture->channel_id == CAPTURE_CHANNEL_INVALID_ID) { + dev_err(chan->dev, + "%s: setup channel first\n", __func__); + return -ENODEV; + } + + mutex_lock(&capture->reset_lock); + +#ifdef CAPTURE_RESET_BARRIER_IND + memset(&capture_desc, 0, sizeof(capture_desc)); + capture_desc.header.msg_id = CAPTURE_RESET_BARRIER_IND; + 
capture_desc.header.channel_id = capture->channel_id; + err = tegra_capture_ivc_capture_submit(&capture_desc, + sizeof(capture_desc)); + if (err < 0) { + dev_err(chan->dev, "%s:IVC capture submit failed\n", __func__); + goto submit_fail; + } +#endif + + memset(&control_desc, 0, sizeof(control_desc)); + control_desc.header.msg_id = CAPTURE_CHANNEL_RESET_REQ; + control_desc.header.channel_id = capture->channel_id; + control_desc.channel_reset_req.reset_flags = reset_flags; + + err = vi_capture_ivc_send_control(chan, &control_desc, + sizeof(control_desc), CAPTURE_CHANNEL_RESET_RESP); + if (err < 0) + goto submit_fail; + +#ifdef CAPTURE_RESET_BARRIER_IND + if (resp_msg->channel_reset_resp.result == CAPTURE_ERROR_TIMEOUT) { + dev_dbg(chan->dev, "%s:reset timeout\n", __func__); + err = -EAGAIN; + goto submit_fail; + } +#endif + + if (resp_msg->channel_reset_resp.result != CAPTURE_OK) { + dev_err(chan->dev, "%s: control failed, errno %d", __func__, + resp_msg->channel_reset_resp.result); + err = -EINVAL; + } + +submit_fail: + mutex_unlock(&capture->reset_lock); + return err; +} +EXPORT_SYMBOL_GPL(vi_capture_reset); + +int vi_capture_release( + struct tegra_vi_channel *chan, + uint32_t reset_flags) +{ + struct vi_capture *capture = chan->capture_data; + struct CAPTURE_CONTROL_MSG control_desc; + struct CAPTURE_CONTROL_MSG *resp_msg = &capture->control_resp_msg; + int err = 0; + int ret = 0; + int i = 0; + + nv_camera_log(chan->ndev, + __arch_counter_get_cntvct(), + NVHOST_CAMERA_VI_CAPTURE_RELEASE); + + if (capture == NULL) { + dev_err(chan->dev, + "%s: vi capture uninitialized\n", __func__); + return -ENODEV; + } + + if (capture->channel_id == CAPTURE_CHANNEL_INVALID_ID) { + dev_err(chan->dev, + "%s: setup channel first\n", __func__); + return -ENODEV; + + } + + memset(&control_desc, 0, sizeof(control_desc)); + control_desc.header.msg_id = CAPTURE_CHANNEL_RELEASE_REQ; + control_desc.header.channel_id = capture->channel_id; + control_desc.channel_release_req.reset_flags = 
reset_flags; + + err = vi_capture_ivc_send_control(chan, &control_desc, + sizeof(control_desc), CAPTURE_CHANNEL_RELEASE_RESP); + if (err < 0) { + dev_err(chan->dev, + "%s: release channel IVC failed\n", __func__); + WARN_ON("RTCPU is in a bad state. Reboot to recover"); + + tegra_camrtc_reboot(chan->rtcpu_dev); + + err = -EIO; + } else if (resp_msg->channel_release_resp.result != CAPTURE_OK) { + dev_err(chan->dev, "%s: control failed, errno %d", __func__, + resp_msg->channel_release_resp.result); + err = -EIO; + } + + if (capture->requests_memoryinfo) { + /* Release memoryinfo ringbuffer */ + dma_free_coherent(capture->rtcpu_dev, + capture->queue_depth * sizeof(struct capture_descriptor_memoryinfo), + capture->requests_memoryinfo, capture->requests_memoryinfo_iova); + capture->requests_memoryinfo = NULL; + } + + ret = tegra_capture_ivc_unregister_capture_cb(capture->channel_id); + if (ret < 0 && err == 0) { + dev_err(chan->dev, + "failed to unregister capture callback\n"); + err = ret; + } + + ret = tegra_capture_ivc_unregister_control_cb(capture->channel_id); + if (ret < 0 && err == 0) { + dev_err(chan->dev, + "failed to unregister control callback\n"); + err = ret; + } + + for (i = 0; i < capture->queue_depth; i++) + complete(&capture->capture_resp); + + vi_capture_release_syncpts(chan); + + if (capture->stream_id < MAX_NVCSI_STREAM_IDS && + capture->virtual_channel_id < MAX_VIRTUAL_CHANNEL_PER_STREAM) { + channels[capture->stream_id][capture->virtual_channel_id] = NULL; + } + + capture->channel_id = CAPTURE_CHANNEL_INVALID_ID; + capture->stream_id = NVCSI_STREAM_INVALID_ID; + capture->csi_port = NVCSI_PORT_UNSPECIFIED; + capture->virtual_channel_id = NVCSI_STREAM_INVALID_TPG_VC_ID; + + if (capture->is_progress_status_notifier_set) + capture_common_release_progress_status_notifier( + &capture->progress_status_notifier); + + return err; +} +EXPORT_SYMBOL_GPL(vi_capture_release); + +static int vi_capture_control_send_message( + struct tegra_vi_channel *chan, + 
const struct CAPTURE_CONTROL_MSG *msg_cpy, + size_t size) +{ + int err = 0; + struct vi_capture *capture = chan->capture_data; + struct CAPTURE_MSG_HEADER *header; + uint32_t resp_id; + + header = (struct CAPTURE_MSG_HEADER *)msg_cpy; + header->channel_id = capture->channel_id; + + switch (header->msg_id) { + case CAPTURE_COMPAND_CONFIG_REQ: + resp_id = CAPTURE_COMPAND_CONFIG_RESP; + break; + case CAPTURE_PDAF_CONFIG_REQ: + resp_id = CAPTURE_PDAF_CONFIG_RESP; + break; + case CAPTURE_SYNCGEN_ENABLE_REQ: + resp_id = CAPTURE_SYNCGEN_ENABLE_RESP; + break; + case CAPTURE_SYNCGEN_DISABLE_REQ: + resp_id = CAPTURE_SYNCGEN_DISABLE_RESP; + break; + case CAPTURE_PHY_STREAM_OPEN_REQ: + if (chan->is_stream_opened) { + dev_dbg(chan->dev, + "%s: NVCSI stream is already opened for this VI channel", + __func__); + return 0; + } + resp_id = CAPTURE_PHY_STREAM_OPEN_RESP; + capture->stream_id = msg_cpy->phy_stream_open_req.stream_id; + capture->csi_port = msg_cpy->phy_stream_open_req.csi_port; + break; + case CAPTURE_PHY_STREAM_CLOSE_REQ: + if (!chan->is_stream_opened) { + dev_dbg(chan->dev, + "%s: NVCSI stream is already closed for this VI channel", + __func__); + return 0; + } + resp_id = CAPTURE_PHY_STREAM_CLOSE_RESP; + break; + case CAPTURE_PHY_STREAM_DUMPREGS_REQ: + resp_id = CAPTURE_PHY_STREAM_DUMPREGS_RESP; + break; + case CAPTURE_CSI_STREAM_SET_CONFIG_REQ: + resp_id = CAPTURE_CSI_STREAM_SET_CONFIG_RESP; + break; + case CAPTURE_CSI_STREAM_SET_PARAM_REQ: + resp_id = CAPTURE_CSI_STREAM_SET_PARAM_RESP; + break; + case CAPTURE_CSI_STREAM_TPG_SET_CONFIG_REQ: + resp_id = CAPTURE_CSI_STREAM_TPG_SET_CONFIG_RESP; + break; + case CAPTURE_CSI_STREAM_TPG_START_REQ: + resp_id = CAPTURE_CSI_STREAM_TPG_START_RESP; + capture->virtual_channel_id = + msg_cpy->csi_stream_tpg_start_req.virtual_channel_id; + break; + case CAPTURE_CSI_STREAM_TPG_START_RATE_REQ: + resp_id = CAPTURE_CSI_STREAM_TPG_START_RATE_RESP; + capture->virtual_channel_id = msg_cpy-> + 
csi_stream_tpg_start_rate_req.virtual_channel_id; + break; + case CAPTURE_CSI_STREAM_TPG_APPLY_GAIN_REQ: + resp_id = CAPTURE_CSI_STREAM_TPG_APPLY_GAIN_RESP; + break; + case CAPTURE_CSI_STREAM_TPG_STOP_REQ: + resp_id = CAPTURE_CSI_STREAM_TPG_STOP_RESP; + break; + case CAPTURE_CHANNEL_EI_REQ: + resp_id = CAPTURE_CHANNEL_EI_RESP; + break; + case CAPTURE_HSM_CHANSEL_ERROR_MASK_REQ: + resp_id = CAPTURE_HSM_CHANSEL_ERROR_MASK_RESP; + break; + default: + dev_err(chan->dev, "%s: unknown capture control req 0x%x", + __func__, header->msg_id); + return -EINVAL; + } + + err = vi_capture_ivc_send_control(chan, msg_cpy, size, resp_id); + if (err < 0) { + dev_err(chan->dev, "%s: failed to send IVC control message", __func__); + return err; + } + + if (header->msg_id == CAPTURE_PHY_STREAM_OPEN_REQ) + chan->is_stream_opened = true; + else if (header->msg_id == CAPTURE_PHY_STREAM_CLOSE_REQ) + chan->is_stream_opened = false; + + return err; +} + +/** + * @brief Disable the VI channel's NVCSI TPG stream in RCE. + * + * @param[in] chan VI channel context + * + * @returns 0 (success), neg. errno (failure) + */ +static int csi_stream_tpg_disable( + struct tegra_vi_channel *chan) +{ + struct vi_capture *capture = chan->capture_data; + struct CAPTURE_CONTROL_MSG control_desc; + struct CAPTURE_CONTROL_MSG *resp_msg = &capture->control_resp_msg; + int err = 0; + + memset(&control_desc, 0, sizeof(control_desc)); + control_desc.header.msg_id = CAPTURE_CSI_STREAM_TPG_STOP_REQ; + control_desc.header.channel_id = capture->channel_id; + control_desc.csi_stream_tpg_stop_req.stream_id = capture->stream_id; + control_desc.csi_stream_tpg_stop_req.virtual_channel_id = + capture->virtual_channel_id; + + err = vi_capture_ivc_send_control(chan, &control_desc, + sizeof(control_desc), CAPTURE_CSI_STREAM_TPG_STOP_RESP); + if ((err < 0) || + (resp_msg->csi_stream_tpg_stop_resp.result + != CAPTURE_OK)) + return err; + + return 0; +} + +/** + * @brief Disable the VI channel's NVCSI stream in RCE. 
+ * + * @param[in] chan VI channel context + * + * @returns 0 (success), neg. errno (failure) + */ +static int csi_stream_close( + struct tegra_vi_channel *chan) +{ + struct vi_capture *capture = chan->capture_data; + struct CAPTURE_CONTROL_MSG control_desc; + struct CAPTURE_CONTROL_MSG *resp_msg = &capture->control_resp_msg; + int err = 0; + + memset(&control_desc, 0, sizeof(control_desc)); + control_desc.header.msg_id = CAPTURE_PHY_STREAM_CLOSE_REQ; + control_desc.header.channel_id = capture->channel_id; + control_desc.phy_stream_close_req.phy_type = NVPHY_TYPE_CSI; + control_desc.phy_stream_close_req.stream_id = capture->stream_id; + control_desc.phy_stream_close_req.csi_port = capture->csi_port; + + err = vi_capture_control_send_message(chan, &control_desc, + sizeof(control_desc)); + if ((err < 0) || + (resp_msg->phy_stream_close_resp.result != CAPTURE_OK)) + return err; + + return 0; +} + +int csi_stream_release( + struct tegra_vi_channel *chan) +{ + struct vi_capture *capture = chan->capture_data; + int err = 0; + + if (capture->stream_id == NVCSI_STREAM_INVALID_ID) + return 0; + + if (capture->virtual_channel_id != NVCSI_STREAM_INVALID_TPG_VC_ID) { + err = csi_stream_tpg_disable(chan); + if (err < 0) { + dev_err(chan->dev, + "%s: failed to disable nvcsi tpg on stream %u virtual channel %u\n", + __func__, capture->stream_id, + capture->virtual_channel_id); + return err; + } + } + + if (chan->is_stream_opened) { + err = csi_stream_close(chan); + if (err < 0) + dev_err(chan->dev, + "%s: failed to close nvcsi stream %u\n", + __func__, capture->stream_id); + } + + return err; +} + +int vi_capture_control_message_from_user( + struct tegra_vi_channel *chan, + struct vi_capture_control_msg *msg) +{ + struct vi_capture *capture; + const void __user *msg_ptr; + void __user *response; + void *msg_cpy; + struct CAPTURE_CONTROL_MSG *resp_msg; + int err = 0; + + if (chan == NULL) { + dev_err(NULL, "%s: NULL VI channel received\n", __func__); + return -ENODEV; + } + + 
nv_camera_log(chan->ndev, + __arch_counter_get_cntvct(), + NVHOST_CAMERA_VI_CAPTURE_SET_CONFIG); + + capture = chan->capture_data; + + if (capture == NULL) { + dev_err(chan->dev, + "%s: vi capture uninitialized\n", __func__); + return -ENODEV; + } + + resp_msg = &capture->control_resp_msg; + + if (msg->ptr == 0ull || msg->response == 0ull || msg->size == 0) + return -EINVAL; + + msg_ptr = (const void __user *)(uintptr_t)msg->ptr; + response = (void __user *)(uintptr_t)msg->response; + + msg_cpy = kzalloc(msg->size, GFP_KERNEL); + if (unlikely(msg_cpy == NULL)) + return -ENOMEM; + + err = copy_from_user(msg_cpy, msg_ptr, msg->size) ? -EFAULT : 0; + if (err < 0) + goto fail; + + + err = vi_capture_control_send_message(chan, msg_cpy, msg->size); + if (err < 0) + goto fail; + + err = copy_to_user(response, resp_msg, + sizeof(*resp_msg)) ? -EFAULT : 0; + if (err < 0) + goto fail; + +fail: + kfree(msg_cpy); + return err; +} +EXPORT_SYMBOL_GPL(vi_capture_control_message_from_user); + +int vi_capture_control_message( + struct tegra_vi_channel *chan, + struct vi_capture_control_msg *msg) +{ + struct vi_capture *capture; + void *msg_cpy; + struct CAPTURE_CONTROL_MSG *resp_msg; + int err = 0; + + if (chan == NULL) { + dev_err(NULL,"%s: NULL VI channel received\n", __func__); + return -ENODEV; + } + + nv_camera_log(chan->ndev, + __arch_counter_get_cntvct(), + NVHOST_CAMERA_VI_CAPTURE_SET_CONFIG); + + capture = chan->capture_data; + + if (capture == NULL) { + dev_err(chan->dev, + "%s: vi capture uninitialized\n", __func__); + return -ENODEV; + } + + resp_msg = &capture->control_resp_msg; + if (msg->ptr == 0ull || msg->response == 0ull || msg->size == 0) + return -EINVAL; + + msg_cpy = kzalloc(msg->size, GFP_KERNEL); + if (unlikely(msg_cpy == NULL)) + return -ENOMEM; + + memcpy(msg_cpy, (const void *)(uintptr_t)msg->ptr, + msg->size); + + err = vi_capture_control_send_message(chan, msg_cpy, msg->size); + if (err < 0) + goto fail; + + memcpy((void *)(uintptr_t)msg->response, 
resp_msg, + sizeof(*resp_msg)); + +fail: + kfree(msg_cpy); + return err; +} + +int vi_capture_get_info( + struct tegra_vi_channel *chan, + struct vi_capture_info *info) +{ + struct vi_capture *capture = chan->capture_data; + int err; + + nv_camera_log(chan->ndev, + __arch_counter_get_cntvct(), + NVHOST_CAMERA_VI_CAPTURE_GET_INFO); + + if (capture == NULL) { + dev_err(chan->dev, + "%s: vi capture uninitialized\n", __func__); + return -ENODEV; + } + + if (capture->channel_id == CAPTURE_CHANNEL_INVALID_ID) { + dev_err(chan->dev, + "%s: setup channel first\n", __func__); + return -ENODEV; + } + + if (info == NULL) + return -EINVAL; + + info->syncpts.progress_syncpt = capture->progress_sp.id; + info->syncpts.emb_data_syncpt = capture->embdata_sp.id; + info->syncpts.line_timer_syncpt = capture->linetimer_sp.id; + + err = vi_capture_read_syncpt(chan, &capture->progress_sp, + &info->syncpts.progress_syncpt_val); + if (err < 0) + return err; + err = vi_capture_read_syncpt(chan, &capture->embdata_sp, + &info->syncpts.emb_data_syncpt_val); + if (err < 0) + return err; + err = vi_capture_read_syncpt(chan, &capture->linetimer_sp, + &info->syncpts.line_timer_syncpt_val); + if (err < 0) + return err; + + info->hw_channel_id = capture->channel_id; + info->vi_channel_mask = capture->vi_channel_mask; + info->vi2_channel_mask = capture->vi2_channel_mask; + + return 0; +} +EXPORT_SYMBOL_GPL(vi_capture_get_info); + +int vi_capture_request( + struct tegra_vi_channel *chan, + struct vi_capture_req *req) +{ + struct vi_capture *capture = chan->capture_data; + struct CAPTURE_MSG capture_desc; + int err = 0; + + nv_camera_log(chan->ndev, + __arch_counter_get_cntvct(), + NVHOST_CAMERA_VI_CAPTURE_REQUEST); + + if (capture == NULL) { + dev_err(chan->dev, + "%s: vi capture uninitialized\n", __func__); + return -ENODEV; + } + + if (capture->channel_id == CAPTURE_CHANNEL_INVALID_ID) { + dev_err(chan->dev, + "%s: setup channel first\n", __func__); + return -ENODEV; + } + + if (req == NULL) { + 
dev_err(chan->dev, + "%s: Invalid req\n", __func__); + return -EINVAL; + } + + mutex_lock(&capture->reset_lock); + + memset(&capture_desc, 0, sizeof(capture_desc)); + capture_desc.header.msg_id = CAPTURE_REQUEST_REQ; + capture_desc.header.channel_id = capture->channel_id; + capture_desc.capture_request_req.buffer_index = req->buffer_index; + + nv_camera_log_submit( + chan->ndev, + capture->progress_sp.id, + capture->progress_sp.threshold, + capture_desc.header.channel_id, + __arch_counter_get_cntvct()); + + dev_dbg(chan->dev, "%s: sending chan_id %u msg_id %u buf:%u\n", + __func__, capture_desc.header.channel_id, + capture_desc.header.msg_id, req->buffer_index); + err = tegra_capture_ivc_capture_submit(&capture_desc, + sizeof(capture_desc)); + if (err < 0) { + mutex_unlock(&capture->reset_lock); + dev_err(chan->dev, "IVC capture submit failed\n"); + return err; + } + + mutex_unlock(&capture->reset_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(vi_capture_request); + +int vi_capture_status( + struct tegra_vi_channel *chan, + int32_t timeout_ms) +{ + struct vi_capture *capture = chan->capture_data; + int ret = 0; + + nv_camera_log(chan->ndev, + __arch_counter_get_cntvct(), + NVHOST_CAMERA_VI_CAPTURE_STATUS); + + if (capture == NULL) { + dev_err(chan->dev, + "%s: vi capture uninitialized\n", __func__); + return -ENODEV; + } + + if (capture->channel_id == CAPTURE_CHANNEL_INVALID_ID) { + dev_err(chan->dev, + "%s: setup channel first\n", __func__); + return -ENODEV; + } + + dev_dbg(chan->dev, "%s: waiting for status, timeout:%d ms\n", + __func__, timeout_ms); + + /* negative timeout means wait forever */ + if (timeout_ms < 0) { + wait_for_completion(&capture->capture_resp); + } else { + ret = wait_for_completion_timeout( + &capture->capture_resp, + msecs_to_jiffies(timeout_ms)); + if (ret == 0) { + dev_dbg(chan->dev, + "capture status timed out\n"); + return -ETIMEDOUT; + } + } + + if (ret < 0) { + dev_err(chan->dev, + "wait for capture status failed\n"); + return ret; + } + 
+ return 0; +} +EXPORT_SYMBOL_GPL(vi_capture_status); + +int vi_capture_set_progress_status_notifier( + struct tegra_vi_channel *chan, + struct vi_capture_progress_status_req *req) +{ + int err = 0; + struct vi_capture *capture = chan->capture_data; + + nv_camera_log(chan->ndev, + __arch_counter_get_cntvct(), + NVHOST_CAMERA_VI_CAPTURE_SET_PROGRESS_STATUS); + + if (req->mem == 0 || + req->buffer_depth == 0) { + dev_err(chan->dev, + "%s: request buffer is invalid\n", __func__); + return -EINVAL; + } + + if (capture == NULL) { + dev_err(chan->dev, + "%s: vi capture uninitialized\n", __func__); + return -ENODEV; + } + + if (req->buffer_depth < capture->queue_depth) { + dev_err(chan->dev, + "Progress status buffer is smaller than queue depth"); + return -EINVAL; + } + + /* Setup the progress status buffer */ + err = capture_common_setup_progress_status_notifier( + &capture->progress_status_notifier, + req->mem, + sizeof(uint32_t) * req->buffer_depth, + req->mem_offset); + + if (err < 0) { + dev_err(chan->dev, "%s: memory setup failed\n", __func__); + return -EFAULT; + } + + dev_dbg(chan->dev, "mem offset %u\n", req->mem_offset); + dev_dbg(chan->dev, "buffer depth %u\n", req->buffer_depth); + + capture->progress_status_buffer_depth = req->buffer_depth; + capture->is_progress_status_notifier_set = true; + return err; +} +EXPORT_SYMBOL_GPL(vi_capture_set_progress_status_notifier); + +static int csi_vi_get_mapping_table(struct platform_device *pdev) +{ + uint32_t index = 0; + struct device *dev = &pdev->dev; + struct tegra_capture_vi_data *info = platform_get_drvdata(pdev); + + int nmap_elems; + uint32_t map_table_size; + uint32_t *map_table = info->vi_instance_table; + + const struct device_node *np = dev->of_node; + + (void)of_property_read_u32(np, + "nvidia,vi-mapping-size", &map_table_size); + if (map_table_size > MAX_NVCSI_STREAM_IDS) { + dev_err(dev, "invalid mapping table size %u\n", map_table_size); + return -EINVAL; + } + info->num_csi_vi_maps = map_table_size; + 
+ nmap_elems = of_property_count_strings(np, "nvidia,vi-mapping-names"); + if (nmap_elems != ARRAY_SIZE(vi_mapping_elements)) + return -EINVAL; + + /* check for order of csi-stream-id and vi-unit-id in DT entry */ + for (index = 0; index < ARRAY_SIZE(vi_mapping_elements); index++) { + int map_elem = of_property_match_string(np, + "nvidia,vi-mapping-names", vi_mapping_elements[index]); + if (map_elem != index) { + dev_err(dev, "invalid mapping order\n"); + return -EINVAL; + } + } + + for (index = 0; index < map_table_size; index++) + map_table[index] = INVALID_VI_UNIT_ID; + + for (index = 0; index < map_table_size; index++) { + uint32_t stream_index = NVCSI_STREAM_INVALID_ID; + uint32_t vi_unit_id = INVALID_VI_UNIT_ID; + + (void)of_property_read_u32_index(np, + "nvidia,vi-mapping", + 2 * index, + &stream_index); + + /* Check for valid/duplicate csi-stream-id */ + if (stream_index >= MAX_NVCSI_STREAM_IDS || + map_table[stream_index] != INVALID_VI_UNIT_ID) { + dev_err(dev, "%s: mapping invalid csi_stream_id: %u\n", + __func__, stream_index); + return -EINVAL; + } + + (void)of_property_read_u32_index(np, + "nvidia,vi-mapping", + 2 * index + 1, + &vi_unit_id); + + /* check for valid vi-unit-id */ + if (vi_unit_id >= MAX_VI_UNITS) { + dev_err(dev, "%s: mapping invalid vi_unit_id: %u\n", + __func__, vi_unit_id); + return -EINVAL; + } + + map_table[stream_index] = vi_unit_id; + } + + dev_dbg(dev, "%s: csi-stream to vi-instance mapping table size: %u\n", + __func__, info->num_csi_vi_maps); + + for (index = 0; index < ARRAY_SIZE(info->vi_instance_table); index++) + dev_dbg(dev, "%s: vi_instance_table[%d] = %d\n", + __func__, index, info->vi_instance_table[index]); + + return 0; +} + +static int capture_vi_probe(struct platform_device *pdev) +{ + uint32_t ii; + int err = 0; + struct tegra_capture_vi_data *info; + struct device *dev = &pdev->dev; + + dev_dbg(dev, "%s: tegra-camrtc-capture-vi probe\n", __func__); + + info = devm_kzalloc(dev, + sizeof(*info), GFP_KERNEL); + if 
(info == NULL) + return -ENOMEM; + + info->num_vi_devices = 0; + + (void)of_property_read_u32(dev->of_node, "nvidia,vi-max-channels", + &info->max_vi_channels); + if (info->max_vi_channels == 0) + info->max_vi_channels = DEFAULT_VI_CHANNELS; + + for (ii = 0; ; ii++) { + struct device_node *np; + struct platform_device *pvidev; + + np = of_parse_phandle(dev->of_node, "nvidia,vi-devices", ii); + if (np == NULL) + break; + + if (info->num_vi_devices >= ARRAY_SIZE(info->vi_pdevices)) { + of_node_put(np); + err = -EINVAL; + goto cleanup; + } + + pvidev = of_find_device_by_node(np); + of_node_put(np); + + if (pvidev == NULL) { + dev_WARN(dev, "vi node %d has no device\n", ii); + err = -ENODEV; + goto cleanup; + } + + info->vi_pdevices[ii] = pvidev; + info->num_vi_devices++; + } + + if (info->num_vi_devices < 1) + return -EINVAL; + + platform_set_drvdata(pdev, info); + + if (info->num_vi_devices == 1) { + dev_dbg(dev, "default 0 vi-unit-id for all csi-stream-ids\n"); + } else { + /* read mapping table from DT for multiple VIs */ + err = csi_vi_get_mapping_table(pdev); + if (err) { + dev_err(dev, + "%s: reading csi-to-vi mapping failed\n", + __func__); + goto cleanup; + } + } + + err = vi_channel_drv_register(pdev, info->max_vi_channels); + if (err) { + vi_channel_drv_exit(); + goto cleanup; + } + + info->vi_common.mc_vi.vi = &info->vi_common; + info->vi_common.mc_vi.fops = &vi5_fops; + err = tegra_capture_vi_media_controller_init( + &info->vi_common.mc_vi, pdev); + if (err) { + dev_warn(&pdev->dev, "media controller init failed\n"); + err = 0; + } + + memset(channels, 0 , sizeof(channels)); + + return 0; + +cleanup: + for (ii = 0; ii < info->num_vi_devices; ii++) + put_device(&info->vi_pdevices[ii]->dev); + + dev_err(dev, "%s: tegra-camrtc-capture-vi probe failed\n", __func__); + return err; +} + +static int capture_vi_remove(struct platform_device *pdev) +{ + struct tegra_capture_vi_data *info; + uint32_t ii; + struct device *dev = &pdev->dev; + + dev_dbg(dev, 
"%s:tegra-camrtc-capture-vi remove\n", __func__); + + info = platform_get_drvdata(pdev); + + for (ii = 0; ii < info->num_vi_devices; ii++) + put_device(&info->vi_pdevices[ii]->dev); + + vi_channel_drv_exit(); + return 0; +} + +static const struct of_device_id capture_vi_of_match[] = { + { .compatible = "nvidia,tegra-camrtc-capture-vi" }, + { }, +}; +MODULE_DEVICE_TABLE(of, capture_vi_of_match); + +static struct platform_driver capture_vi_driver = { + .probe = capture_vi_probe, + .remove = capture_vi_remove, + .driver = { + .owner = THIS_MODULE, + .name = "tegra-camrtc-capture-vi", + .of_match_table = capture_vi_of_match + } +}; + +static int __init capture_vi_init(void) +{ + int err; + err = vi_channel_drv_init(); + if (err) + return err; + + err = platform_driver_register(&capture_vi_driver); + if (err) { + vi_channel_drv_exit(); + return err; + } + + return 0; +} +static void __exit capture_vi_exit(void) +{ + vi_channel_drv_exit(); + platform_driver_unregister(&capture_vi_driver); +} + +module_init(capture_vi_init); +module_exit(capture_vi_exit); + +MODULE_IMPORT_NS(DMA_BUF); +MODULE_DESCRIPTION("tegra fusa-capture driver"); +MODULE_LICENSE("GPL"); \ No newline at end of file diff --git a/drivers/media/platform/tegra/camera/nvcamera_log.c b/drivers/media/platform/tegra/camera/nvcamera_log.c new file mode 100644 index 00000000..40c54efb --- /dev/null +++ b/drivers/media/platform/tegra/camera/nvcamera_log.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * nvcamera_log.c - general tracing function for vi and isp API calls + * + * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. 
+ */ + + +#include "nvcamera_log.h" +#include +#include +#include + +/* + * Set to 1 to enable additional kernel API traces + */ +#define NVCAM_ENABLE_EXTRA_TRACES 0 + +#if defined(CONFIG_EVENTLIB) +#include + +/* + * Camera "task submission" event enabled by default + */ +void nv_camera_log_submit(struct platform_device *pdev, + u32 syncpt_id, + u32 syncpt_thresh, + u32 channel_id, + u64 timestamp) +{ + struct nvhost_device_data *pdata = platform_get_drvdata(pdev); + struct nvhost_task_submit task_submit; + + if (!pdata->eventlib_id) + return; + + /* + * Write task submit event + */ + task_submit.syncpt_id = syncpt_id; + task_submit.syncpt_thresh = syncpt_thresh; + task_submit.channel_id = channel_id; + task_submit.class_id = pdata->class; + + /* + * Eventlib events are meant to be matched with their userspace + * analogues. Instead of the PID as (this) thread's ID use the + * inherited thread group ID. For the reported TID use this thread's + * ID (i.e. PID). + */ + task_submit.tid = current->pid; + task_submit.pid = current->tgid; + + keventlib_write(pdata->eventlib_id, + &task_submit, + sizeof(task_submit), + NVHOST_TASK_SUBMIT, + timestamp); +} + +#else + +void nv_camera_log_submit(struct platform_device *pdev, + u32 syncpt_id, + u32 syncpt_thresh, + u32 channel_id, + u64 timestamp) +{ +} + +#endif +EXPORT_SYMBOL_GPL(nv_camera_log_submit); + +#if defined(CONFIG_EVENTLIB) && NVCAM_ENABLE_EXTRA_TRACES +#include + +/* + * Additional camera traces disabled by default + */ +void nv_camera_log(struct platform_device *pdev, + u64 timestamp, + u32 type) +{ + struct nvhost_device_data *pdata = platform_get_drvdata(pdev); + struct nv_camera_task_log task_log; + + if (!pdata->eventlib_id) + return; + + /* + * Write task log event + */ + task_log.class_id = pdata->class; + + /* + * Eventlib events are meant to be matched with their userspace + * analogues. Instead of the PID as (this) thread's ID use the + * inherited thread group ID. 
For the reported TID use this thread's + * ID (i.e. PID). + */ + task_log.tid = current->pid; + task_log.pid = current->tgid; + + keventlib_write(pdata->eventlib_id, + &task_log, + sizeof(task_log), + type, + timestamp); +} + +#else + +void nv_camera_log(struct platform_device *pdev, + u64 timestamp, + u32 type) +{ +} + +#endif +EXPORT_SYMBOL_GPL(nv_camera_log); diff --git a/drivers/media/platform/tegra/camera/nvcamera_log.h b/drivers/media/platform/tegra/camera/nvcamera_log.h new file mode 100644 index 00000000..3096bc63 --- /dev/null +++ b/drivers/media/platform/tegra/camera/nvcamera_log.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#ifndef __NVCAMERA_LOG_H +#define __NVCAMERA_LOG_H + +#include + +struct platform_device; + +void nv_camera_log_submit(struct platform_device *pdev, + u32 syncpt_id, + u32 syncpt_thresh, + u32 channel_id, + u64 timestamp); + +void nv_camera_log(struct platform_device *pdev, + u64 timestamp, + u32 type); + +#endif diff --git a/drivers/media/platform/tegra/camera/nvcsi/csi5_fops.c b/drivers/media/platform/tegra/camera/nvcsi/csi5_fops.c new file mode 100644 index 00000000..c4a1ea4f --- /dev/null +++ b/drivers/media/platform/tegra/camera/nvcsi/csi5_fops.c @@ -0,0 +1,519 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Tegra CSI5 device common APIs + * + * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include +#include +#include +#include +#include "nvcsi/nvcsi.h" +#include "csi5_fops.h" +#include +#include +#include +#include "soc/tegra/camrtc-capture-messages.h" +#include + +/* Referred from capture-scheduler.c defined in rtcpu-fw */ +#define NUM_CAPTURE_CHANNELS 64 + +/* Temporary ids for the clients whose channel-id is not yet allocated */ +#define NUM_CAPTURE_TRANSACTION_IDS 64 + +#define TOTAL_CHANNELS (NUM_CAPTURE_CHANNELS + NUM_CAPTURE_TRANSACTION_IDS) + +static inline u32 csi5_port_to_stream(u32 csi_port) +{ + return (csi_port < NVCSI_PORT_E) ? + csi_port : (((csi_port - NVCSI_PORT_E) >> 1U) + NVCSI_PORT_E); +} + +static int csi5_power_on(struct tegra_csi_device *csi) +{ + int err = 0; + + dev_dbg(csi->dev, "%s\n", __func__); + + err = nvhost_module_busy(csi->pdev); + if (err) + dev_err(csi->dev, "%s:cannot enable csi\n", __func__); + + return err; +} + +static int csi5_power_off(struct tegra_csi_device *csi) +{ + dev_dbg(csi->dev, "%s\n", __func__); + + nvhost_module_idle(csi->pdev); + + return 0; +} + +static int verify_capture_control_response(const uint32_t result) +{ + int err = 0; + + switch (result) { + case CAPTURE_OK: + { + err = 0; + break; + } + case CAPTURE_ERROR_INVALID_PARAMETER: + { + err = -EINVAL; + break; + } + case CAPTURE_ERROR_NO_MEMORY: + { + err = -ENOMEM; + break; + } + case CAPTURE_ERROR_BUSY: + { + err = -EBUSY; + break; + } + case CAPTURE_ERROR_NOT_SUPPORTED: + case CAPTURE_ERROR_NOT_INITIALIZED: + { + err = -EPERM; + break; + } + case CAPTURE_ERROR_OVERFLOW: + { + err = -EOVERFLOW; + break; + } + case CAPTURE_ERROR_NO_RESOURCES: + { + err = -ENODEV; + break; + } + default: + { + err = -EINVAL; + break; + } + } + + return err; +} + +static int csi5_send_control_message( + struct tegra_vi_channel *chan, + struct CAPTURE_CONTROL_MSG *msg, + uint32_t *result) +{ + int err = 0; + struct vi_capture_control_msg vi_msg; + (void) memset(&vi_msg, 0, sizeof(vi_msg)); + vi_msg.ptr = (uint64_t)msg; + vi_msg.size = sizeof(*msg); + 
vi_msg.response = (uint64_t)msg; + + err = vi_capture_control_message(chan, &vi_msg); + if (err < 0) + return err; + + return verify_capture_control_response(*result); +} + +static int csi5_stream_open(struct tegra_csi_channel *chan, u32 stream_id, + u32 csi_port) +{ + + struct tegra_csi_device *csi = chan->csi; + struct tegra_channel *tegra_chan = + v4l2_get_subdev_hostdata(&chan->subdev); + struct CAPTURE_CONTROL_MSG msg; + int vi_port = 0; + /* If the tegra_vi_channel is NULL it means that is PCL TPG usecase where fusa UMD opens the + * VI channel and sends channel messages but for CSI messages it uses this V4L2 path. + * In such a case query fusacapture KMD for the tegra_vi_channel associated with the + * current stream id/vc id combination. + * If still NULL, we are in erroroneous state, exit with error. + */ + if (tegra_chan->tegra_vi_channel[0] == NULL) { + tegra_chan->tegra_vi_channel[0] = get_tegra_vi_channel(stream_id, + tegra_chan->virtual_channel); + if (tegra_chan->tegra_vi_channel[0] == NULL) { + dev_err(csi->dev, "%s: VI channel not found for stream- %d vc- %d\n", + __func__,stream_id,tegra_chan->virtual_channel); + return -EINVAL; + } + } + + /* Open NVCSI stream */ + memset(&msg, 0, sizeof(msg)); + msg.header.msg_id = CAPTURE_PHY_STREAM_OPEN_REQ; + + msg.phy_stream_open_req.stream_id = stream_id; + msg.phy_stream_open_req.csi_port = csi_port; + + if (tegra_chan->valid_ports > 1) + vi_port = (stream_id > 0) ? 
1 : 0; + else + vi_port = 0; + + return csi5_send_control_message(tegra_chan->tegra_vi_channel[vi_port], &msg, + &msg.phy_stream_open_resp.result); +} + +static void csi5_stream_close(struct tegra_csi_channel *chan, u32 stream_id, + u32 csi_port) +{ + struct tegra_csi_device *csi = chan->csi; + struct tegra_channel *tegra_chan = + v4l2_get_subdev_hostdata(&chan->subdev); + int err = 0; + int vi_port = 0; + + struct CAPTURE_CONTROL_MSG msg; + + /* Close NVCSI stream */ + memset(&msg, 0, sizeof(msg)); + msg.header.msg_id = CAPTURE_PHY_STREAM_CLOSE_REQ; + + msg.phy_stream_close_req.stream_id = stream_id; + msg.phy_stream_close_req.csi_port = csi_port; + + if (tegra_chan->valid_ports > 1) + vi_port = (stream_id > 0) ? 1 : 0; + else + vi_port = 0; + + err = csi5_send_control_message(tegra_chan->tegra_vi_channel[vi_port], &msg, + &msg.phy_stream_open_resp.result); + if (err < 0) { + dev_err(csi->dev, "%s: Error in closing stream_id=%u, csi_port=%u\n", + __func__, stream_id, csi_port); + } + + return; +} + +static int csi5_stream_set_config(struct tegra_csi_channel *chan, u32 stream_id, + u32 csi_port, int csi_lanes) +{ + struct tegra_csi_device *csi = chan->csi; + struct tegra_channel *tegra_chan = + v4l2_get_subdev_hostdata(&chan->subdev); + + struct camera_common_data *s_data = chan->s_data; + const struct sensor_mode_properties *mode = NULL; + + unsigned int cil_settletime = 0; + int vi_port = 0; + + struct CAPTURE_CONTROL_MSG msg; + struct nvcsi_brick_config brick_config; + struct nvcsi_cil_config cil_config; + bool is_cphy = (csi_lanes == 3); + dev_dbg(csi->dev, "%s: stream_id=%u, csi_port=%u\n", + __func__, stream_id, csi_port); + + /* Attempt to find the cil_settingtime from the device tree */ + if (s_data) { + int idx = s_data->mode_prop_idx; + + dev_dbg(csi->dev, "cil_settingtime is pulled from device"); + if (idx < s_data->sensor_props.num_modes) { + mode = &s_data->sensor_props.sensor_modes[idx]; + cil_settletime = mode->signal_properties.cil_settletime; + } 
else { + dev_dbg(csi->dev, "mode not listed in DT, use default"); + cil_settletime = 0; + } + } else if (chan->of_node) { + int err = 0; + const char *str; + + dev_dbg(csi->dev, + "cil_settletime is pulled from device of_node"); + err = of_property_read_string(chan->of_node, "cil_settletime", + &str); + if (!err) { + err = kstrtou32(str, 10, &cil_settletime); + if (err) { + dev_dbg(csi->dev, + "no cil_settletime in of_node"); + cil_settletime = 0; + } + } + } + + /* Brick config */ + memset(&brick_config, 0, sizeof(brick_config)); + brick_config.phy_mode = (!is_cphy) ? + NVCSI_PHY_TYPE_DPHY : NVCSI_PHY_TYPE_CPHY; + + /* CIL config */ + memset(&cil_config, 0, sizeof(cil_config)); + cil_config.num_lanes = csi_lanes; + cil_config.lp_bypass_mode = is_cphy ? 0 : 1; + cil_config.t_hs_settle = cil_settletime; + + if (s_data && !chan->pg_mode) + cil_config.mipi_clock_rate = read_mipi_clk_from_dt(chan) / 1000; + else + cil_config.mipi_clock_rate = csi->clk_freq / 1000; + + /* Set NVCSI stream config */ + memset(&msg, 0, sizeof(msg)); + msg.header.msg_id = CAPTURE_CSI_STREAM_SET_CONFIG_REQ; + + msg.csi_stream_set_config_req.stream_id = stream_id; + msg.csi_stream_set_config_req.csi_port = csi_port; + msg.csi_stream_set_config_req.brick_config = brick_config; + msg.csi_stream_set_config_req.cil_config = cil_config; + + if (tegra_chan->valid_ports > 1) + vi_port = (stream_id > 0) ? 
1 : 0; + else + vi_port = 0; + + return csi5_send_control_message(tegra_chan->tegra_vi_channel[vi_port], &msg, + &msg.csi_stream_set_config_resp.result); +} + +static int csi5_stream_tpg_start(struct tegra_csi_channel *chan, u32 stream_id, + u32 virtual_channel_id) +{ + int err = 0; + struct tegra_csi_device *csi = chan->csi; + struct tegra_csi_port *port = &chan->ports[0]; + struct tegra_channel *tegra_chan = + v4l2_get_subdev_hostdata(&chan->subdev); + + struct CAPTURE_CONTROL_MSG msg; + union nvcsi_tpg_config *tpg_config = NULL; + + dev_dbg(csi->dev, "%s: stream_id=%u, virtual_channel_id=%d\n", + __func__, stream_id, virtual_channel_id); + + /* Set TPG config for a virtual channel */ + memset(&msg, 0, sizeof(msg)); + msg.header.msg_id = CAPTURE_CSI_STREAM_TPG_SET_CONFIG_REQ; + + tpg_config = &(msg.csi_stream_tpg_set_config_req.tpg_config); + + csi->get_tpg_settings(port, tpg_config); + err = csi5_send_control_message(tegra_chan->tegra_vi_channel[0], &msg, + &msg.csi_stream_tpg_set_config_resp.result); + if (err < 0) { + dev_err(csi->dev, "%s: Error in TPG set config stream_id=%u, csi_port=%u\n", + __func__, port->stream_id, port->csi_port); + } + + /* Enable TPG on a stream */ + memset(&msg, 0, sizeof(msg)); + msg.header.msg_id = CAPTURE_CSI_STREAM_TPG_START_RATE_REQ; + + msg.csi_stream_tpg_start_rate_req.stream_id = stream_id; + msg.csi_stream_tpg_start_rate_req.virtual_channel_id = virtual_channel_id; + msg.csi_stream_tpg_start_rate_req.frame_rate = port->framerate; + + err = csi5_send_control_message(tegra_chan->tegra_vi_channel[0], &msg, + &msg.csi_stream_tpg_start_resp.result); + if (err < 0) { + dev_err(csi->dev, "%s: Error in TPG start stream_id=%u, csi_port=%u\n", + __func__, port->stream_id, port->csi_port); + } + + return err; +} + +static void csi5_stream_tpg_stop(struct tegra_csi_channel *chan, u32 stream_id, + u32 virtual_channel_id) +{ + struct tegra_csi_device *csi = chan->csi; + struct tegra_channel *tegra_chan = + 
v4l2_get_subdev_hostdata(&chan->subdev); + int err = 0; + + struct CAPTURE_CONTROL_MSG msg; + + dev_dbg(csi->dev, "%s: stream_id=%u, virtual_channel_id=%d\n", + __func__, stream_id, virtual_channel_id); + + /* Disable TPG on a stream */ + memset(&msg, 0, sizeof(msg)); + msg.header.msg_id = CAPTURE_CSI_STREAM_TPG_STOP_REQ; + + msg.csi_stream_tpg_stop_req.stream_id = stream_id; + msg.csi_stream_tpg_stop_req.virtual_channel_id = virtual_channel_id; + + err = csi5_send_control_message(tegra_chan->tegra_vi_channel[0], &msg, + &msg.csi_stream_tpg_stop_resp.result); + if (err < 0) { + dev_err(csi->dev, "%s: Error in TPG stop stream_id=%u\n", + __func__, stream_id); + } +} + +/* Transform the user mode setting to TPG recoginzable equivalent. Gain ratio + * supported by TPG is in range of 0.125 to 8. From userspace we multiply the + * gain setting by 8, before v4l2 ioctl call. It is tranformed before + * IVC message + */ +static uint32_t get_tpg_gain_ratio_setting(int gain_ratio_tpg) +{ + const uint32_t tpg_gain_ratio_settings[] = { + CAPTURE_CSI_STREAM_TPG_GAIN_RATIO_ONE_EIGHTH, + CAPTURE_CSI_STREAM_TPG_GAIN_RATIO_ONE_FOURTH, + CAPTURE_CSI_STREAM_TPG_GAIN_RATIO_HALF, + CAPTURE_CSI_STREAM_TPG_GAIN_RATIO_NONE, + CAPTURE_CSI_STREAM_TPG_GAIN_RATIO_TWO_TO_ONE, + CAPTURE_CSI_STREAM_TPG_GAIN_RATIO_FOUR_TO_ONE, + CAPTURE_CSI_STREAM_TPG_GAIN_RATIO_EIGHT_TO_ONE}; + + return tpg_gain_ratio_settings[order_base_2(gain_ratio_tpg)]; +} + +int csi5_tpg_set_gain(struct tegra_csi_channel *chan, int gain_ratio_tpg) +{ + struct tegra_csi_device *csi = chan->csi; + struct tegra_csi_port *port = &chan->ports[0]; + struct tegra_channel *tegra_chan = + v4l2_get_subdev_hostdata(&chan->subdev); + int err = 0; + struct CAPTURE_CONTROL_MSG msg; + + if (!chan->pg_mode) { + dev_err(csi->dev, "Gain to be set only in TPG mode\n"); + return -EINVAL; + } + + if (tegra_chan->tegra_vi_channel[0] == NULL) { + /* We come here during initial v4l2 ctrl setup during TPG LKM + * loading + */ + dev_dbg(csi->dev, 
"VI channel is not setup yet\n"); + return 0; + } + + (void)memset(&msg, 0, sizeof(msg)); + msg.header.msg_id = CAPTURE_CSI_STREAM_TPG_APPLY_GAIN_REQ; + msg.csi_stream_tpg_apply_gain_req.stream_id = port->stream_id; + msg.csi_stream_tpg_apply_gain_req.virtual_channel_id = + port->virtual_channel_id; + msg.csi_stream_tpg_apply_gain_req.gain_ratio = + get_tpg_gain_ratio_setting(gain_ratio_tpg); + + err = csi5_send_control_message(tegra_chan->tegra_vi_channel[0], &msg, + &msg.csi_stream_tpg_apply_gain_resp.result); + if (err < 0) { + dev_err(csi->dev, "%s: Error in setting TPG gain stream_id=%u, csi_port=%u\n", + __func__, port->stream_id, port->csi_port); + } + + return err; +} + +static int csi5_start_streaming(struct tegra_csi_channel *chan, int port_idx) +{ + int err = 0, num_lanes; + struct tegra_csi_device *csi = chan->csi; + struct tegra_csi_port *port = &chan->ports[port_idx]; + u32 csi_pt, st_id, vc_id; + + if (chan->pg_mode) { + csi_pt = NVCSI_PORT_UNSPECIFIED; + st_id = port->stream_id; + } else { + csi_pt = port->csi_port; + st_id = csi5_port_to_stream(port->csi_port); + } + vc_id = port->virtual_channel_id; + num_lanes = port->lanes; + + dev_dbg(csi->dev, "%s: csi_pt=%u, st_id=%u, vc_id=%u, pg_mode=0x%x\n", + __func__, csi_pt, st_id, vc_id, chan->pg_mode); + + if (!chan->pg_mode) + csi5_stream_set_config(chan, st_id, csi_pt, num_lanes); + + csi5_stream_open(chan, st_id, csi_pt); + + if (chan->pg_mode) { + err = csi5_stream_tpg_start(chan, st_id, vc_id); + if (err) + return err; + } + + return err; +} + +static void csi5_stop_streaming(struct tegra_csi_channel *chan, int port_idx) +{ + struct tegra_csi_device *csi = chan->csi; + struct tegra_csi_port *port = &chan->ports[port_idx]; + u32 csi_pt, st_id, vc_id; + + if (chan->pg_mode) { + csi_pt = NVCSI_PORT_UNSPECIFIED; + st_id = port->stream_id; + } else { + csi_pt = port->csi_port; + st_id = csi5_port_to_stream(port->csi_port); + } + vc_id = port->virtual_channel_id; + + dev_dbg(csi->dev, "%s: csi_pt=%u, 
st_id=%u, vc_id=%u, pg_mode=0x%x\n", + __func__, csi_pt, st_id, vc_id, chan->pg_mode); + + if (chan->pg_mode) + csi5_stream_tpg_stop(chan, st_id, vc_id); + + csi5_stream_close(chan, st_id, csi_pt); +} + +static int csi5_error_recover(struct tegra_csi_channel *chan, int port_idx) +{ + int err = 0; + struct tegra_csi_device *csi = chan->csi; + struct tegra_csi_port *port = &chan->ports[0]; + + csi5_stop_streaming(chan, port_idx); + + err = csi5_start_streaming(chan, port_idx); + if (err) { + dev_err(csi->dev, "failed to restart csi stream %d\n", + csi5_port_to_stream(port->csi_port)); + } + + return err; +} + +static int csi5_mipi_cal(struct tegra_csi_channel *chan) +{ + /* Camera RTCPU handles MIPI calibration */ + return 0; +} + +static int csi5_hw_init(struct tegra_csi_device *csi) +{ + dev_dbg(csi->dev, "%s\n", __func__); + + csi->iomem[0] = csi->iomem_base + CSI5_TEGRA_CSI_STREAM_0_BASE; + csi->iomem[1] = csi->iomem_base + CSI5_TEGRA_CSI_STREAM_2_BASE; + csi->iomem[2] = csi->iomem_base + CSI5_TEGRA_CSI_STREAM_4_BASE; + + return 0; +} + +struct tegra_csi_fops csi5_fops = { + .csi_power_on = csi5_power_on, + .csi_power_off = csi5_power_off, + .csi_start_streaming = csi5_start_streaming, + .csi_stop_streaming = csi5_stop_streaming, + .csi_error_recover = csi5_error_recover, + .mipical = csi5_mipi_cal, + .hw_init = csi5_hw_init, + .tpg_set_gain = csi5_tpg_set_gain, +}; +EXPORT_SYMBOL(csi5_fops); \ No newline at end of file diff --git a/drivers/media/platform/tegra/camera/nvcsi/csi5_fops.h b/drivers/media/platform/tegra/camera/nvcsi/csi5_fops.h new file mode 100644 index 00000000..291a1084 --- /dev/null +++ b/drivers/media/platform/tegra/camera/nvcsi/csi5_fops.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Tegra CSI5 device common APIs + * + * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved. 
+ */ + +#ifndef __CSI5_H__ +#define __CSI5_H__ + +extern struct tegra_csi_fops csi5_fops; + +int csi5_tpg_set_gain(struct tegra_csi_channel *chan, int gain_ratio_tpg); + +#endif diff --git a/drivers/media/platform/tegra/camera/regmap_util.c b/drivers/media/platform/tegra/camera/regmap_util.c new file mode 100644 index 00000000..d81aa77a --- /dev/null +++ b/drivers/media/platform/tegra/camera/regmap_util.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * regmap_util.c - utilities for writing regmap tables + * + * Copyright (c) 2013-2022, NVIDIA Corporation. All Rights Reserved. + */ + +#include +#include +#include + +int +regmap_util_write_table_8(struct regmap *regmap, + const struct reg_8 table[], + const struct reg_8 override_list[], + int num_override_regs, u16 wait_ms_addr, u16 end_addr) +{ + int err = 0; + const struct reg_8 *next; + int i; + u8 val; + + int range_start = -1; + unsigned int range_count = 0; + /* bug 200048392 - + * the vi i2c cannot take a FIFO buffer bigger than 16 bytes + */ + u8 range_vals[16]; + int max_range_vals = ARRAY_SIZE(range_vals); + + for (next = table;; next++) { + /* If we have a range open and */ + /* either the address doesn't match */ + /* or the temporary storage is full, flush */ + if ((next->addr != range_start + range_count) || + (next->addr == end_addr) || + (next->addr == wait_ms_addr) || + (range_count == max_range_vals)) { + + if (range_count == 1) { + err = + regmap_write(regmap, range_start, + range_vals[0]); + } else if (range_count > 1) { + err = + regmap_bulk_write(regmap, range_start, + &range_vals[0], + range_count); + } + + if (err) { + pr_err("%s:regmap_util_write_table:%d", + __func__, err); + return err; + } + + range_start = -1; + range_count = 0; + + /* Handle special address values */ + if (next->addr == end_addr) + break; + + if (next->addr == wait_ms_addr) { + msleep_range(next->val); + continue; + } + } + + val = next->val; + + /* When an override list is passed in, replace the reg */ + 
/* value to write if the reg is in the list */ + if (override_list) { + for (i = 0; i < num_override_regs; i++) { + if (next->addr == override_list[i].addr) { + val = override_list[i].val; + break; + } + } + } + + if (range_start == -1) + range_start = next->addr; + + range_vals[range_count++] = val; + } + return 0; +} + +EXPORT_SYMBOL_GPL(regmap_util_write_table_8); + +int +regmap_util_write_table_16_as_8(struct regmap *regmap, + const struct reg_16 table[], + const struct reg_16 override_list[], + int num_override_regs, + u16 wait_ms_addr, u16 end_addr) +{ + int err = 0; + const struct reg_16 *next; + int i; + u16 val; + + int range_start = -1; + unsigned int range_count = 0; + u8 range_vals[256]; + int max_range_vals = ARRAY_SIZE(range_vals) - 1; + + for (next = table;; next++) { + /* If we have a range open and */ + /* either the address doesn't match */ + /* or the temporary storage is full, flush*/ + if ((next->addr != range_start + range_count) || + (next->addr == end_addr) || + (next->addr == wait_ms_addr) || + (range_count == max_range_vals)) { + + if (range_count > 1) { + err = + regmap_bulk_write(regmap, range_start, + &range_vals[0], + range_count); + } + + if (err) { + pr_err("%s:regmap_util_write_table:%d", + __func__, err); + return err; + } + + range_start = -1; + range_count = 0; + + /* Handle special address values */ + if (next->addr == end_addr) + break; + + if (next->addr == wait_ms_addr) { + msleep_range(next->val); + continue; + } + } + + val = next->val; + + /* When an override list is passed in, replace the reg */ + /* value to write if the reg is in the list */ + if (override_list) { + for (i = 0; i < num_override_regs; i++) { + if (next->addr == override_list[i].addr) { + val = override_list[i].val; + break; + } + } + } + + if (range_start == -1) + range_start = next->addr; + + range_vals[range_count++] = (u8) (val >> 8); + range_vals[range_count++] = (u8) (val & 0xFF); + } + return 0; +} + 
+EXPORT_SYMBOL_GPL(regmap_util_write_table_16_as_8); +MODULE_LICENSE("GPL"); + diff --git a/drivers/media/platform/tegra/camera/sensor_common.c b/drivers/media/platform/tegra/camera/sensor_common.c new file mode 100644 index 00000000..b058bdd8 --- /dev/null +++ b/drivers/media/platform/tegra/camera/sensor_common.c @@ -0,0 +1,864 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sensor_common.c - utilities for tegra sensor drivers + * + * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved. + */ + +#include +#include +#include +#include + +static int read_property_u32( + struct device_node *node, const char *name, u32 *value) +{ + const char *str; + int err = 0; + + err = of_property_read_string(node, name, &str); + if (err) + return -ENODATA; + + err = kstrtou32(str, 10, value); + if (err) + return -EFAULT; + + return 0; +} + +static int read_property_u64( + struct device_node *node, const char *name, u64 *value) +{ + const char *str; + int err = 0; + + err = of_property_read_string(node, name, &str); + if (err) + return -ENODATA; + + err = kstrtou64(str, 10, value); + if (err) + return -EFAULT; + + return 0; +} + +static int sensor_common_parse_signal_props( + struct device *dev, struct device_node *node, + struct sensor_signal_properties *signal) +{ + const char *temp_str; + int err = 0; + u32 value = 0; + u64 val64 = 0; + u64 rate; + int depth; + + err = of_property_read_string(node, "phy_mode", &temp_str); + if (err) { + dev_dbg(dev, "%s: use default phy mode DPHY\n", __func__); + signal->phy_mode = CSI_PHY_MODE_DPHY; + } else { + if (strcmp(temp_str, "CPHY") == 0) + signal->phy_mode = CSI_PHY_MODE_CPHY; + else if (strcmp(temp_str, "DPHY") == 0) + signal->phy_mode = CSI_PHY_MODE_DPHY; + else if (strcmp(temp_str, "SLVS") == 0) + signal->phy_mode = SLVS_EC; + else { + dev_err(dev, "%s: Invalid Phy mode\n", __func__); + return -EINVAL; + } + } + + /* Do not report error for these properties yet */ + err = read_property_u32(node, "readout_orientation", 
&value); + if (err) + signal->readout_orientation = 0; + else + signal->readout_orientation = value; + + err = read_property_u32(node, "mclk_khz", &value); + if (err) + signal->mclk_freq = 0; + else + signal->mclk_freq = value; + + err = read_property_u32(node, "num_lanes", &value); + if (err) { + dev_err(dev, "%s:num_lanes property missing\n", __func__); + return err; + } + signal->num_lanes = value; + + err = read_property_u64(node, "pix_clk_hz", &val64); + if (err) { + dev_err(dev, "%s:pix_clk_hz property missing\n", __func__); + return err; + } + signal->pixel_clock.val = val64; + + err = read_property_u64(node, "serdes_pix_clk_hz", &val64); + if (err) + signal->serdes_pixel_clock.val = 0; + else + signal->serdes_pixel_clock.val = val64; + + if (signal->serdes_pixel_clock.val != 0ULL) { + if (signal->serdes_pixel_clock.val < signal->pixel_clock.val) { + dev_err(dev, + "%s: serdes_pix_clk_hz is lower than pix_clk_hz!\n", + __func__); + return -EINVAL; + } + rate = signal->serdes_pixel_clock.val; + } else { + rate = signal->pixel_clock.val; + } + + err = read_property_u32(node, "csi_pixel_bit_depth", &depth); + if (err) { + dev_err(dev, + "%s:csi_pixel_bit_depth property missing.\n", + __func__); + return err; + } + + /* Convert pixel rate to lane data rate */ + rate = rate * depth / signal->num_lanes; + + if (signal->phy_mode == CSI_PHY_MODE_DPHY) { + /* MIPI clock rate */ + signal->mipi_clock.val = rate / 2; + } else if (signal->phy_mode == CSI_PHY_MODE_CPHY) { + /* Symbol rate */ + signal->mipi_clock.val = rate * 7 / 16; + } else { + /* Data rate */ + signal->mipi_clock.val = rate; + } + + err = read_property_u32(node, "cil_settletime", &value); + if (err) + signal->cil_settletime = 0; + else + signal->cil_settletime = value; + + /* initialize default if this prop not available */ + err = of_property_read_string(node, "discontinuous_clk", &temp_str); + if (!err) + signal->discontinuous_clk = + !strncmp(temp_str, "yes", sizeof("yes")); + else + 
signal->discontinuous_clk = 1; + + /* initialize default if this prop not available */ + err = of_property_read_string(node, "dpcm_enable", &temp_str); + if (!err) + signal->dpcm_enable = + !strncmp(temp_str, "true", sizeof("true")); + else + signal->dpcm_enable = 0; + + /* initialize default if this prop not available */ + err = of_property_read_string(node, + "deskew_initial_enable", &temp_str); + if (!err) + signal->deskew_initial_enable = + !strncmp(temp_str, "true", sizeof("true")); + else + signal->deskew_initial_enable = 0; + err = of_property_read_string(node, + "deskew_periodic_enable", &temp_str); + if (!err) + signal->deskew_periodic_enable = + !strncmp(temp_str, "true", sizeof("true")); + else + signal->deskew_periodic_enable = 0; + + err = of_property_read_string(node, "tegra_sinterface", &temp_str); + if (err) { + dev_err(dev, + "%s: tegra_sinterface property missing\n", __func__); + return err; + } + + if (strcmp(temp_str, "serial_a") == 0) + signal->tegra_sinterface = 0; + else if (strcmp(temp_str, "serial_b") == 0) + signal->tegra_sinterface = 1; + else if (strcmp(temp_str, "serial_c") == 0) + signal->tegra_sinterface = 2; + else if (strcmp(temp_str, "serial_d") == 0) + signal->tegra_sinterface = 3; + else if (strcmp(temp_str, "serial_e") == 0) + signal->tegra_sinterface = 4; + else if (strcmp(temp_str, "serial_f") == 0) + signal->tegra_sinterface = 5; + else if (strcmp(temp_str, "serial_g") == 0) + signal->tegra_sinterface = 6; + else if (strcmp(temp_str, "serial_h") == 0) + signal->tegra_sinterface = 7; + else if (strcmp(temp_str, "host") == 0) + signal->tegra_sinterface = 0; /* for vivid driver */ + else { + dev_err(dev, + "%s: tegra_sinterface property out of range\n", + __func__); + return -EINVAL; + } + + return 0; +} + +static int extract_pixel_format( + const char *pixel_t, u32 *format) +{ + size_t size = strnlen(pixel_t, OF_MAX_STR_LEN); + + if (strncmp(pixel_t, "bayer_bggr10", size) == 0) + *format = V4L2_PIX_FMT_SBGGR10; + else if 
(strncmp(pixel_t, "bayer_rggb10", size) == 0) + *format = V4L2_PIX_FMT_SRGGB10; + else if (strncmp(pixel_t, "bayer_grbg10", size) == 0) + *format = V4L2_PIX_FMT_SGRBG10; + else if (strncmp(pixel_t, "bayer_gbrg10", size) == 0) + *format = V4L2_PIX_FMT_SGBRG10; + else if (strncmp(pixel_t, "bayer_bggr12", size) == 0) + *format = V4L2_PIX_FMT_SBGGR12; + else if (strncmp(pixel_t, "bayer_rggb12", size) == 0) + *format = V4L2_PIX_FMT_SRGGB12; + else if (strncmp(pixel_t, "bayer_gbrg12", size) == 0) + *format = V4L2_PIX_FMT_SGBRG12; + else if (strncmp(pixel_t, "bayer_grbg12", size) == 0) + *format = V4L2_PIX_FMT_SGRBG12; + else if (strncmp(pixel_t, "rgb_rgb88824", size) == 0) + *format = V4L2_PIX_FMT_RGB24; + else if (strncmp(pixel_t, "bayer_wdr_pwl_rggb12", size) == 0) + *format = V4L2_PIX_FMT_SRGGB12; + else if (strncmp(pixel_t, "bayer_wdr_pwl_gbrg12", size) == 0) + *format = V4L2_PIX_FMT_SGBRG12; + else if (strncmp(pixel_t, "bayer_wdr_pwl_grbg12", size) == 0) + *format = V4L2_PIX_FMT_SGRBG12; + else if (strncmp(pixel_t, "bayer_wdr_dol_rggb10", size) == 0) + *format = V4L2_PIX_FMT_SRGGB10; +#if 0 /* disable for Canonical kenrel */ + else if (strncmp(pixel_t, "bayer_xbggr10p", size) == 0) + *format = V4L2_PIX_FMT_XBGGR10P; + else if (strncmp(pixel_t, "bayer_xrggb10p", size) == 0) + *format = V4L2_PIX_FMT_XRGGB10P; +#endif + else if (strncmp(pixel_t, "yuv_yuyv16", size) == 0) + *format = V4L2_PIX_FMT_YUYV; + else if (strncmp(pixel_t, "yuv_yvyu16", size) == 0) + *format = V4L2_PIX_FMT_YVYU; + else if (strncmp(pixel_t, "yuv_uyvy16", size) == 0) + *format = V4L2_PIX_FMT_UYVY; + else if (strncmp(pixel_t, "yuv_vyuy16", size) == 0) + *format = V4L2_PIX_FMT_VYUY; + else { + pr_err("%s: Need to extend format%s\n", __func__, pixel_t); + return -EINVAL; + } + + return 0; +} + +static int sensor_common_parse_image_props( + struct device *dev, struct device_node *node, + struct sensor_image_properties *image) +{ + const char *temp_str; + int err = 0, ret = 0; + const char *phase_str, 
*mode_str; + int depth; + char pix_format[24]; + u32 value = 0; + + err = read_property_u32(node, "active_w", + &image->width); + if (err) { + dev_err(dev, "%s:active_w property missing\n", __func__); + goto fail; + } + + err = read_property_u32(node, "active_h", + &image->height); + if (err) { + dev_err(dev, "%s:active_h property missing\n", __func__); + goto fail; + } + + err = read_property_u32(node, "line_length", + &image->line_length); + if (err) { + dev_err(dev, "%s:Line length property missing\n", __func__); + goto fail; + } + + /* embedded_metadata_height is optional */ + err = read_property_u32(node, "embedded_metadata_height", &value); + if (err) + image->embedded_metadata_height = 0; + else + image->embedded_metadata_height = value; + + err = of_property_read_string(node, "pixel_t", &temp_str); + if (err) { + /* pixel_t missing is only an error if alternate not provided */ + + /* check for alternative format string */ + err = of_property_read_string(node, "pixel_phase", &phase_str); + if (err) { + dev_err(dev, + "%s:pixel_phase property missing.\n", + __func__); + dev_err(dev, + "%s:Either pixel_t or alternate must be present.\n", + __func__); + goto fail; + } + err = of_property_read_string(node, "mode_type", &mode_str); + if (err) { + dev_err(dev, + "%s:mode_type property missing.\n", + __func__); + dev_err(dev, + "%s:Either pixel_t or alternate must be present.\n", + __func__); + goto fail; + } + err = read_property_u32(node, "csi_pixel_bit_depth", &depth); + if (err) { + dev_err(dev, + "%s:csi_pixel_bit_depth property missing.\n", + __func__); + dev_err(dev, + "%s:Either pixel_t or alternate must be present.\n", + __func__); + goto fail; + } + ret = sprintf(pix_format, "%s_%s%d", mode_str, phase_str, depth); + if (ret < 0) + return -EINVAL; + temp_str = pix_format; + } + + err = extract_pixel_format(temp_str, &image->pixel_format); + if (err) { + dev_err(dev, "Unsupported pixel format\n"); + goto fail; + } + +fail: + return err; +} + +static int 
sensor_common_parse_dv_timings( + struct device *dev, struct device_node *node, + struct sensor_dv_timings *timings) +{ + int err = 0; + u32 value = 0; + + /* Do not report error for these properties yet */ + err = read_property_u32(node, "horz_front_porch", &value); + if (err) + timings->hfrontporch = 0; + else + timings->hfrontporch = value; + + err = read_property_u32(node, "horz_sync", &value); + if (err) + timings->hsync = 0; + else + timings->hsync = value; + + err = read_property_u32(node, "horz_back_porch", &value); + if (err) + timings->hbackporch = 0; + else + timings->hbackporch = value; + + err = read_property_u32(node, "vert_front_porch", &value); + if (err) + timings->vfrontporch = 0; + else + timings->vfrontporch = value; + + err = read_property_u32(node, "vert_sync", &value); + if (err) + timings->vsync = 0; + else + timings->vsync = value; + + err = read_property_u32(node, "vert_back_porch", &value); + if (err) + timings->vbackporch = 0; + else + timings->vbackporch = value; + + return 0; +} + +static int sensor_common_parse_control_props( + struct device *dev, struct device_node *node, + struct sensor_control_properties *control) +{ + int err = 0; + u32 value = 0; + u64 val64 = 0; + + err = read_property_u32(node, "gain_factor", &value); + if (err) { + dev_dbg(dev, "%s:%s:property missing\n", + __func__, "gain_factor"); + control->gain_factor = 1; + return 0; + } else + control->gain_factor = value; + + err = read_property_u32(node, "framerate_factor", &value); + if (err) { + dev_err(dev, "%s:%s:property missing\n", + __func__, "framerate_factor"); + control->framerate_factor = 1; + } else + control->framerate_factor = value; + + err = read_property_u32(node, "exposure_factor", &value); + if (err) { + dev_err(dev, "%s:%s:property missing\n", + __func__, "exposure_factor"); + control->exposure_factor = 1; + } else + control->exposure_factor = value; + + /* ignore err for this prop */ + err = read_property_u32(node, "inherent_gain", &value); + if 
(err) + control->inherent_gain = 0; + else + control->inherent_gain = value; + + err = read_property_u32(node, "min_gain_val", &value); + if (err) { + dev_err(dev, "%s:%s:property missing\n", + __func__, "min_gain_val"); + control->min_gain_val = 0; + } else + control->min_gain_val = value; + + err = read_property_u32(node, "max_gain_val", &value); + if (err) { + dev_err(dev, "%s:%s:property missing\n", + __func__, "max_gain_val"); + control->max_gain_val = 0; + } else + control->max_gain_val = value; + + err = read_property_u32(node, "step_gain_val", &value); + if (err) { + dev_err(dev, "%s:%s:property missing\n", + __func__, "step_gain_val"); + control->step_gain_val = 0; + } else + control->step_gain_val = value; + + /* ignore err for this prop */ + err = read_property_u32(node, "min_hdr_ratio", &value); + if (err) + control->min_hdr_ratio = 1; + else + control->min_hdr_ratio = value; + + err = read_property_u32(node, "max_hdr_ratio", &value); + if (err) + control->max_hdr_ratio = 1; + else + control->max_hdr_ratio = value; + + err = read_property_u32(node, "min_framerate", &value); + if (err) { + dev_err(dev, "%s:%s:property missing\n", + __func__, "min_framerate"); + control->min_framerate = 0; + } else + control->min_framerate = value; + + err = read_property_u32(node, "max_framerate", &value); + if (err) { + dev_err(dev, "%s:%s:property missing\n", + __func__, "max_framerate"); + control->max_framerate = 0; + } else + control->max_framerate = value; + + err = read_property_u32(node, "step_framerate", &value); + if (err) { + dev_err(dev, "%s:%s:property missing\n", + __func__, "step_framerate"); + control->step_framerate = 0; + } else + control->step_framerate = value; + + err = read_property_u64(node, "min_exp_time", &val64); + if (err) { + dev_err(dev, "%s:%s:property missing\n", + __func__, "min_exp_time"); + control->min_exp_time.val = 0; + } + control->min_exp_time.val = val64; + + err = read_property_u64(node, "max_exp_time", &val64); + if (err) { + 
dev_err(dev, "%s:%s:property missing\n", + __func__, "max_exp_time"); + control->max_exp_time.val = 0; + } else + control->max_exp_time.val = val64; + + err = read_property_u64(node, "step_exp_time", &val64); + if (err) { + dev_err(dev, "%s:%s:property missing\n", + __func__, "step_exp_time"); + control->step_exp_time.val = 0; + } else + control->step_exp_time.val = val64; + + err = read_property_u32(node, "default_gain", &value); + if (err) { + dev_err(dev, "%s:%s:property missing\n", + __func__, "default_gain"); + control->default_gain = 0; + } else + control->default_gain = value; + + err = read_property_u32(node, "default_framerate", &value); + if (err) { + dev_err(dev, "%s:%s:property missing\n", + __func__, "default_framerate"); + control->default_framerate = 0; + } else + control->default_framerate = value; + + err = read_property_u64(node, "default_exp_time", &val64); + if (err) { + dev_err(dev, "%s:%s:property missing\n", + __func__, "default_exp_time"); + control->default_exp_time.val = 0; + } else + control->default_exp_time.val = val64; + + err = read_property_u32(node, "is_interlaced", &value); + if (err) + control->is_interlaced = 0; + else + control->is_interlaced = value; + + err = read_property_u32(node, "interlaced_type", &value); + if (err) + control->interlace_type = 0; + else + control->interlace_type = value; + + return 0; +} + +int sensor_common_parse_num_modes(const struct device *dev) +{ + struct device_node *np; + struct device_node *node = NULL; + char temp_str[OF_MAX_STR_LEN]; + int num_modes = 0; + int i, ret; + + if (!dev || !dev->of_node) + return 0; + + np = dev->of_node; + + for (i = 0; num_modes < MAX_NUM_SENSOR_MODES; i++) { + ret = snprintf(temp_str, sizeof(temp_str), "%s%d", + OF_SENSORMODE_PREFIX, i); + if (ret < 0) + return 0; + node = of_get_child_by_name(np, temp_str); + of_node_put(node); + if (node == NULL) + break; + num_modes++; + } + + return num_modes; +} +EXPORT_SYMBOL(sensor_common_parse_num_modes); + +static int 
sensor_common_init_i2c_device_config( + struct device *dev, struct device_node *np, + struct sensor_cfg *cfg) +{ + struct i2c_sensor_cfg *i2c_sensor = &cfg->u.i2c_sensor; + struct device_node *node = NULL; + struct device_node *parent = NULL; + int err = 0; + u32 value = 0; + bool is_mux_valid = 0; + + cfg->type = CAMERA_DEVICE_I2C_SENSOR; + err = of_property_read_u32(np, "reg", &value); + if (err) { + dev_err(dev, "sensor address unavailable\n"); + return err; + } + + /* Reading more devices has to be supported */ + i2c_sensor->num_devs = 1; + i2c_sensor->sd[0].addr = value; + + parent = of_get_parent(np); + /* verify the parent is mux or i2c bus */ + is_mux_valid = + of_property_read_bool(parent, "i2c-mux,deselect-on-exit"); + i2c_sensor->mux.is_mux_valid = is_mux_valid; + + if (is_mux_valid) { + + /* at mux port read the mux channel */ + err = of_property_read_u32(parent, "reg", &value); + if (err) { + dev_err(dev, "mux channel unavailable\n"); + return err; + } + i2c_sensor->mux.mux_channel = value; + + /* move to mux node */ + node = of_get_parent(parent); + of_node_put(parent); + err = of_property_read_u32(node, "reg", &value); + if (err) { + dev_err(dev, "mux address unavailable\n"); + return err; + } + i2c_sensor->mux.mux_addr = value; + + /* move to i2c bus node */ + parent = of_get_parent(node); + of_node_put(node); + } else { + /* move to next parent to check + * if it is a gpio based i2c mux + */ + node = of_get_parent(parent); + + if (of_device_is_compatible(node, "i2c-mux-gpio")) { + of_node_put(parent); + + /* move to i2c bus node */ + parent = of_parse_phandle(node, "i2c-parent", 0); + } + } + + /* read parent which is i2c bus */ + err = of_property_read_u32_index(parent, "reg", 1, &value); + if (err) { + dev_err(dev, "i2c bus regbase unavailable\n"); + return err; + } + i2c_sensor->bus.reg_base = value; + + err = of_property_read_u32(parent, "clock-frequency", &value); + if (err) { + dev_err(dev, "bus clock frequency unavailable\n"); + return err; 
+ } + i2c_sensor->bus.clk_rate = value; + + of_node_put(parent); + /* + * Read any additional flags to configure I2C for any + * special properties of the device like-high-speed mode, + * 10bit addressing etc., + */ + + return 0; +} + +static int sensor_common_init_spi_device_config( + struct device *dev, struct device_node *np, + struct sensor_cfg *cfg) +{ + struct spi_sensor_cfg *spi_sensor = &cfg->u.spi_sensor; + struct device_node *parent = NULL; + int err = 0; + u32 value = 0; + + cfg->type = CAMERA_DEVICE_SPI_SENSOR; + err = of_property_read_u32(np, "reg", &value); + if (err) { + dev_err(dev, "sensor address unavailable\n"); + return err; + } + + /* Reading more devices has to be supported */ + spi_sensor->num_devs = 1; + spi_sensor->sd[0].addr = value; + + parent = of_get_parent(np); + + /* TODO: Add logic for spi mux if available */ + + /* read parent which is spi bus */ + err = of_property_read_u32_index(parent, "reg", 1, &value); + if (err) { + dev_err(dev, "spi bus regbase unavailable\n"); + return err; + } + spi_sensor->bus.reg_base = value; + + err = of_property_read_u32(parent, "spi-max-frequency", &value); + if (err) { + dev_err(dev, "bus clock frequency unavailable\n"); + return err; + } + spi_sensor->bus.clk_rate = value; + + of_node_put(parent); + /* Read any additional flags to configure SPI */ + + return 0; +} + +static int sensor_common_init_device_config( + struct device *dev, struct device_node *np, + struct sensor_cfg *cfg) +{ + struct device_node *parent = NULL; + char *tmp; + int err = 0; + + if (!np) + return -EINVAL; + + parent = of_get_parent(np); + if (!parent) + return -EINVAL; + + tmp = strnstr(parent->name, "i2c", 4); + if (tmp != NULL) { + err = sensor_common_init_i2c_device_config(dev, np, cfg); + if (err) + goto exit; + } + + tmp = strnstr(parent->name, "spi", 4); + if (tmp != NULL) { + err = sensor_common_init_spi_device_config(dev, np, cfg); + if (err) + goto exit; + } + +exit: + of_node_put(parent); + return err; +} + +int 
sensor_common_init_sensor_properties( + struct device *dev, struct device_node *np, + struct sensor_properties *sensor) +{ + char temp_str[OF_MAX_STR_LEN]; + struct device_node *node = NULL; + int num_modes = 0; + int err, i; + + if (sensor == NULL) + return -EINVAL; + + err = sensor_common_init_device_config(dev, np, &sensor->cfg); + if (err) + return err; + + /* get number of modes */ + for (i = 0; num_modes < MAX_NUM_SENSOR_MODES; i++) { + err = snprintf(temp_str, sizeof(temp_str), "%s%d", + OF_SENSORMODE_PREFIX, i); + if (err < 0) + return -EINVAL; + + node = of_get_child_by_name(np, temp_str); + of_node_put(node); + if (node == NULL) + break; + num_modes++; + } + sensor->num_modes = num_modes; + + sensor->sensor_modes = devm_kzalloc(dev, + num_modes * sizeof(struct sensor_mode_properties), + GFP_KERNEL); + if (!sensor->sensor_modes) { + dev_err(dev, "Failed to allocate memory for sensor modes\n"); + err = -ENOMEM; + goto alloc_fail; + } + + for (i = 0; i < num_modes; i++) { + err = snprintf(temp_str, sizeof(temp_str), "%s%d", + OF_SENSORMODE_PREFIX, i); + if (err < 0) + return -EINVAL; + + node = of_get_child_by_name(np, temp_str); + if (node == NULL) { + dev_err(dev, "Failed to find %s\n", temp_str); + err = -ENODATA; + goto fail; + }; + + dev_dbg(dev, "parsing for %s props\n", temp_str); + + err = sensor_common_parse_signal_props(dev, node, + &sensor->sensor_modes[i].signal_properties); + if (err) { + dev_err(dev, "Failed to read %s signal props\n", + temp_str); + goto fail; + } + + err = sensor_common_parse_image_props(dev, node, + &sensor->sensor_modes[i].image_properties); + if (err) { + dev_err(dev, "Failed to read %s image props\n", + temp_str); + goto fail; + } + + err = sensor_common_parse_dv_timings(dev, node, + &sensor->sensor_modes[i].dv_timings); + if (err) { + dev_err(dev, "Failed to read %s DV timings\n", + temp_str); + goto fail; + } + + err = sensor_common_parse_control_props(dev, node, + &sensor->sensor_modes[i].control_properties); + if 
(err) { + dev_err(dev, "Failed to read %s control props\n", + temp_str); + goto fail; + } + of_node_put(node); + } + + return 0; + +fail: + devm_kfree(dev, sensor->sensor_modes); +alloc_fail: + of_node_put(node); + return err; +} +EXPORT_SYMBOL(sensor_common_init_sensor_properties); diff --git a/drivers/media/platform/tegra/camera/tegracam_core.c b/drivers/media/platform/tegra/camera/tegracam_core.c new file mode 100644 index 00000000..142e53d8 --- /dev/null +++ b/drivers/media/platform/tegra/camera/tegracam_core.c @@ -0,0 +1,204 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * tegracam_core - tegra camera framework initialization + * + * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved. + */ +#include +#include +#include +#include +#include +#include + +struct tegracam_device_entry { + struct tegracam_device *tc_dev; + struct list_head list; +}; + +static struct list_head tc_device_list_head = + LIST_HEAD_INIT(tc_device_list_head); +static DEFINE_MUTEX(tc_device_list_mutex); + +/* use semantic versioning convention */ +#define TEGRACAM_MAJOR_VERSION 2 +#define TEGRACAM_MINOR_VERSION 0 +#define TEGRACAM_PATCH_VERSION 6 + +u32 tegracam_version(u8 major, u8 minor, u8 patch) +{ + return ((major << 16) | (minor << 8) | patch); +} +EXPORT_SYMBOL_GPL(tegracam_version); + +u32 tegracam_query_version(const char *of_dev_name) +{ + struct tegracam_device_entry *entry = NULL; + struct device_node *node; + u32 version = 0; + + if (of_dev_name == NULL) + return 0; + + mutex_lock(&tc_device_list_mutex); + list_for_each_entry(entry, &tc_device_list_head, list) { + node = entry->tc_dev->dev->of_node; + if (strcmp(of_dev_name, node->name) == 0) { + version = entry->tc_dev->version; + break; + } + } + mutex_unlock(&tc_device_list_mutex); + + return version; +} +EXPORT_SYMBOL_GPL(tegracam_query_version); + +struct tegracam_device *to_tegracam_device(struct camera_common_data *data) +{ + /* fix this by moving subdev to base struct */ + return (struct tegracam_device 
*)data->tegracam_ctrl_hdl->tc_dev; +} +EXPORT_SYMBOL_GPL(to_tegracam_device); + +void tegracam_set_privdata(struct tegracam_device * tc_dev, void *priv) +{ + tc_dev->priv = priv; + + /* TODO: cleanup needed for priv once sensors adapt this driver */ + tc_dev->s_data->priv = priv; +} +EXPORT_SYMBOL_GPL(tegracam_set_privdata); + +void *tegracam_get_privdata(struct tegracam_device *tc_dev) +{ + return tc_dev->priv; +} +EXPORT_SYMBOL_GPL(tegracam_get_privdata); + +int tegracam_device_register(struct tegracam_device *tc_dev) +{ + struct device *dev = tc_dev->dev; + struct tegracam_ctrl_handler *ctrl_hdl = NULL; + struct tegracam_device_entry *tc_dev_entry = NULL; + struct camera_common_power_rail *pw_rail = NULL; + struct camera_common_data *s_data = NULL; + struct sensor_mode_properties *sensor_mode = NULL; + struct sensor_signal_properties *signal_props = NULL; + struct sensor_image_properties *image_props = NULL; + u32 mode_idx = 0; + int err = 0; + + s_data = devm_kzalloc(dev, + sizeof(struct camera_common_data), GFP_KERNEL); + s_data->dev = dev; + + ctrl_hdl = devm_kzalloc(dev, + sizeof(struct tegracam_ctrl_handler), GFP_KERNEL); + ctrl_hdl->tc_dev = tc_dev; + s_data->tegracam_ctrl_hdl = ctrl_hdl; + + pw_rail = devm_kzalloc(dev, + sizeof(struct camera_common_power_rail), GFP_KERNEL); + s_data->power = pw_rail; + + s_data->regmap = devm_regmap_init_i2c(tc_dev->client, + tc_dev->dev_regmap_config); + if (IS_ERR(s_data->regmap)) { + dev_err(dev, + "regmap init failed: %ld\n", PTR_ERR(s_data->regmap)); + return -ENODEV; + } + + if (!tc_dev->sensor_ops) { + dev_err(dev, "sensor ops not initialized\n"); + return -EINVAL; + } + s_data->ops = tc_dev->sensor_ops; + + s_data->pdata = tc_dev->sensor_ops->parse_dt(tc_dev); + if (PTR_ERR(s_data->pdata) == -EPROBE_DEFER) + return -EPROBE_DEFER; + if (!s_data->pdata) { + dev_err(dev, "unable to get platform data\n"); + return -EFAULT; + } + tc_dev->s_data = s_data; + err = tc_dev->sensor_ops->power_get(tc_dev); + if (err) { + 
dev_err(dev, "unable to power get\n"); + return -EFAULT; + } + + err = camera_common_initialize(s_data, tc_dev->name); + if (err) { + dev_err(dev, "Failed to initialize %s\n", tc_dev->name); + return err; + } + + /* TODO: updated default mode from DT ?? */ + mode_idx = s_data->mode_prop_idx = 0; + /* init format context */ + /*TODO: compile frmfmt array from DT */ + s_data->frmfmt = tc_dev->sensor_ops->frmfmt_table; + s_data->numfmts = tc_dev->sensor_ops->numfrmfmts; + sensor_mode = &s_data->sensor_props.sensor_modes[mode_idx]; + signal_props = &sensor_mode->signal_properties; + image_props = &sensor_mode->image_properties; + + s_data->def_mode = s_data->frmfmt[mode_idx].mode; + s_data->colorfmt = + camera_common_find_pixelfmt(image_props->pixel_format); + s_data->def_width = s_data->fmt_width = + s_data->frmfmt[mode_idx].size.width; + s_data->def_height = s_data->fmt_height = + s_data->frmfmt[mode_idx].size.height; + s_data->def_clk_freq = signal_props->mclk_freq * 1000; + + /* add version info to identify the right feature set */ + tc_dev->version = tegracam_version(TEGRACAM_MAJOR_VERSION, + TEGRACAM_MINOR_VERSION, TEGRACAM_PATCH_VERSION); + s_data->version = tc_dev->version; + + /* Add tc_dev to list of registered devices */ + tc_dev_entry = devm_kzalloc(dev, + sizeof(*tc_dev_entry), GFP_KERNEL); + tc_dev_entry->tc_dev = tc_dev; + INIT_LIST_HEAD(&tc_dev_entry->list); + mutex_lock(&tc_device_list_mutex); + list_add(&tc_dev_entry->list, &tc_device_list_head); + mutex_unlock(&tc_device_list_mutex); + + dev_info(dev, "tegracam sensor driver:%s_v%d.%d.%d\n", + tc_dev->name, TEGRACAM_MAJOR_VERSION, + TEGRACAM_MINOR_VERSION, TEGRACAM_PATCH_VERSION); + + return 0; +} +EXPORT_SYMBOL_GPL(tegracam_device_register); + +void tegracam_device_unregister(struct tegracam_device *tc_dev) +{ + struct tegracam_device_entry *entry; + struct tegracam_device_entry *temp; + struct camera_common_data *s_data = tc_dev->s_data; + + tc_dev->sensor_ops->power_put(tc_dev); + 
camera_common_cleanup(s_data); + + /* Remove tc_dev from list of registered devices */ + mutex_lock(&tc_device_list_mutex); + list_for_each_entry_safe(entry, temp, &tc_device_list_head, list) { + if (entry->tc_dev == tc_dev) { + list_del(&entry->list); + break; + } + } + mutex_unlock(&tc_device_list_mutex); + devm_kfree(tc_dev->dev, entry); + devm_kfree(tc_dev->dev, tc_dev->s_data->tegracam_ctrl_hdl); + devm_kfree(tc_dev->dev, tc_dev->s_data->power); + devm_kfree(tc_dev->dev, tc_dev->s_data); + tc_dev->s_data = NULL; +} +EXPORT_SYMBOL_GPL(tegracam_device_unregister); diff --git a/drivers/media/platform/tegra/camera/tegracam_ctrls.c b/drivers/media/platform/tegra/camera/tegracam_ctrls.c new file mode 100644 index 00000000..a1b8ed7a --- /dev/null +++ b/drivers/media/platform/tegra/camera/tegracam_ctrls.c @@ -0,0 +1,1026 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * tegracam_ctrls - control framework for tegra camera drivers + * + * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#define CTRL_U32_MIN 0 +#define CTRL_U32_MAX 0x7FFFFFFF +#define CTRL_U64_MIN 0 +#define CTRL_U64_MAX 0x7FFFFFFFFFFFFFFFLL +#define CTRL_S32_MIN 0x80000000 +#define CTRL_S32_MAX 0x7FFFFFFF +#define CTRL_S64_MIN 0x8000000000000000LL +#define CTRL_S64_MAX 0x7FFFFFFFFFFFFFFFLL +#define CTRL_MAX_STR_SIZE 4096 +#define STEREO_EEPROM_SIZE 4096 + +#define TEGRACAM_DEF_CTRLS 1 + +static int tegracam_s_ctrl(struct v4l2_ctrl *ctrl); +static const struct v4l2_ctrl_ops tegracam_ctrl_ops = { + .s_ctrl = tegracam_s_ctrl, +}; + +static const u32 tegracam_def_cids[] = { + TEGRA_CAMERA_CID_GROUP_HOLD, +}; + +/* + * For auto control, the states of the previous controls must + * be applied to get optimal quality faster. 
List all the controls + * which must be overriden + */ +static const u32 tegracam_override_cids[] = { + TEGRA_CAMERA_CID_GAIN, + TEGRA_CAMERA_CID_EXPOSURE, + TEGRA_CAMERA_CID_FRAME_RATE, +}; +#define NUM_OVERRIDE_CTRLS ARRAY_SIZE(tegracam_override_cids) + +static struct v4l2_ctrl_config ctrl_cfg_list[] = { +/* Do not change the name field for the controls! */ + { + .ops = &tegracam_ctrl_ops, + .id = TEGRA_CAMERA_CID_GAIN, + .name = "Gain", + .type = V4L2_CTRL_TYPE_INTEGER64, + .flags = V4L2_CTRL_FLAG_SLIDER, + .min = CTRL_U64_MIN, + .max = CTRL_U64_MAX, + .def = CTRL_U64_MIN, + .step = 1, + }, + { + .ops = &tegracam_ctrl_ops, + .id = TEGRA_CAMERA_CID_EXPOSURE, + .name = "Exposure", + .type = V4L2_CTRL_TYPE_INTEGER64, + .flags = V4L2_CTRL_FLAG_SLIDER, + .min = CTRL_U64_MIN, + .max = CTRL_U64_MAX, + .def = CTRL_U64_MIN, + .step = 1, + }, + { + .ops = &tegracam_ctrl_ops, + .id = TEGRA_CAMERA_CID_EXPOSURE_SHORT, + .name = "Exposure Short", + .type = V4L2_CTRL_TYPE_INTEGER64, + .flags = V4L2_CTRL_FLAG_SLIDER, + .min = CTRL_U64_MIN, + .max = CTRL_U64_MAX, + .def = CTRL_U64_MIN, + .step = 1, + }, + { + .ops = &tegracam_ctrl_ops, + .id = TEGRA_CAMERA_CID_FRAME_RATE, + .name = "Frame Rate", + .type = V4L2_CTRL_TYPE_INTEGER64, + .flags = V4L2_CTRL_FLAG_SLIDER, + .min = CTRL_U64_MIN, + .max = CTRL_U64_MAX, + .def = CTRL_U64_MIN, + .step = 1, + }, + { + .ops = &tegracam_ctrl_ops, + .id = TEGRA_CAMERA_CID_GROUP_HOLD, + .name = "Group Hold", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .flags = V4L2_CTRL_FLAG_EXECUTE_ON_WRITE, + .min = 0, + .max = 1, + .def = 0, + .step = 1, + }, + { + .ops = &tegracam_ctrl_ops, + .id = TEGRA_CAMERA_CID_EEPROM_DATA, + .name = "EEPROM Data", + .type = V4L2_CTRL_TYPE_STRING, + .flags = V4L2_CTRL_FLAG_READ_ONLY, + .min = 0, + .max = CTRL_MAX_STR_SIZE, + .step = 2, + }, + { + .ops = &tegracam_ctrl_ops, + .id = TEGRA_CAMERA_CID_FUSE_ID, + .name = "Fuse ID", + .type = V4L2_CTRL_TYPE_STRING, + .flags = V4L2_CTRL_FLAG_READ_ONLY, + .min = 0, + .max = 
CTRL_MAX_STR_SIZE, + .step = 2, + }, + { + .ops = &tegracam_ctrl_ops, + .id = TEGRA_CAMERA_CID_SENSOR_MODE_ID, + .name = "Sensor Mode", + .type = V4L2_CTRL_TYPE_INTEGER64, + .flags = V4L2_CTRL_FLAG_SLIDER, + .min = CTRL_U32_MIN, + .max = CTRL_U32_MAX, + .def = CTRL_U32_MIN, + .step = 1, + }, + { + .ops = &tegracam_ctrl_ops, + .id = TEGRA_CAMERA_CID_HDR_EN, + .name = "HDR enable", + .type = V4L2_CTRL_TYPE_INTEGER_MENU, + .min = 0, + .max = ARRAY_SIZE(switch_ctrl_qmenu) - 1, + .menu_skip_mask = 0, + .def = 0, + .qmenu_int = switch_ctrl_qmenu, + }, + { + .ops = &tegracam_ctrl_ops, + .id = TEGRA_CAMERA_CID_OTP_DATA, + .name = "OTP Data", + .type = V4L2_CTRL_TYPE_STRING, + .flags = V4L2_CTRL_FLAG_READ_ONLY, + .min = 0, + .max = CTRL_MAX_STR_SIZE, + .step = 2, + }, + { + .ops = &tegracam_ctrl_ops, + .id = TEGRA_CAMERA_CID_STEREO_EEPROM, + .name = "Stereo EEPROM", + .type = V4L2_CTRL_COMPOUND_TYPES, + .flags = V4L2_CTRL_FLAG_READ_ONLY, + .min = 0, + .max = STEREO_EEPROM_SIZE, + .step = 2, + }, +}; + +static int tegracam_get_ctrl_index(u32 cid) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ctrl_cfg_list); i++) { + if (ctrl_cfg_list[i].id == cid) + return i; + } + + return -EINVAL; +} + +static int tegracam_get_string_ctrl_size(u32 cid, + const struct tegracam_ctrl_ops *ops) +{ + u32 index = 0; + + switch (cid) { + case TEGRA_CAMERA_CID_EEPROM_DATA: + index = TEGRA_CAM_STRING_CTRL_EEPROM_INDEX; + break; + case TEGRA_CAMERA_CID_FUSE_ID: + index = TEGRA_CAM_STRING_CTRL_FUSEID_INDEX; + break; + case TEGRA_CAMERA_CID_OTP_DATA: + index = TEGRA_CAM_STRING_CTRL_OTP_INDEX; + break; + default: + return -EINVAL; + } + + return ops->string_ctrl_size[index]; +} +static int tegracam_get_compound_ctrl_size(u32 cid, + const struct tegracam_ctrl_ops *ops) +{ + u32 index = 0; + + switch (cid) { + case TEGRA_CAMERA_CID_STEREO_EEPROM: + index = TEGRA_CAM_COMPOUND_CTRL_EEPROM_INDEX; + break; + default: + return -EINVAL; + } + return ops->compound_ctrl_size[index]; +} +static int 
tegracam_setup_string_ctrls(struct tegracam_device *tc_dev, + struct tegracam_ctrl_handler *handler) +{ + const struct tegracam_ctrl_ops *ops = handler->ctrl_ops; + u32 numctrls = 0; + int i; + int err = 0; + + if (ops == NULL) + return 0; + + numctrls = ops->numctrls; + + for (i = 0; i < numctrls; i++) { + struct v4l2_ctrl *ctrl = handler->ctrls[i]; + + if (ctrl->type == V4L2_CTRL_TYPE_STRING) { + err = ops->fill_string_ctrl(tc_dev, ctrl); + if (err) + return err; + } + } + + spec_bar(); + + return 0; +} + +static int tegracam_setup_compound_ctrls(struct tegracam_device *tc_dev, + struct tegracam_ctrl_handler *handler) +{ + const struct tegracam_ctrl_ops *ops = handler->ctrl_ops; + u32 numctrls = 0; + int i; + int err = 0; + + if (ops == NULL) + return 0; + + numctrls = ops->numctrls; + + for (i = 0; i < numctrls; i++) { + struct v4l2_ctrl *ctrl = handler->ctrls[i]; + + if (ctrl->type == V4L2_CTRL_COMPOUND_TYPES) { + err = ops->fill_compound_ctrl(tc_dev, ctrl); + if (err) + return err; + } + } + + spec_bar(); /* break_spec_#5_1 */ + + return 0; +} + +static int tegracam_set_ctrls(struct tegracam_ctrl_handler *handler, + struct v4l2_ctrl *ctrl) +{ + const struct tegracam_ctrl_ops *ops = handler->ctrl_ops; + struct tegracam_device *tc_dev = handler->tc_dev; + struct camera_common_data *s_data = tc_dev->s_data; + int err = 0; + u32 status = 0; + + /* For controls that are independent of power state */ + switch (ctrl->id) { + case TEGRA_CAMERA_CID_SENSOR_MODE_ID: + s_data->sensor_mode_id = (int) (*ctrl->p_new.p_s64); + return 0; + case TEGRA_CAMERA_CID_HDR_EN: + return 0; + } + + if (v4l2_subdev_call(&s_data->subdev, video, + g_input_status, &status)) { + dev_err(s_data->dev, "power status query unsupported\n"); + return -ENOTTY; + } + + /* power state is turned off, do not program sensor now */ + if (!status) + return 0; + + /* For controls that require sensor to be on */ + switch (ctrl->id) { + case TEGRA_CAMERA_CID_GAIN: + err = ops->set_gain(tc_dev, 
*ctrl->p_new.p_s64); + break; + case TEGRA_CAMERA_CID_FRAME_RATE: + err = ops->set_frame_rate(tc_dev, *ctrl->p_new.p_s64); + break; + case TEGRA_CAMERA_CID_EXPOSURE: + err = ops->set_exposure(tc_dev, *ctrl->p_new.p_s64); + break; + case TEGRA_CAMERA_CID_EXPOSURE_SHORT: + err = ops->set_exposure_short(tc_dev, *ctrl->p_new.p_s64); + break; + case TEGRA_CAMERA_CID_GROUP_HOLD: + err = ops->set_group_hold(tc_dev, ctrl->val); + break; + default: + pr_err("%s: unknown ctrl id.\n", __func__); + return -EINVAL; + } + + return err; +} + +static int tegracam_set_grouphold_ex(struct tegracam_device *tc_dev, + struct sensor_blob *blob, + bool status) +{ + const struct tegracam_ctrl_ops *ops = tc_dev->tcctrl_ops; + struct camera_common_data *s_data = tc_dev->s_data; + int err = 0; + + /* + * when grouphold is set, reset control blob + * set grouphold register using set API + * start packetize commands for delivering the blob + * when grouphold is unset, unset grouphold register + * and write the blob only if sensor is streaming. 
+ */ + if (status) { + memset(blob, 0, sizeof(struct sensor_blob)); + err = ops->set_group_hold_ex(tc_dev, blob, status); + if (err) + return err; + } else { + err = ops->set_group_hold_ex(tc_dev, blob, status); + if (err) + return err; + + /* TODO: block this write selectively from VI5 */ + if (tc_dev->is_streaming) { + err = write_sensor_blob(s_data->regmap, blob); + if (err) + return err; + } + } + + return 0; +} + +static int tegracam_set_ctrls_ex(struct tegracam_ctrl_handler *handler, + struct v4l2_ctrl *ctrl) +{ + const struct tegracam_ctrl_ops *ops = handler->ctrl_ops; + struct tegracam_device *tc_dev = handler->tc_dev; + struct camera_common_data *s_data = tc_dev->s_data; + struct tegracam_sensor_data *sensor_data = &handler->sensor_data; + struct sensor_blob *blob = &sensor_data->ctrls_blob; + int err = 0; + + switch (ctrl->id) { + case TEGRA_CAMERA_CID_GAIN: + err = ops->set_gain_ex(tc_dev, blob, *ctrl->p_new.p_s64); + break; + case TEGRA_CAMERA_CID_FRAME_RATE: + err = ops->set_frame_rate_ex(tc_dev, blob, *ctrl->p_new.p_s64); + break; + case TEGRA_CAMERA_CID_EXPOSURE: + err = ops->set_exposure_ex(tc_dev, blob, *ctrl->p_new.p_s64); + break; + case TEGRA_CAMERA_CID_GROUP_HOLD: + err = tegracam_set_grouphold_ex(tc_dev, blob, ctrl->val); + break; + case TEGRA_CAMERA_CID_SENSOR_MODE_ID: + s_data->sensor_mode_id = (int) (*ctrl->p_new.p_s64); + break; + case TEGRA_CAMERA_CID_HDR_EN: + break; + default: + pr_err("%s: unknown ctrl id.\n", __func__); + return -EINVAL; + } + + return err; +} + + +static int tegracam_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct tegracam_ctrl_handler *handler = + container_of(ctrl->handler, + struct tegracam_ctrl_handler, ctrl_handler); + const struct tegracam_ctrl_ops *ops = handler->ctrl_ops; + + if (ops->is_blob_supported) + return tegracam_set_ctrls_ex(handler, ctrl); + else + return tegracam_set_ctrls(handler, ctrl); + + return 0; +} + +int tegracam_ctrl_set_overrides(struct tegracam_ctrl_handler *hdl) +{ + struct v4l2_ext_controls 
ctrls; + struct v4l2_ext_control control; + struct tegracam_device *tc_dev = hdl->tc_dev; + struct device *dev = tc_dev->dev; + const struct tegracam_ctrl_ops *ops = hdl->ctrl_ops; + struct tegracam_sensor_data *sensor_data = &hdl->sensor_data; + struct sensor_blob *blob = &sensor_data->ctrls_blob; + bool is_blob_supported = false; + int err, result = 0; + int i; + + if (ops == NULL) + return 0; + + is_blob_supported = ops->is_blob_supported; + + /* + * write list of override regs for the asking frame length, + * coarse integration time, and gain. Failures to write + * overrides are non-fatal + */ + memset(&ctrls, 0, sizeof(ctrls)); + ctrls.which = V4L2_CTRL_ID2WHICH(TEGRA_CAMERA_CID_BASE); + ctrls.count = 1; + ctrls.controls = &control; + + for (i = 0; i < NUM_OVERRIDE_CTRLS; i++) { + s64 val = 0; + control.id = tegracam_override_cids[i]; + + result = v4l2_g_ext_ctrls(&hdl->ctrl_handler, tc_dev->s_data->subdev.devnode, NULL, &ctrls); + + if (result == 0) { + val = control.value64; + switch (control.id) { + case TEGRA_CAMERA_CID_GAIN: + if (is_blob_supported) + err = ops->set_gain_ex(tc_dev, + blob, val); + else + err = ops->set_gain(tc_dev, val); + break; + case TEGRA_CAMERA_CID_EXPOSURE: + if (is_blob_supported) + err = ops->set_exposure_ex(tc_dev, + blob, val); + else + err = ops->set_exposure(tc_dev, val); + break; + case TEGRA_CAMERA_CID_FRAME_RATE: + if (is_blob_supported) + err = ops->set_frame_rate_ex(tc_dev, + blob, val); + else + err = ops->set_frame_rate(tc_dev, val); + break; + default: + dev_err(dev, "%s: unsupported override %x\n", + __func__, control.id); + return -EINVAL; + } + + if (err) { + dev_err(dev, "%s: error to set %d override\n", + __func__, control.id); + return err; + } + } + } + + return 0; +} + +int tegracam_init_ctrl_ranges_by_mode( + struct tegracam_ctrl_handler *handler, + u32 modeidx) +{ + struct tegracam_device *tc_dev = handler->tc_dev; + struct camera_common_data *s_data = tc_dev->s_data; + struct sensor_control_properties 
*ctrlprops = NULL; + s64 min_short_exp_time = 0; + s64 max_short_exp_time = 0; + s64 default_short_exp_time = 0; + int i; + + if (handler->numctrls == 0) + return 0; + + if (modeidx >= s_data->sensor_props.num_modes) + return -EINVAL; + + ctrlprops = + &s_data->sensor_props.sensor_modes[modeidx].control_properties; + + for (i = 0; i < handler->numctrls; i++) { + struct v4l2_ctrl *ctrl = handler->ctrls[i]; + int err = 0; + + switch (ctrl->id) { + case TEGRA_CAMERA_CID_GAIN: + err = v4l2_ctrl_modify_range(ctrl, + ctrlprops->min_gain_val, + ctrlprops->max_gain_val, + ctrlprops->step_gain_val, + ctrlprops->default_gain); + break; + case TEGRA_CAMERA_CID_FRAME_RATE: + err = v4l2_ctrl_modify_range(ctrl, + ctrlprops->min_framerate, + ctrlprops->max_framerate, + ctrlprops->step_framerate, + ctrlprops->default_framerate); + break; + case TEGRA_CAMERA_CID_EXPOSURE: + err = v4l2_ctrl_modify_range(ctrl, + ctrlprops->min_exp_time.val, + ctrlprops->max_exp_time.val, + ctrlprops->step_exp_time.val, + ctrlprops->default_exp_time.val); + break; + case TEGRA_CAMERA_CID_EXPOSURE_SHORT: + /* + * min_hdr_ratio should be equal to max_hdr_ratio. + * This will ensure consistent short exposure + * limit calculations. 
+ */ + min_short_exp_time = + ctrlprops->min_exp_time.val / + ctrlprops->min_hdr_ratio; + max_short_exp_time = + ctrlprops->max_exp_time.val / + ctrlprops->min_hdr_ratio; + default_short_exp_time = + ctrlprops->default_exp_time.val / + ctrlprops->min_hdr_ratio; + err = v4l2_ctrl_modify_range(ctrl, + min_short_exp_time, + max_short_exp_time, + ctrlprops->step_exp_time.val, + default_short_exp_time); + dev_dbg(s_data->dev, + "%s:short_exp_limits[%lld,%lld], default_short_exp_time=%lld\n", + __func__, + min_short_exp_time, + max_short_exp_time, + default_short_exp_time); + break; + default: + /* Not required to modify these control ranges */ + break; + } + + if (err) { + dev_err(s_data->dev, + "ctrl %s range update failed\n", ctrl->name); + return err; + } + } + + spec_bar(); + + return 0; +} +EXPORT_SYMBOL_GPL(tegracam_init_ctrl_ranges_by_mode); + +int tegracam_init_ctrl_ranges(struct tegracam_ctrl_handler *handler) +{ + struct tegracam_device *tc_dev = handler->tc_dev; + struct camera_common_data *s_data = tc_dev->s_data; + struct device *dev = tc_dev->dev; + int i, err = 0; + + /* Updating static control ranges */ + for (i = 0; i < handler->numctrls; i++) { + struct v4l2_ctrl *ctrl = handler->ctrls[i]; + + switch (ctrl->id) { + case TEGRA_CAMERA_CID_SENSOR_MODE_ID: + err = v4l2_ctrl_modify_range(ctrl, + CTRL_U32_MIN, + (s64) s_data->sensor_props.num_modes, + 1, + CTRL_U32_MIN); + break; + default: + /* Not required to modify these control ranges */ + break; + } + + if (err) { + dev_err(s_data->dev, + "ctrl %s range update failed\n", ctrl->name); + return err; + } + } + + spec_bar(); + + /* Use mode 0 control ranges as default */ + if (s_data->sensor_props.num_modes > 0) + { + err = tegracam_init_ctrl_ranges_by_mode(handler, 0); + if (err) { + dev_err(dev, "Error %d updating mode specific control ranges \n", err); + return err; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(tegracam_init_ctrl_ranges); + +static int tegracam_check_ctrl_ops( + struct 
tegracam_ctrl_handler *handler, int *numctrls)
{
	struct tegracam_device *tc_dev = handler->tc_dev;
	struct device *dev = tc_dev->dev;
	const struct tegracam_ctrl_ops *ops = handler->ctrl_ops;
	const u32 *cids = ops->ctrl_cid_list;
	int sensor_ops = 0, sensor_ex_ops = 0, mode_ops = 0, string_ops = 0;
	int compound_ops = 0;
	int default_ops = 0, default_ex_ops = 0, total_ops = 0;
	int i;

	/*
	 * Walk the driver-registered control ID list and count, per
	 * category, the controls that have a matching op implementation.
	 * A registered CID with no implementation is reported but not
	 * counted, so the final tally below catches the mismatch.
	 */
	for (i = 0; i < ops->numctrls; i++) {
		switch (cids[i]) {
		case TEGRA_CAMERA_CID_GAIN:
			if (ops->set_gain == NULL && ops->set_gain_ex == NULL)
				dev_err(dev,
					"Missing TEGRA_CAMERA_CID_GAIN implementation\n");
			if (ops->set_gain != NULL)
				sensor_ops++;
			if (ops->set_gain_ex != NULL)
				sensor_ex_ops++;
			break;
		case TEGRA_CAMERA_CID_EXPOSURE:
			if (ops->set_exposure == NULL &&
				ops->set_exposure_ex == NULL)
				dev_err(dev,
					"Missing TEGRA_CAMERA_CID_EXPOSURE implementation\n");
			if (ops->set_exposure != NULL)
				sensor_ops++;
			if (ops->set_exposure_ex != NULL)
				sensor_ex_ops++;
			break;
		case TEGRA_CAMERA_CID_EXPOSURE_SHORT:
			if (ops->set_exposure_short == NULL)
				dev_err(dev,
					"Missing TEGRA_CAMERA_CID_EXPOSURE_SHORT implementation\n");
			else
				sensor_ops++;
			break;
		case TEGRA_CAMERA_CID_FRAME_RATE:
			if (ops->set_frame_rate == NULL &&
				ops->set_frame_rate_ex == NULL)
				dev_err(dev,
					"Missing TEGRA_CAMERA_CID_FRAME_RATE implementation\n");
			if (ops->set_frame_rate != NULL)
				sensor_ops++;
			if (ops->set_frame_rate_ex != NULL)
				sensor_ex_ops++;
			break;
		case TEGRA_CAMERA_CID_GROUP_HOLD:
			/* Fixed message typo: "contorl" -> "control" */
			dev_err(dev,
				"TEGRA_CAMERA_CID_GROUP_HOLD control is enabled in framework by default, no need to add it in driver\n");
			return -EINVAL;
		case TEGRA_CAMERA_CID_EEPROM_DATA:
			if (tegracam_get_string_ctrl_size(
				TEGRA_CAMERA_CID_EEPROM_DATA, ops) == 0)
				dev_err(dev, "EEPROM size not specified\n");
			else
				string_ops++;
			break;
		case TEGRA_CAMERA_CID_FUSE_ID:
			if (tegracam_get_string_ctrl_size(
				TEGRA_CAMERA_CID_FUSE_ID, ops) == 0)
				dev_err(dev, "Fuse ID size not specified\n");
			else
				string_ops++;
			break;
		case TEGRA_CAMERA_CID_OTP_DATA:
			if (tegracam_get_string_ctrl_size(
				TEGRA_CAMERA_CID_OTP_DATA, ops) == 0)
				dev_err(dev, "OTP size not specified\n");
			else
				string_ops++;
			break;
		case TEGRA_CAMERA_CID_STEREO_EEPROM:
			/*
			 * Fixed broken string literal: the original used a
			 * backslash line-continuation inside the quotes,
			 * which spliced the next line's indentation
			 * whitespace into the logged message.
			 */
			if (tegracam_get_compound_ctrl_size(
				TEGRA_CAMERA_CID_STEREO_EEPROM, ops) == 0)
				dev_err(dev,
					"Stereo EEPROM size not specified\n");
			else
				compound_ops++;
			break;

		/* The below controls are handled by framework */
		case TEGRA_CAMERA_CID_SENSOR_MODE_ID:
		case TEGRA_CAMERA_CID_HDR_EN:
			mode_ops++;
			break;
		default:
			break;
		}
	}

	/* String controls need the common fill callback to be usable */
	if (string_ops > 0) {
		if (ops->fill_string_ctrl == NULL) {
			dev_err(dev, "Missing string control implementation\n");
			string_ops = 0;
		}
	}

	/* Same for compound controls */
	if (compound_ops > 0) {
		if (ops->fill_compound_ctrl == NULL) {
			dev_err(dev, "Missing compound control implementation\n");
			compound_ops = 0;
		}
	}

	/*
	 * Framework-owned default controls: group hold is implicitly
	 * required whenever any sensor control is implemented.
	 */
	for (i = 0; i < TEGRACAM_DEF_CTRLS; i++) {
		switch (tegracam_def_cids[i]) {
		case TEGRA_CAMERA_CID_GROUP_HOLD:
			if ((sensor_ops > 0 &&
				ops->set_group_hold == NULL) ||
				(sensor_ex_ops > 0 &&
				ops->set_group_hold_ex == NULL))
				dev_err(dev,
					"Missing TEGRA_CAMERA_CID_GROUP_HOLD implementation\n");
			if (ops->set_group_hold != NULL)
				default_ops++;
			if (ops->set_group_hold_ex != NULL)
				default_ex_ops++;
			break;
		default:
			break;
		}
	}

	/* Don't use extended control when blob support is not enabled */
	if (sensor_ex_ops > 0 && ops->is_blob_supported == false) {
		dev_err(dev,
			"ERROR: Extended controls only work when blob support is enabled\n");
		return -EINVAL;
	}

	/* Should not mix normal and extended controls */
	if ((sensor_ops + default_ops) > 0 &&
		(sensor_ex_ops + default_ex_ops) > 0) {
		dev_err(dev,
			"ERROR: Can not mix normal and extended sensor controls\n");
		return
-EINVAL; + } + total_ops = sensor_ops + mode_ops + string_ops + default_ops + compound_ops; + total_ops += sensor_ex_ops + default_ex_ops; + + if (total_ops != (ops->numctrls + TEGRACAM_DEF_CTRLS)) { + dev_err(dev, + "ERROR: %d controls registered with framework but missing implementation\n", + (ops->numctrls + TEGRACAM_DEF_CTRLS) - total_ops); + return -EINVAL; + } + + *numctrls = sensor_ops + sensor_ex_ops + mode_ops + string_ops + compound_ops; + + /* default controls are only needed if sensor controls are registered */ + if (sensor_ops > 0) + *numctrls += default_ops; + if (sensor_ex_ops > 0) + *numctrls += default_ex_ops; + + return 0; +} + +static bool find_matching_cid(const u32 *ctrl_cid_list, u32 numctrls, u32 cid) +{ + int i; + + for (i = 0; i < numctrls; i++) { + if (ctrl_cid_list[i] == cid) + return true; + } + + return false; +} + +static int tegracam_check_ctrl_cids(struct tegracam_ctrl_handler *handler) +{ + struct tegracam_device *tc_dev = handler->tc_dev; + struct device *dev = tc_dev->dev; + const struct tegracam_ctrl_ops *ops = handler->ctrl_ops; + int errors_found = 0; + + /* Find missing sensor control IDs */ + if (ops->set_gain != NULL || ops->set_gain_ex != NULL) { + if (!find_matching_cid(ops->ctrl_cid_list, + ops->numctrls, + TEGRA_CAMERA_CID_GAIN)) { + dev_err(dev, "Missing TEGRA_CAMERA_CID_GAIN registration\n"); + errors_found++; + } + } + + if (ops->set_exposure != NULL || ops->set_exposure_ex != NULL) { + if (!find_matching_cid(ops->ctrl_cid_list, + ops->numctrls, + TEGRA_CAMERA_CID_EXPOSURE)) { + dev_err(dev, "Missing TEGRA_CAMERA_CID_EXPOSURE registration\n"); + errors_found++; + } + } + + if (ops->set_exposure_short != NULL) { + if (!find_matching_cid(ops->ctrl_cid_list, + ops->numctrls, + TEGRA_CAMERA_CID_EXPOSURE_SHORT)) { + dev_err(dev, + "Missing TEGRA_CAMERA_CID_EXPOSURE_SHORT registration\n"); + errors_found++; + } + } + + if (ops->set_frame_rate != NULL || ops->set_frame_rate_ex != NULL) { + if 
(!find_matching_cid(ops->ctrl_cid_list, + ops->numctrls, + TEGRA_CAMERA_CID_FRAME_RATE)) { + dev_err(dev, "Missing TEGRA_CAMERA_CID_FRAME_RATE registration\n"); + errors_found++; + } + } + + /* Find missing string control IDs */ + if (ops->fill_string_ctrl != NULL) { + if (tegracam_get_string_ctrl_size( + TEGRA_CAMERA_CID_EEPROM_DATA, ops) > 0) { + if (!find_matching_cid(ops->ctrl_cid_list, + ops->numctrls, + TEGRA_CAMERA_CID_EEPROM_DATA)) { + dev_err(dev, + "Missing TEGRA_CAMERA_CID_EEPROM_DATA registration\n"); + errors_found++; + } + } + + if (tegracam_get_string_ctrl_size( + TEGRA_CAMERA_CID_FUSE_ID, ops) > 0) { + if (!find_matching_cid(ops->ctrl_cid_list, + ops->numctrls, + TEGRA_CAMERA_CID_FUSE_ID)) { + dev_err(dev, + "Missing TEGRA_CAMERA_CID_FUSE_ID registration\n"); + errors_found++; + } + } + + if (tegracam_get_string_ctrl_size( + TEGRA_CAMERA_CID_OTP_DATA, ops) > 0) { + if (!find_matching_cid(ops->ctrl_cid_list, + ops->numctrls, + TEGRA_CAMERA_CID_OTP_DATA)) { + dev_err(dev, + "Missing TEGRA_CAMERA_CID_OTP_DATA registration\n"); + errors_found++; + } + } + } + + if (ops->fill_compound_ctrl != NULL) { + if (tegracam_get_compound_ctrl_size( + TEGRA_CAMERA_CID_STEREO_EEPROM, ops) > 0) { + if (!find_matching_cid(ops->ctrl_cid_list, + ops->numctrls, + TEGRA_CAMERA_CID_STEREO_EEPROM)) { + dev_err(dev, + "Missing TEGRA_CAMERA_CID_STEREO_EEPROM registration\n"); + errors_found++; + } + } + } + + if (errors_found > 0) { + dev_err(dev, "ERROR: %d controls implemented but not registered with framework\n", + errors_found); + return -EINVAL; + } + + return 0; +} + +int tegracam_ctrl_handler_init(struct tegracam_ctrl_handler *handler) +{ + struct tegracam_device *tc_dev = handler->tc_dev; + struct v4l2_ctrl *ctrl; + struct v4l2_ctrl_config *ctrl_cfg; + struct device *dev = tc_dev->dev; + const struct tegracam_ctrl_ops *ops = handler->ctrl_ops; + const u32 *cids = NULL; + u32 numctrls = 0; + int i, j; + int err = 0; + + if (ops != NULL) { + cids = ops->ctrl_cid_list; + 
+ err = tegracam_check_ctrl_ops(handler, &numctrls); + if (err) { + dev_err(dev, "Error %d in control ops setup\n", err); + goto ctrl_error; + } + + err = tegracam_check_ctrl_cids(handler); + if (err) { + dev_err(dev, "Error %d in control cids setup\n", err); + goto ctrl_error; + } + } + err = v4l2_ctrl_handler_init(&handler->ctrl_handler, numctrls); + + for (i = 0, j = 0; i < numctrls; i++) { + u32 cid = i < ops->numctrls ? cids[i] : tegracam_def_cids[j++]; + int index = tegracam_get_ctrl_index(cid); + int size = 0; + if (index >= ARRAY_SIZE(ctrl_cfg_list)) { + dev_err(dev, "unsupported control in the list\n"); + return -ENOTTY; + } + + ctrl_cfg = &ctrl_cfg_list[index]; + if (ctrl_cfg->type == V4L2_CTRL_TYPE_STRING) { + size = tegracam_get_string_ctrl_size(ctrl_cfg->id, ops); + if (size < 0) { + dev_err(dev, "Invalid string ctrl size\n"); + return -EINVAL; + } + ctrl_cfg->max = size; + } + + if (ctrl_cfg->type == V4L2_CTRL_COMPOUND_TYPES) { + size = tegracam_get_compound_ctrl_size(ctrl_cfg->id, + ops); + if (size < 0) { + dev_err(dev, "Invalid compound ctrl size\n"); + return -EINVAL; + } + ctrl_cfg->dims[0] = size; + } + + ctrl = v4l2_ctrl_new_custom(&handler->ctrl_handler, + ctrl_cfg, NULL); + if (ctrl == NULL) { + dev_err(dev, "Failed to init %s ctrl\n", + ctrl_cfg->name); + return -EINVAL; + } + + if (ctrl_cfg->type == V4L2_CTRL_TYPE_STRING && + ctrl_cfg->flags & V4L2_CTRL_FLAG_READ_ONLY) { + ctrl->p_new.p_char = devm_kzalloc(tc_dev->dev, + size + 1, GFP_KERNEL); + } + + if ((ctrl_cfg->type == V4L2_CTRL_COMPOUND_TYPES) && + ctrl_cfg->flags & V4L2_CTRL_FLAG_READ_ONLY) + ctrl->p_new.p = devm_kzalloc(tc_dev->dev, + ctrl_cfg->max, GFP_KERNEL); + + handler->ctrls[i] = ctrl; + }; + + spec_bar(); + + handler->numctrls = numctrls; + err = v4l2_ctrl_handler_setup(&handler->ctrl_handler); + if (err) { + dev_err(dev, "Error %d in control hdl setup\n", err); + goto error; + } + + err = handler->ctrl_handler.error; + if (err) { + dev_err(dev, "Error %d adding controls\n", 
err); + goto error; + } + + err = tegracam_setup_string_ctrls(tc_dev, handler); + if (err) { + dev_err(dev, "setup string controls failed\n"); + goto error; + } + + err = tegracam_setup_compound_ctrls(tc_dev, handler); + if (err) { + dev_err(dev, "setup compound controls failed\n"); + goto error; + } + + err = tegracam_init_ctrl_ranges(handler); + if (err) { + dev_err(dev, "Error %d updating control ranges\n", err); + goto error; + } + return 0; +error: + v4l2_ctrl_handler_free(&handler->ctrl_handler); +ctrl_error: + return err; +} +EXPORT_SYMBOL_GPL(tegracam_ctrl_handler_init); diff --git a/drivers/media/platform/tegra/camera/tegracam_utils.c b/drivers/media/platform/tegra/camera/tegracam_utils.c new file mode 100644 index 00000000..5530c9b7 --- /dev/null +++ b/drivers/media/platform/tegra/camera/tegracam_utils.c @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * tegracam_utils - tegra camera framework utilities + * + * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. + */ + +#include +#include +#include +#include + +bool is_tvcf_supported(u32 version) +{ + /* 2.0.0 is the base tvcf version sensor driver*/ + return (version >= tegracam_version(2, 0, 0) ? 
true : false);
}
EXPORT_SYMBOL_GPL(is_tvcf_supported);

/*
 * Render a packed TVCF version word as "major.minor.patch".
 * The byte layout (major<<16 | minor<<8 | patch) is implied by the shifts
 * below. Returns the snprintf() result, or -EINVAL on a NULL buffer.
 */
int format_tvcf_version(u32 version, char *buff, size_t size)
{
	if (buff == NULL)
		return -EINVAL;

	return snprintf(buff, size, "%u.%u.%u",
		(u8)(version >> 16),
		(u8)(version >> 8),
		(u8)(version));
}
EXPORT_SYMBOL_GPL(format_tvcf_version);

/* Serialize a 32-bit value into 4 bytes, most-significant byte first. */
void conv_u32_u8arr(u32 input, u8 *output)
{
	output[0] = (input >> 24) & 0xFF;
	output[1] = (input >> 16) & 0xFF;
	output[2] = (input >> 8) & 0xFF;
	output[3] = input & 0xFF;
}
EXPORT_SYMBOL_GPL(conv_u32_u8arr);

/* Serialize a 16-bit value into 2 bytes, most-significant byte first. */
void conv_u16_u8arr(u16 input, u8 *output)
{
	output[0] = (input >> 8) & 0xFF;
	output[1] = input & 0xFF;
}
EXPORT_SYMBOL_GPL(conv_u16_u8arr);

/*
 * Check that the blob exists and still has room for one more command
 * slot and `size` additional payload bytes.
 * Returns 0 if so, -EINVAL on NULL blob, -ENOMEM when full.
 */
static inline int is_valid_blob(struct sensor_blob *blob, u32 size)
{
	if (!blob)
		return -EINVAL;

	if ((blob->num_cmds >= MAX_COMMANDS) ||
		((blob->buf_size + size) >= MAX_BLOB_SIZE))
		return -ENOMEM;

	return 0;
}

/*
 * Append a register-write command to the blob: the opcode word packs
 * SENSOR_OPCODE_WRITE in the top byte and the payload size in the low
 * 24 bits; `size` bytes from `buf` are copied into the blob's data area.
 */
int prepare_write_cmd(struct sensor_blob *blob,
			u32 size, u32 addr, u8 *buf)
{
	struct sensor_cmd *cmd = NULL;
	int err = 0;

	err = is_valid_blob(blob, size);
	if (err)
		return err;

	cmd = &blob->cmds[blob->num_cmds++];
	cmd->opcode = ((SENSOR_OPCODE_WRITE << 24) | size);
	cmd->addr = addr;

	memcpy(&blob->buf[blob->buf_size], buf, size);

	blob->buf_size += size;

	return 0;
}
EXPORT_SYMBOL_GPL(prepare_write_cmd);

/*
 * Append a register-read command to the blob. buf_size is still
 * advanced by `size` even though nothing is copied — presumably this
 * reserves room for the read-back data; TODO confirm against the
 * blob consumer.
 */
int prepare_read_cmd(struct sensor_blob *blob,
			u32 size, u32 addr)
{
	struct sensor_cmd *cmd = NULL;
	int err = 0;

	err = is_valid_blob(blob, size);
	if (err)
		return err;

	cmd = &blob->cmds[blob->num_cmds++];
	cmd->opcode = ((SENSOR_OPCODE_READ << 24) | size);
	cmd->addr = addr;

	blob->buf_size += size;

	return 0;
}
EXPORT_SYMBOL_GPL(prepare_read_cmd);

/*
 * Append a delay command; the sleep duration (microseconds) is packed
 * into the low 24 bits of the opcode word.
 */
int prepare_sleep_cmd(struct sensor_blob *blob, u32 time_in_us)
{
	struct sensor_cmd *cmd = NULL;
	int err = 0;

	err = is_valid_blob(blob, 0);
	if (err)
		return err;

	cmd = &blob->cmds[blob->num_cmds++];
	cmd->opcode = (SENSOR_OPCODE_SLEEP
<< 24) | time_in_us; + + return 0; +} +EXPORT_SYMBOL_GPL(prepare_sleep_cmd); + +int prepare_done_cmd(struct sensor_blob *blob) +{ + struct sensor_cmd *cmd = NULL; + int err = 0; + + err = is_valid_blob(blob, 0); + if (err) + return err; + + cmd = &blob->cmds[blob->num_cmds++]; + cmd->opcode = SENSOR_OPCODE_DONE; + + return 0; +} +EXPORT_SYMBOL_GPL(prepare_done_cmd); + +int convert_table_to_blob(struct sensor_blob *blob, + const struct reg_8 table[], + u16 wait_ms_addr, u16 end_addr) +{ + const struct reg_8 *next; + u16 addr; + u8 val; + int range_start = -1; + u32 range_count = 0; + u8 buf[16]; + + for (next = table;; next++) { + val = next->val; + addr = next->addr; + if (range_start == -1) + range_start = next->addr; + + if (range_count == 16 || + (addr != (range_start + range_count))) { + /* write opcode and size for store index*/ + prepare_write_cmd(blob, range_count, + range_start, &buf[0]); + range_start = addr; + range_count = 0; + } + + /* Done command must be added by client */ + if (addr == end_addr) + break; + + if (addr == wait_ms_addr) { + prepare_sleep_cmd(blob, (next->val * 1000)); + range_start = -1; + continue; + } + + buf[range_count++] = val; + } + + return 0; +} +EXPORT_SYMBOL_GPL(convert_table_to_blob); + +int write_sensor_blob(struct regmap *regmap, struct sensor_blob *blob) +{ + int err = 0; + int cmd_idx = 0; + int buf_index = 0; + + while (cmd_idx < blob->num_cmds) { + struct sensor_cmd *cmd = &blob->cmds[cmd_idx++]; + u32 val; + + val = cmd->opcode; + if ((val >> 24) == SENSOR_OPCODE_DONE) + break; + + if ((val >> 24) == SENSOR_OPCODE_SLEEP) { + val = val & 0x00FFFFFF; + usleep_range(val, val + 10); + continue; + } + + if ((val >> 24) == SENSOR_OPCODE_WRITE) { + int size = val & 0x00FFFFFF; + + err = regmap_bulk_write(regmap, cmd->addr, + &blob->buf[buf_index], size); + if (err) + return err; + buf_index += size; + } else { + pr_err("blob has been packaged with errors\n"); + return -EINVAL; + } + } + + return 0; +} 
EXPORT_SYMBOL_GPL(write_sensor_blob);

/*
 * Flush the handler's queued mode and control blobs to the sensor over
 * regmap. No-op (returns 0) when the driver has no blob support. The
 * mode blob is written first and only when it contains commands; the
 * control blob is always written. Returns the first write error, if any.
 */
int tegracam_write_blobs(struct tegracam_ctrl_handler *hdl)
{
	struct camera_common_data *s_data = hdl->tc_dev->s_data;
	struct tegracam_sensor_data *sensor_data = &hdl->sensor_data;
	struct sensor_blob *ctrl_blob = &sensor_data->ctrls_blob;
	struct sensor_blob *mode_blob = &sensor_data->mode_blob;
	const struct tegracam_ctrl_ops *ops = hdl->ctrl_ops;
	int err = 0;

	/* no blob control available */
	if (ops == NULL || !ops->is_blob_supported)
		return 0;

	/*
	 * TODO: Extend this to multiple subdevices
	 * mode blob commands can be zero for auto control updates
	 * and stop streaming cases
	 */
	if (mode_blob->num_cmds) {
		err = write_sensor_blob(s_data->regmap, mode_blob);
		if (err) {
			dev_err(s_data->dev, "Error writing mode blob\n");
			return err;
		}
	}

	err = write_sensor_blob(s_data->regmap, ctrl_blob);
	if (err) {
		dev_err(s_data->dev, "Error writing control blob\n");
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tegracam_write_blobs);
diff --git a/drivers/media/platform/tegra/camera/tegracam_v4l2.c b/drivers/media/platform/tegra/camera/tegracam_v4l2.c
new file mode 100644
index 00000000..f348171e
--- /dev/null
+++ b/drivers/media/platform/tegra/camera/tegracam_v4l2.c
@@ -0,0 +1,252 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * tegracam_v4l2 - tegra camera framework for v4l2 support
 *
 * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
+ */ +#include +#include +#include +#include + +static int v4l2sd_stream(struct v4l2_subdev *sd, int enable) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + struct camera_common_data *s_data = to_camera_common_data(&client->dev); + struct camera_common_sensor_ops *sensor_ops; + struct tegracam_device *tc_dev; + struct tegracam_sensor_data *sensor_data; + struct sensor_blob *ctrl_blob; + struct sensor_blob *mode_blob; + int err = 0; + + dev_dbg(&client->dev, "%s++ enable %d\n", __func__, enable); + + if (!s_data) + return -EINVAL; + + sensor_ops = s_data->ops; + tc_dev = to_tegracam_device(s_data); + sensor_data = &s_data->tegracam_ctrl_hdl->sensor_data; + ctrl_blob = &sensor_data->ctrls_blob; + mode_blob = &sensor_data->mode_blob; + + /* reset control packet at start/stop streaming */ + memset(ctrl_blob, 0, sizeof(struct sensor_blob)); + memset(mode_blob, 0, sizeof(struct sensor_blob)); + if (enable) { + /* increase ref count so module can't be unloaded */ + if (!try_module_get(s_data->owner)) + return -ENODEV; + + err = sensor_ops->set_mode(tc_dev); + if (err) { + dev_err(&client->dev, "Error writing mode\n"); + goto error; + } + + /* update control ranges based on mode settings*/ + err = tegracam_init_ctrl_ranges_by_mode( + s_data->tegracam_ctrl_hdl, (u32) s_data->mode); + if (err) { + dev_err(&client->dev, "Error updating control ranges\n"); + goto error; + } + + if (s_data->override_enable) { + err = tegracam_ctrl_set_overrides( + s_data->tegracam_ctrl_hdl); + if (err) { + dev_err(&client->dev, + "overrides cannot be set\n"); + goto error; + } + } + + err = sensor_ops->start_streaming(tc_dev); + if (err) { + dev_err(&client->dev, "Error turning on streaming\n"); + goto error; + } + + /* add done command for blobs */ + prepare_done_cmd(mode_blob); + prepare_done_cmd(ctrl_blob); + tc_dev->is_streaming = true; + } else { + err = sensor_ops->stop_streaming(tc_dev); + if (err) { + dev_err(&client->dev, "Error turning off streaming\n"); + goto error; + } 
+ + /* add done command for blob */ + prepare_done_cmd(ctrl_blob); + tc_dev->is_streaming = false; + + module_put(s_data->owner); + } + + return 0; + +error: + module_put(s_data->owner); + return err; +} + +static int v4l2sd_g_input_status(struct v4l2_subdev *sd, u32 *status) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + struct camera_common_data *s_data = to_camera_common_data(&client->dev); + struct camera_common_power_rail *pw; + + if (!s_data) + return -EINVAL; + + pw = s_data->power; + *status = pw->state == SWITCH_ON; + return 0; +} + +static struct v4l2_subdev_video_ops v4l2sd_video_ops = { + .s_stream = v4l2sd_stream, + .g_input_status = v4l2sd_g_input_status, +}; + +static struct v4l2_subdev_core_ops v4l2sd_core_ops = { + .s_power = camera_common_s_power, +}; + +static int v4l2sd_get_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format) +{ + return camera_common_g_fmt(sd, &format->format); +} + +static int v4l2sd_set_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + struct camera_common_data *s_data = to_camera_common_data(&client->dev); + int ret; + + if (!s_data) + return -EINVAL; + + if (format->which == V4L2_SUBDEV_FORMAT_TRY) + ret = camera_common_try_fmt(sd, &format->format); + else { + ret = camera_common_s_fmt(sd, &format->format); + + if (ret == 0) { + /* update control ranges based on mode settings*/ + ret = tegracam_init_ctrl_ranges_by_mode( + s_data->tegracam_ctrl_hdl, (u32) s_data->mode); + if (ret) { + dev_err(&client->dev, "Error updating control ranges %d\n", ret); + return ret; + } + } + } + + /* TODO: Add set mode for blob collection */ + + return ret; +} + +static struct v4l2_subdev_pad_ops v4l2sd_pad_ops = { + .set_fmt = v4l2sd_set_fmt, + .get_fmt = v4l2sd_get_fmt, + .enum_mbus_code = camera_common_enum_mbus_code, + .enum_frame_size = camera_common_enum_framesizes, + 
.enum_frame_interval = camera_common_enum_frameintervals, + .get_mbus_config = camera_common_get_mbus_config, +}; + +static struct v4l2_subdev_ops v4l2sd_ops = { + .core = &v4l2sd_core_ops, + .video = &v4l2sd_video_ops, + .pad = &v4l2sd_pad_ops, +}; + +static const struct media_entity_operations media_ops = { + .link_validate = v4l2_subdev_link_validate, +}; + +int tegracam_v4l2subdev_register(struct tegracam_device *tc_dev, + bool is_sensor) +{ + struct camera_common_data *s_data = tc_dev->s_data; + struct tegracam_ctrl_handler *ctrl_hdl; + struct v4l2_subdev *sd = NULL; + struct device *dev = tc_dev->dev; + int err = 0; + + if (!s_data) + return -EINVAL; + + ctrl_hdl = s_data->tegracam_ctrl_hdl; + + /* init v4l2 subdevice for registration */ + sd = &s_data->subdev; + if (!sd || !tc_dev->client) { + dev_err(dev, "Invalid subdev context\n"); + return -ENODEV; + } + + v4l2_i2c_subdev_init(sd, tc_dev->client, &v4l2sd_ops); + + ctrl_hdl->ctrl_ops = tc_dev->tcctrl_ops; + err = tegracam_ctrl_handler_init(ctrl_hdl); + if (err) { + dev_err(dev, "Failed to init ctrls %s\n", tc_dev->name); + return err; + } + if (ctrl_hdl->ctrl_ops != NULL) + tc_dev->numctrls = ctrl_hdl->ctrl_ops->numctrls; + else + tc_dev->numctrls = 0; + s_data->numctrls = tc_dev->numctrls; + sd->ctrl_handler = s_data->ctrl_handler = &ctrl_hdl->ctrl_handler; + s_data->ctrls = ctrl_hdl->ctrls; + sd->internal_ops = tc_dev->v4l2sd_internal_ops; + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | + V4L2_SUBDEV_FL_HAS_EVENTS; + s_data->owner = sd->owner; + /* Set owner to NULL so we can unload the driver module */ + sd->owner = NULL; + +#if defined(CONFIG_MEDIA_CONTROLLER) + tc_dev->pad.flags = MEDIA_PAD_FL_SOURCE; + sd->entity.ops = &media_ops; + err = tegra_media_entity_init(&sd->entity, + 1, &tc_dev->pad, true, is_sensor); + if (err < 0) { + dev_err(dev, "unable to init media entity\n"); + return err; + } +#endif + + return v4l2_async_register_subdev(sd); +} +EXPORT_SYMBOL_GPL(tegracam_v4l2subdev_register); + +void 
tegracam_v4l2subdev_unregister(struct tegracam_device *tc_dev) +{ + struct camera_common_data *s_data = tc_dev->s_data; + struct v4l2_subdev *sd; + + if (!s_data) + return; + + sd = &s_data->subdev; + + v4l2_ctrl_handler_free(s_data->ctrl_handler); + v4l2_async_unregister_subdev(sd); +#if defined(CONFIG_MEDIA_CONTROLLER) + media_entity_cleanup(&sd->entity); +#endif +} +EXPORT_SYMBOL_GPL(tegracam_v4l2subdev_unregister); diff --git a/drivers/media/platform/tegra/camera/vi/channel.c b/drivers/media/platform/tegra/camera/vi/channel.c new file mode 100644 index 00000000..41585760 --- /dev/null +++ b/drivers/media/platform/tegra/camera/vi/channel.c @@ -0,0 +1,2714 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVIDIA Tegra Video Input Device + * + * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#define CREATE_TRACE_POINTS +#include + +#include "mipical/mipi_cal.h" + +#include +#include "nvcsi/nvcsi.h" +#include "nvcsi/deskew.h" + +#define TPG_CSI_GROUP_ID 10 +#define HDMI_IN_RATE 550000000 + +static s64 queue_init_ts; + +static bool tegra_channel_verify_focuser(struct tegra_channel *chan) +{ + char *focuser; + + /* + * WAR - to avoid power on/off during open/close for sensor + * nodes but not focuser nodes. 
+ * add an array when more focusers are available, this logic is + * not needed once the focuser is bound to sensor channel + */ + focuser = strnstr(chan->video->name, "lc898212", sizeof(chan->video->name)); + + return (focuser != NULL); +} + +static void gang_buffer_offsets(struct tegra_channel *chan) +{ + int i; + u32 offset = 0; + + for (i = 0; i < chan->total_ports; i++) { + switch (chan->gang_mode) { + case CAMERA_NO_GANG_MODE: + case CAMERA_GANG_L_R: + case CAMERA_GANG_R_L: + offset = chan->gang_bytesperline; + break; + case CAMERA_GANG_T_B: + case CAMERA_GANG_B_T: + offset = chan->gang_sizeimage; + break; + default: + offset = 0; + } + offset = ((offset + TEGRA_SURFACE_ALIGNMENT - 1) & + ~(TEGRA_SURFACE_ALIGNMENT - 1)); + chan->buffer_offset[i] = i * offset; + } + spec_bar(); +} + +static u32 gang_mode_width(enum camera_gang_mode gang_mode, + unsigned int width) +{ + if ((gang_mode == CAMERA_GANG_L_R) || + (gang_mode == CAMERA_GANG_R_L)) + return width >> 1; + else + return width; +} + +static u32 gang_mode_height(enum camera_gang_mode gang_mode, + unsigned int height) +{ + if ((gang_mode == CAMERA_GANG_T_B) || + (gang_mode == CAMERA_GANG_B_T)) + return height >> 1; + else + return height; +} + +static void update_gang_mode_params(struct tegra_channel *chan) +{ + chan->gang_width = gang_mode_width(chan->gang_mode, + chan->format.width); + chan->gang_height = gang_mode_height(chan->gang_mode, + chan->format.height); + chan->gang_bytesperline = ((chan->gang_width * + chan->fmtinfo->bpp.numerator) / + chan->fmtinfo->bpp.denominator); + chan->gang_sizeimage = chan->gang_bytesperline * + chan->format.height; + gang_buffer_offsets(chan); +} + +static void update_gang_mode(struct tegra_channel *chan) +{ + int width = chan->format.width; + int height = chan->format.height; + + /* + * At present only 720p, 1080p and 4k resolutions + * are supported and only 4K requires gang mode + * Update this code with CID for future extensions + * Also, validate width and height 
of images based + * on gang mode and surface stride alignment + */ + if ((width > 1920) && (height > 1080)) { + chan->gang_mode = CAMERA_GANG_L_R; + chan->valid_ports = chan->total_ports; + } else { + chan->gang_mode = CAMERA_NO_GANG_MODE; + chan->valid_ports = 1; + } + + update_gang_mode_params(chan); +} + +static u32 get_aligned_buffer_size(struct tegra_channel *chan, + u32 bytesperline, u32 height) +{ + u32 height_aligned; + u32 temp_size, size; + + height_aligned = roundup(height, chan->height_align); + temp_size = bytesperline * height_aligned; + size = roundup(temp_size, chan->size_align); + + return size; +} + +static void tegra_channel_fmt_align(struct tegra_channel *chan, + const struct tegra_video_format *vfmt, + u32 *width, u32 *height, u32 *bytesperline) +{ + unsigned int min_bpl; + unsigned int max_bpl; + unsigned int align, fmt_align; + unsigned int temp_bpl; + unsigned int bpl; + unsigned int numerator, denominator; + const struct tegra_frac *bpp = &vfmt->bpp; + + /* Init, if un-init */ + if (!*width || !*height) { + *width = chan->format.width; + *height = chan->format.height; + } + + denominator = (!bpp->denominator) ? 1 : bpp->denominator; + numerator = (!bpp->numerator) ? 1 : bpp->numerator; + /* The transfer alignment requirements are expressed in bytes. Compute + * the minimum and maximum values, clamp the requested width and convert + * it back to pixels. + * use denominator for base width alignment when >1. + * use bytesperline to adjust width for applicaton related requriements. + */ + fmt_align = (denominator == 1) ? numerator : 1; + align = lcm(chan->width_align, fmt_align); + align = align > 0 ? align : 1; + bpl = tegra_core_bytes_per_line(*width, align, vfmt); + + /* Align stride */ + if (chan->vi->fops->vi_stride_align) + chan->vi->fops->vi_stride_align(&bpl); + + if (!*bytesperline) + *bytesperline = bpl; + + /* Don't clamp the width based on bpl as stride and width can be + * different. 
Aligned width also may force a sensor mode change other + * than the requested one + */ + *height = clamp(*height, TEGRA_MIN_HEIGHT, TEGRA_MAX_HEIGHT); + + /* Clamp the requested bytes per line value. If the maximum bytes per + * line value is zero, the module doesn't support user configurable line + * sizes. Override the requested value with the minimum in that case. + */ + min_bpl = bpl; + max_bpl = rounddown(TEGRA_MAX_WIDTH, chan->stride_align); + temp_bpl = roundup(*bytesperline, chan->stride_align); + + *bytesperline = clamp(temp_bpl, min_bpl, max_bpl); +} + +/* Check if sensor mode is interlaced and the type of interlaced mode */ + +static void tegra_channel_set_interlace_mode(struct tegra_channel *chan) +{ + struct v4l2_subdev *sd = NULL; + struct camera_common_data *s_data = NULL; + struct device_node *node = NULL; + struct sensor_mode_properties *s_mode = NULL; + + if (chan->subdev_on_csi) { + sd = chan->subdev_on_csi; + s_data = to_camera_common_data(sd->dev); + node = sd->dev->of_node; + } + + if (s_data != NULL && node != NULL) { + int idx = s_data->mode_prop_idx; + + if (idx < s_data->sensor_props.num_modes) { + s_mode = &s_data->sensor_props.sensor_modes[idx]; + chan->is_interlaced = + s_mode->control_properties.is_interlaced; + if (chan->is_interlaced) { + if (s_mode->control_properties.interlace_type) + chan->interlace_type = Interleaved; + else + chan->interlace_type = Top_Bottom; + } + } + } +} + +static void tegra_channel_update_format(struct tegra_channel *chan, + u32 width, u32 height, u32 fourcc, + const struct tegra_frac *bpp, + u32 preferred_stride) +{ + u32 denominator = (!bpp->denominator) ? 1 : bpp->denominator; + u32 numerator = (!bpp->numerator) ? 
1 : bpp->numerator; + u32 bytesperline = (width * numerator / denominator); + + /* Align stride */ + if (chan->vi->fops->vi_stride_align) + chan->vi->fops->vi_stride_align(&bytesperline); + + chan->format.width = width; + chan->format.height = height; + chan->format.pixelformat = fourcc; + chan->format.bytesperline = preferred_stride ?: bytesperline; + chan->buffer_offset[0] = 0; + chan->interlace_bplfactor = 1; + + dev_dbg(&chan->video->dev, + "%s: Resolution= %dx%d bytesperline=%d\n", + __func__, width, height, chan->format.bytesperline); + + tegra_channel_fmt_align(chan, chan->fmtinfo, + &chan->format.width, + &chan->format.height, + &chan->format.bytesperline); + + /* Calculate the sizeimage per plane */ + chan->format.sizeimage = get_aligned_buffer_size(chan, + chan->format.bytesperline, chan->format.height); + + tegra_channel_set_interlace_mode(chan); + /* Double the size of allocated buffer for interlaced sensor modes */ + if (chan->is_interlaced) + chan->format.sizeimage *= 2; + + if (fourcc == V4L2_PIX_FMT_NV16) + chan->format.sizeimage *= 2; +} + +static void tegra_channel_fmts_bitmap_init(struct tegra_channel *chan) +{ + int ret, pixel_format_index = 0, init_code = 0; + struct v4l2_subdev *subdev = chan->subdev_on_csi; + struct v4l2_subdev_format fmt = { + .pad = 0, + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + }; + struct v4l2_subdev_mbus_code_enum code = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + }; + struct v4l2_subdev_state cfg = {}; + bitmap_zero(chan->fmts_bitmap, MAX_FORMAT_NUM); + + /* + * Initialize all the formats available from + * the sub-device and extract the corresponding + * index from the pre-defined video formats and initialize + * the channel default format with the active code + * Index zero as the only sub-device is sensor + */ + while (1) { + ret = v4l2_subdev_call(subdev, pad, enum_mbus_code, + &cfg, &code); + if (ret < 0) + /* no more formats */ + break; + + pixel_format_index = + tegra_core_get_idx_by_code(chan, code.code, 0); + while 
(pixel_format_index >= 0) { + bitmap_set(chan->fmts_bitmap, pixel_format_index, 1); + /* Set init_code to the first matched format */ + if (!init_code) + init_code = code.code; + /* Look for other formats with the same mbus code */ + pixel_format_index = tegra_core_get_idx_by_code(chan, + code.code, pixel_format_index + 1); + } + + code.index++; + } + + if (!init_code) { + pixel_format_index = + tegra_core_get_idx_by_code(chan, TEGRA_VF_DEF, 0); + if (pixel_format_index >= 0) { + bitmap_set(chan->fmts_bitmap, pixel_format_index, 1); + init_code = TEGRA_VF_DEF; + } + } + /* Get the format based on active code of the sub-device */ + ret = v4l2_subdev_call(subdev, pad, get_fmt, &cfg, &fmt); + if (ret) + return; + + /* Initiate the channel format to the first matched format */ + chan->fmtinfo = + tegra_core_get_format_by_code(chan, fmt.format.code, 0); + if (!chan->fmtinfo) + return; + + v4l2_fill_pix_format(&chan->format, &fmt.format); + tegra_channel_update_format(chan, chan->format.width, + chan->format.height, + chan->fmtinfo->fourcc, + &chan->fmtinfo->bpp, + chan->preferred_stride); + + if (chan->total_ports > 1) + update_gang_mode(chan); +} + +/* + * ----------------------------------------------------------------------------- + * Tegra channel frame setup and capture operations + * ----------------------------------------------------------------------------- + */ +/* + * Update the timestamp of the buffer + */ +void set_timestamp(struct tegra_channel_buffer *buf, + const struct timespec64 *ts) +{ + buf->buf.vb2_buf.timestamp = (u64)timespec64_to_ns(ts); +} +EXPORT_SYMBOL(set_timestamp); + +void release_buffer(struct tegra_channel *chan, + struct tegra_channel_buffer *buf) +{ + struct vb2_v4l2_buffer *vbuf = &buf->buf; + s64 frame_arrived_ts = 0; + + /* release one frame */ + vbuf->sequence = chan->sequence++; + vbuf->field = V4L2_FIELD_NONE; + vb2_set_plane_payload(&vbuf->vb2_buf, + 0, chan->format.sizeimage); + + /* + * WAR to force buffer state if capture 
state is not good + * WAR - After sync point timeout or error frame capture + * the second buffer is intermittently frame of zeros + * with no error status or padding. + */ + if (chan->capture_state != CAPTURE_GOOD || vbuf->sequence < 2) + buf->state = VB2_BUF_STATE_ERROR; + + if (chan->sequence == 1) { + /* + * Evaluate the initial capture latency between videobuf2 queue + * and first captured frame release to user-space. + */ + frame_arrived_ts = ktime_to_ms(ktime_get()); + dev_dbg(&chan->video->dev, + "%s: capture init latency is %lld ms\n", + __func__, (frame_arrived_ts - queue_init_ts)); + } + + dev_dbg(&chan->video->dev, + "%s: release buf[%p] frame[%d] to user-space\n", + __func__, buf, chan->sequence); + vb2_buffer_done(&vbuf->vb2_buf, buf->state); +} +EXPORT_SYMBOL(release_buffer); + +/* + * `buf` has been successfully setup to receive a frame and is + * "in flight" through the VI hardware. We are currently waiting + * on it to be filled. Moves the pointer into the `release` list + * for the release thread to wait on. 
+ */ +void enqueue_inflight(struct tegra_channel *chan, + struct tegra_channel_buffer *buf) +{ + /* Put buffer into the release queue */ + spin_lock(&chan->release_lock); + list_add_tail(&buf->queue, &chan->release); + spin_unlock(&chan->release_lock); + + /* Wake up kthread for release */ + wake_up_interruptible(&chan->release_wait); +} +EXPORT_SYMBOL(enqueue_inflight); + +struct tegra_channel_buffer *dequeue_inflight(struct tegra_channel *chan) +{ + struct tegra_channel_buffer *buf = NULL; + + spin_lock(&chan->release_lock); + if (list_empty(&chan->release)) { + spin_unlock(&chan->release_lock); + return NULL; + } + + buf = list_entry(chan->release.next, + struct tegra_channel_buffer, queue); + + if (buf) + list_del_init(&buf->queue); + + spin_unlock(&chan->release_lock); + return buf; +} +EXPORT_SYMBOL(dequeue_inflight); + +void tegra_channel_init_ring_buffer(struct tegra_channel *chan) +{ + chan->released_bufs = 0; + chan->num_buffers = 0; + chan->save_index = 0; + chan->free_index = 0; + chan->bfirst_fstart = false; + chan->capture_descr_index = 0; + chan->capture_descr_sequence = 0; + chan->queue_error = false; +} +EXPORT_SYMBOL(tegra_channel_init_ring_buffer); + +void free_ring_buffers(struct tegra_channel *chan, int frames) +{ + struct vb2_v4l2_buffer *vbuf; + s64 frame_arrived_ts = 0; + + spin_lock(&chan->buffer_lock); + + if (frames == 0) + frames = chan->num_buffers; + + while (frames > 0) { + vbuf = chan->buffers[chan->free_index]; + + /* Skip updating the buffer sequence with channel sequence + * for interlaced captures and this instead will be updated + * with frame id received from CSI with capture complete + */ + if (!chan->is_interlaced) + vbuf->sequence = chan->sequence++; + else + chan->sequence++; + /* release one frame */ + vbuf->field = V4L2_FIELD_NONE; + vb2_set_plane_payload(&vbuf->vb2_buf, + 0, chan->format.sizeimage); + + /* + * WAR to force buffer state if capture state is not good + * WAR - After sync point timeout or error frame capture 
+ * the second buffer is intermittently frame of zeros + * with no error status or padding. + */ + /* This will drop the first two frames. Disable for now. */ + if (chan->capture_state != CAPTURE_GOOD || + chan->released_bufs < 2) + chan->buffer_state[chan->free_index] = + VB2_BUF_STATE_ERROR; + + if (chan->sequence == 1) { + /* + * Evaluate the initial capture latency + * between videobuf2 queue and first captured + * frame release to user-space. + */ + frame_arrived_ts = ktime_to_ms(ktime_get()); + dev_dbg(&chan->video->dev, + "%s: capture init latency is %lld ms\n", + __func__, (frame_arrived_ts - queue_init_ts)); + } + vb2_buffer_done(&vbuf->vb2_buf, + chan->buffer_state[chan->free_index++]); + + if (chan->free_index >= chan->capture_queue_depth) + chan->free_index = 0; + chan->num_buffers--; + chan->released_bufs++; + frames--; + } + spin_unlock(&chan->buffer_lock); +} +EXPORT_SYMBOL(free_ring_buffers); + +static void add_buffer_to_ring(struct tegra_channel *chan, + struct vb2_v4l2_buffer *vb) +{ + /* save the buffer to the ring first */ + /* Mark buffer state as error before start */ + spin_lock(&chan->buffer_lock); + chan->buffer_state[chan->save_index] = VB2_BUF_STATE_ERROR; + chan->buffers[chan->save_index++] = vb; + if (chan->save_index >= chan->capture_queue_depth) + chan->save_index = 0; + chan->num_buffers++; + spin_unlock(&chan->buffer_lock); +} + +static void update_state_to_buffer(struct tegra_channel *chan, int state) +{ + int save_index = (chan->save_index - PREVIOUS_BUFFER_DEC_INDEX); + + /* save index decrements by 2 as 3 bufs are added in ring buffer */ + if (save_index < 0) + save_index += chan->capture_queue_depth; + /* update state for the previous buffer */ + chan->buffer_state[save_index] = state; + + /* for timeout/error case update the current buffer state as well */ + if (chan->capture_state != CAPTURE_GOOD) + chan->buffer_state[chan->save_index] = state; +} + +void tegra_channel_ring_buffer(struct tegra_channel *chan, + struct 
vb2_v4l2_buffer *vb, + struct timespec64 *ts, int state) +{ + if (!chan->bfirst_fstart) + chan->bfirst_fstart = true; + else + update_state_to_buffer(chan, state); + + /* Capture state is not GOOD, release all buffers and re-init state */ + if (chan->capture_state != CAPTURE_GOOD) { + free_ring_buffers(chan, chan->num_buffers); + tegra_channel_init_ring_buffer(chan); + return; + } else { + /* TODO: granular time code information */ + vb->timecode.seconds = ts->tv_sec; + } + + /* release buffer N at N+2 frame start event */ + if (chan->num_buffers >= (chan->capture_queue_depth - 1)) + free_ring_buffers(chan, 1); +} +EXPORT_SYMBOL(tegra_channel_ring_buffer); + +void tegra_channel_ec_close(struct tegra_mc_vi *vi) +{ + struct tegra_channel *chan; + + /* clear all channles sync point fifo context */ + list_for_each_entry(chan, &vi->vi_chans, list) { + memset(&chan->syncpoint_fifo[0], + 0, sizeof(chan->syncpoint_fifo)); + } +} +EXPORT_SYMBOL(tegra_channel_ec_close); + +struct tegra_channel_buffer *dequeue_buffer(struct tegra_channel *chan, + bool requeue) +{ + struct tegra_channel_buffer *buf = NULL; + + spin_lock(&chan->start_lock); + if (list_empty(&chan->capture)) + goto done; + + buf = list_entry(chan->capture.next, + struct tegra_channel_buffer, queue); + list_del_init(&buf->queue); + + if (requeue) { + /* add dequeued buffer to the ring buffer */ + add_buffer_to_ring(chan, &buf->buf); + } +done: + spin_unlock(&chan->start_lock); + return buf; +} +EXPORT_SYMBOL(dequeue_buffer); + +struct tegra_channel_buffer *dequeue_dequeue_buffer(struct tegra_channel *chan) +{ + struct tegra_channel_buffer *buf = NULL; + + spin_lock(&chan->dequeue_lock); + + if (list_empty(&chan->dequeue)) + goto done; + + buf = list_entry(chan->dequeue.next, struct tegra_channel_buffer, + queue); + list_del_init(&buf->queue); + +done: + spin_unlock(&chan->dequeue_lock); + return buf; +} +EXPORT_SYMBOL(dequeue_dequeue_buffer); + +int tegra_channel_error_recover(struct tegra_channel *chan, bool 
queue_error) +{ + struct tegra_mc_vi *vi = chan->vi; + int err = 0; + + if (!(vi->fops && vi->fops->vi_error_recover)) { + err = -EIO; + goto done; + } + + dev_warn(vi->dev, "err_rec: attempting to reset the capture channel\n"); + + err = vi->fops->vi_error_recover(chan, queue_error); + if (!err) + dev_warn(vi->dev, + "err_rec: successfully reset the capture channel\n"); + +done: + return err; +} +EXPORT_SYMBOL(tegra_channel_error_recover); + +static struct device *tegra_channel_get_vi_unit(struct tegra_channel *chan) +{ + struct tegra_mc_vi *vi = chan->vi; + struct device *vi_unit_dev; + + if (vi->fops->vi_unit_get_device_handle) + vi->fops->vi_unit_get_device_handle(vi->ndev, chan->port[0], + &vi_unit_dev); + else + vi_unit_dev = vi->dev; + + return vi_unit_dev; +} + +/* + * ----------------------------------------------------------------------------- + * videobuf2 queue operations + * ----------------------------------------------------------------------------- + */ +static int +tegra_channel_queue_setup(struct vb2_queue *vq, + unsigned int *nbuffers, unsigned int *nplanes, + unsigned int sizes[], struct device *alloc_devs[]) +{ + struct tegra_channel *chan = vb2_get_drv_priv(vq); + struct tegra_mc_vi *vi = chan->vi; + + *nplanes = 1; + + sizes[0] = chan->format.sizeimage; + alloc_devs[0] = tegra_channel_get_vi_unit(chan); + + if (vi->fops && vi->fops->vi_setup_queue) + return vi->fops->vi_setup_queue(chan, nbuffers); + else + return -EINVAL; +} + +int tegra_channel_alloc_buffer_queue(struct tegra_channel *chan, + unsigned int num_buffers) +{ + struct device *vi_unit_dev = tegra_channel_get_vi_unit(chan); + + chan->buffer_state = devm_kzalloc(vi_unit_dev, + (num_buffers * sizeof(*chan->buffer_state)), GFP_KERNEL); + if (!chan->buffer_state) + goto alloc_error; + + chan->buffers = devm_kzalloc(vi_unit_dev, + (num_buffers * sizeof(*chan->buffers)), GFP_KERNEL); + if (!chan->buffers) + goto alloc_error; + + chan->capture_queue_depth = num_buffers; + + return 0; + 
+alloc_error: + dev_err(chan->vi->dev, + "error: could not allocate memory for %u size buffer queue\n", + num_buffers); + + tegra_channel_dealloc_buffer_queue(chan); + + return -ENOMEM; +} +EXPORT_SYMBOL(tegra_channel_alloc_buffer_queue); + +void tegra_channel_dealloc_buffer_queue(struct tegra_channel *chan) +{ + struct device *vi_unit_dev = tegra_channel_get_vi_unit(chan); + + if (chan->buffer_state) + devm_kfree(vi_unit_dev, chan->buffer_state); + if (chan->buffers) + devm_kfree(vi_unit_dev, chan->buffers); +} + +static int tegra_channel_buffer_prepare(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct tegra_channel *chan = vb2_get_drv_priv(vb->vb2_queue); + struct tegra_channel_buffer *buf = to_tegra_channel_buffer(vbuf); + + buf->chan = chan; + vb2_set_plane_payload(&vbuf->vb2_buf, 0, chan->format.sizeimage); +#if defined(CONFIG_VIDEOBUF2_DMA_CONTIG) + buf->addr = vb2_dma_contig_plane_dma_addr(vb, 0); +#endif + + return 0; +} + +static void tegra_channel_buffer_queue(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct tegra_channel *chan = vb2_get_drv_priv(vb->vb2_queue); + struct tegra_channel_buffer *buf = to_tegra_channel_buffer(vbuf); + + /* for bypass mode - do nothing */ + if (chan->bypass) + return; + + if (!queue_init_ts) { + /* + * Record videobuf2 queue initial timestamp. 
+ * Note: latency is accurate when streaming is already turned ON + */ + queue_init_ts = ktime_to_ms(ktime_get()); + } + + /* Put buffer into the capture queue */ + spin_lock(&chan->start_lock); + list_add_tail(&buf->queue, &chan->capture); + spin_unlock(&chan->start_lock); + + /* Wake up kthread for capture */ + wake_up_interruptible(&chan->start_wait); +} + + +static void tegra_channel_queued_buf_done_single_thread( + struct tegra_channel *chan, + enum vb2_buffer_state state) +{ + struct tegra_channel_buffer *buf, *nbuf; + + /* delete capture list */ + spin_lock(&chan->start_lock); + list_for_each_entry_safe(buf, nbuf, &chan->capture, queue) { + vb2_buffer_done(&buf->buf.vb2_buf, state); + list_del(&buf->queue); + } + spin_unlock(&chan->start_lock); + + /* delete dequeue list */ + spin_lock(&chan->dequeue_lock); + list_for_each_entry_safe(buf, nbuf, &chan->dequeue, queue) { + vb2_buffer_done(&buf->buf.vb2_buf, state); + list_del(&buf->queue); + } + spin_unlock(&chan->dequeue_lock); +} + +static void tegra_channel_queued_buf_done_multi_thread( + struct tegra_channel *chan, + enum vb2_buffer_state state) +{ + struct tegra_channel_buffer *buf, *nbuf; + spinlock_t *lock = &chan->start_lock; + spinlock_t *release_lock = &chan->release_lock; + struct list_head *q = &chan->capture; + struct list_head *rel_q = &chan->release; + + spin_lock(lock); + list_for_each_entry_safe(buf, nbuf, q, queue) { + vb2_buffer_done(&buf->buf.vb2_buf, state); + list_del(&buf->queue); + } + spin_unlock(lock); + + /* delete release list */ + spin_lock(release_lock); + list_for_each_entry_safe(buf, nbuf, rel_q, queue) { + vb2_buffer_done(&buf->buf.vb2_buf, state); + list_del(&buf->queue); + } + spin_unlock(release_lock); +} + +/* Return all queued buffers back to videobuf2 */ +void tegra_channel_queued_buf_done(struct tegra_channel *chan, + enum vb2_buffer_state state, bool multi_queue) +{ + if (multi_queue) + tegra_channel_queued_buf_done_multi_thread(chan, state); + else + 
tegra_channel_queued_buf_done_single_thread(chan, state); +} +EXPORT_SYMBOL(tegra_channel_queued_buf_done); + +/* + * ----------------------------------------------------------------------------- + * subdevice set/unset operations + * ----------------------------------------------------------------------------- + */ +int tegra_channel_write_blobs(struct tegra_channel *chan) +{ + struct v4l2_subdev *sd = NULL; + struct camera_common_data *s_data = NULL; + + /* for TPG, do nothing */ + if (chan->pg_mode) + return 0; + + sd = chan->subdev_on_csi; + if (!sd) + return -EINVAL; + + s_data = to_camera_common_data(sd->dev); + if (!s_data) + return 0; + + if (!is_tvcf_supported(s_data->version)) + return 0; + + return tegracam_write_blobs(s_data->tegracam_ctrl_hdl); +} +EXPORT_SYMBOL(tegra_channel_write_blobs); + +int tegra_channel_set_stream(struct tegra_channel *chan, bool on) +{ + int num_sd; + int ret = 0; + int err = 0; + int max_deskew_attempts = 5; + int deskew_attempts = 0; + struct v4l2_subdev *sd; + + if (atomic_read(&chan->is_streaming) == on) + return 0; + trace_tegra_channel_set_stream("enable", on); + + if (on) { + tegra_camera_update_clknbw(chan, true); + /* Enable CSI before sensor. Reason is as follows: + * CSI is able to catch the very first clk transition. 
+ */ + while (deskew_attempts < max_deskew_attempts) { + for (num_sd = 0; num_sd < chan->num_subdevs; num_sd++) { + sd = chan->subdev[num_sd]; + + trace_tegra_channel_set_stream(sd->name, on); + err = v4l2_subdev_call(sd, video, s_stream, on); + if (!ret && err < 0 && err != -ENOIOCTLCMD) + ret = err; + } + if (!chan->bypass && !chan->pg_mode && + chan->deskew_ctx->deskew_lanes) { + err = nvcsi_deskew_apply_check( + chan->deskew_ctx); + ++deskew_attempts; + if (err && deskew_attempts < + max_deskew_attempts) { + for (num_sd = 0; + num_sd < chan->num_subdevs; + num_sd++) { + sd = chan->subdev[num_sd]; + trace_tegra_channel_set_stream( + sd->name, false); + err = v4l2_subdev_call(sd, + video, + s_stream, false); + } + } else + break; + } else + break; + } + } else { + for (num_sd = chan->num_subdevs - 1; num_sd >= 0; num_sd--) { + sd = chan->subdev[num_sd]; + + trace_tegra_channel_set_stream(sd->name, on); + err = v4l2_subdev_call(sd, video, s_stream, on); + if (!ret && err < 0 && err != -ENOIOCTLCMD) + ret = err; + } + spec_bar(); + + tegra_camera_update_clknbw(chan, false); + } + + if (ret == 0) + atomic_set(&chan->is_streaming, on); + return ret; +} +EXPORT_SYMBOL(tegra_channel_set_stream); + +int tegra_channel_set_power(struct tegra_channel *chan, bool on) +{ + int num_sd; + int ret = 0; + int err = 0; + struct v4l2_subdev *sd; + + /* First power on and last power off will turn on/off the subdevices */ + if (on) { + if (atomic_add_return(1, &chan->power_on_refcnt) != 1) + return 0; + } else { + if (!atomic_dec_and_test(&chan->power_on_refcnt)) + return 0; + } + + /* Power on CSI at the last to complete calibration of mipi lanes */ + for (num_sd = chan->num_subdevs - 1; num_sd >= 0; num_sd--) { + sd = chan->subdev[num_sd]; + + trace_tegra_channel_set_power(sd->name, on); + err = v4l2_subdev_call(sd, core, s_power, on); + if (!ret && err < 0 && err != -ENOIOCTLCMD) + ret = err; + } + + return ret; +} +EXPORT_SYMBOL(tegra_channel_set_power); + +static int 
tegra_channel_start_streaming(struct vb2_queue *vq, u32 count) +{ + struct tegra_channel *chan = vb2_get_drv_priv(vq); + struct tegra_mc_vi *vi = chan->vi; + + if (vi->fops) { + int ret = 0; + + /* power on hw at the start of streaming */ + ret = vi->fops->vi_power_on(chan); + if (ret < 0) + return ret; + + return vi->fops->vi_start_streaming(vq, count); + } + return 0; +} + +static void tegra_channel_stop_streaming(struct vb2_queue *vq) +{ + struct tegra_channel *chan = vb2_get_drv_priv(vq); + struct tegra_mc_vi *vi = chan->vi; + + if (vi->fops) { + vi->fops->vi_stop_streaming(vq); + vi->fops->vi_power_off(chan); + } + + /* Clean-up recorded videobuf2 queue initial timestamp */ + queue_init_ts = 0; +} + +static const struct vb2_ops tegra_channel_queue_qops = { + .queue_setup = tegra_channel_queue_setup, + .buf_prepare = tegra_channel_buffer_prepare, + .buf_queue = tegra_channel_buffer_queue, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, + .start_streaming = tegra_channel_start_streaming, + .stop_streaming = tegra_channel_stop_streaming, +}; + +/* ----------------------------------------------------------------------------- + * V4L2 ioctls + */ + +static int +tegra_channel_querycap(struct file *file, void *fh, struct v4l2_capability *cap) +{ + struct tegra_channel *chan = video_drvdata(file); + int ret = 0; + + cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->device_caps |= V4L2_CAP_EXT_PIX_FORMAT; + cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; + + strlcpy(cap->driver, "tegra-video", sizeof(cap->driver)); + strlcpy(cap->card, chan->video->name, sizeof(cap->card)); + ret = snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s:%u", + dev_name(chan->vi->dev), chan->port[0]); + if (ret < 0) + return -EINVAL; + + return 0; +} + +static int +tegra_channel_enum_framesizes(struct file *file, void *fh, + struct v4l2_frmsizeenum *sizes) +{ + struct tegra_channel *chan = video_drvdata(file); + struct 
v4l2_subdev *sd = chan->subdev_on_csi; + struct v4l2_subdev_frame_size_enum fse; + struct v4l2_subdev_state cfg = {}; + int ret = 0; + + /* Convert v4l2 pixel format (fourcc) into media bus format code */ + fse.code = tegra_core_get_code_by_fourcc(chan, sizes->pixel_format, 0); + if (fse.code < 0) + return -EINVAL; + fse.index = sizes->index; + fse.which = V4L2_SUBDEV_FORMAT_ACTIVE; + fse.pad = 0; + + ret = v4l2_subdev_call(sd, pad, enum_frame_size, &cfg, &fse); + + if (!ret) { + sizes->type = V4L2_FRMSIZE_TYPE_DISCRETE; + sizes->discrete.width = fse.max_width; + sizes->discrete.height = fse.max_height; + } + + return ret; +} + +static int +tegra_channel_enum_frameintervals(struct file *file, void *fh, + struct v4l2_frmivalenum *intervals) +{ + struct tegra_channel *chan = video_drvdata(file); + struct v4l2_subdev *sd = chan->subdev_on_csi; + struct v4l2_subdev_frame_interval_enum fie; + struct v4l2_subdev_state cfg = {}; + int ret = 0; + + /* Convert v4l2 pixel format (fourcc) into media bus format code */ + fie.code = tegra_core_get_code_by_fourcc( + chan, intervals->pixel_format, 0); + if (fie.code < 0) + return -EINVAL; + fie.index = intervals->index; + fie.width = intervals->width; + fie.height = intervals->height; + fie.pad = 0; + fie.which = V4L2_SUBDEV_FORMAT_TRY; + + ret = v4l2_subdev_call(sd, pad, enum_frame_interval, &cfg, &fie); + + if (!ret) { + intervals->type = V4L2_FRMIVAL_TYPE_DISCRETE; + intervals->discrete.numerator = fie.interval.numerator; + intervals->discrete.denominator = fie.interval.denominator; + } + + return ret; +} + +static int +tegra_channel_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f) +{ + struct tegra_channel *chan = video_drvdata(file); + unsigned int index = 0, i; + unsigned long *fmts_bitmap = chan->fmts_bitmap; + + if (f->index >= bitmap_weight(fmts_bitmap, MAX_FORMAT_NUM)) + return -EINVAL; + + for (i = 0; i < f->index + 1; i++, index++) + index = find_next_bit(fmts_bitmap, MAX_FORMAT_NUM, index); + + index 
-= 1; + f->pixelformat = tegra_core_get_fourcc_by_idx(chan, index); + + return 0; +} + +static int +tegra_channel_g_edid(struct file *file, void *fh, struct v4l2_edid *edid) +{ + struct tegra_channel *chan = video_drvdata(file); + struct v4l2_subdev *sd = chan->subdev_on_csi; + + if (!v4l2_subdev_has_op(sd, pad, get_edid)) + return -ENOTTY; + + return v4l2_subdev_call(sd, pad, get_edid, edid); +} + +static int +tegra_channel_s_edid(struct file *file, void *fh, struct v4l2_edid *edid) +{ + struct tegra_channel *chan = video_drvdata(file); + struct v4l2_subdev *sd = chan->subdev_on_csi; + + if (!v4l2_subdev_has_op(sd, pad, set_edid)) + return -ENOTTY; + + return v4l2_subdev_call(sd, pad, set_edid, edid); +} + +static int +tegra_channel_g_dv_timings(struct file *file, void *fh, + struct v4l2_dv_timings *timings) +{ + struct tegra_channel *chan = video_drvdata(file); + + if (!v4l2_subdev_has_op(chan->subdev_on_csi, video, g_dv_timings)) + return -ENOTTY; + + return v4l2_device_call_until_err(chan->video->v4l2_dev, + chan->grp_id, video, g_dv_timings, timings); +} + +static int +tegra_channel_s_dv_timings(struct file *file, void *fh, + struct v4l2_dv_timings *timings) +{ + struct tegra_channel *chan = video_drvdata(file); + struct v4l2_bt_timings *bt = &timings->bt; + struct v4l2_dv_timings curr_timings; + int ret; + + if (!v4l2_subdev_has_op(chan->subdev_on_csi, video, s_dv_timings)) + return -ENOTTY; + + ret = tegra_channel_g_dv_timings(file, fh, &curr_timings); + if (ret) + return ret; + + if (tegra_v4l2_match_dv_timings(timings, &curr_timings, 0, false)) + return 0; + + if (vb2_is_busy(&chan->queue)) + return -EBUSY; + + ret = v4l2_device_call_until_err(chan->video->v4l2_dev, + chan->grp_id, video, s_dv_timings, timings); + + if (!ret) + tegra_channel_update_format(chan, bt->width, bt->height, + chan->fmtinfo->fourcc, &chan->fmtinfo->bpp, + chan->preferred_stride); + + if (chan->total_ports > 1) + update_gang_mode(chan); + + return ret; +} + +static int 
+tegra_channel_query_dv_timings(struct file *file, void *fh, + struct v4l2_dv_timings *timings) +{ + struct tegra_channel *chan = video_drvdata(file); + + if (!v4l2_subdev_has_op(chan->subdev_on_csi, video, query_dv_timings)) + return -ENOTTY; + + return v4l2_device_call_until_err(chan->video->v4l2_dev, + chan->grp_id, video, query_dv_timings, timings); +} + +static int +tegra_channel_enum_dv_timings(struct file *file, void *fh, + struct v4l2_enum_dv_timings *timings) +{ + struct tegra_channel *chan = video_drvdata(file); + struct v4l2_subdev *sd = chan->subdev_on_csi; + + if (!v4l2_subdev_has_op(sd, pad, enum_dv_timings)) + return -ENOTTY; + + return v4l2_subdev_call(sd, pad, enum_dv_timings, timings); +} + +static int +tegra_channel_dv_timings_cap(struct file *file, void *fh, + struct v4l2_dv_timings_cap *cap) +{ + struct tegra_channel *chan = video_drvdata(file); + struct v4l2_subdev *sd = chan->subdev_on_csi; + + if (!v4l2_subdev_has_op(sd, pad, dv_timings_cap)) + return -ENOTTY; + + return v4l2_subdev_call(sd, pad, dv_timings_cap, cap); +} + +int tegra_channel_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct tegra_channel *chan = container_of(ctrl->handler, + struct tegra_channel, ctrl_handler); + int err = 0; + + switch (ctrl->id) { + case TEGRA_CAMERA_CID_GAIN_TPG: + { + if (chan->vi->csi != NULL && + chan->vi->csi->tpg_gain_ctrl) { + struct v4l2_subdev *sd = chan->subdev_on_csi; + + err = tegra_csi_tpg_set_gain(sd, &(ctrl->val)); + } + } + break; + case TEGRA_CAMERA_CID_VI_BYPASS_MODE: + if (switch_ctrl_qmenu[ctrl->val] == SWITCH_ON) + chan->bypass = true; + else if (chan->vi->bypass) { + dev_dbg(&chan->video->dev, + "can't disable bypass mode\n"); + dev_dbg(&chan->video->dev, + "because the VI/CSI is in bypass mode\n"); + chan->bypass = true; + } else + chan->bypass = false; + break; + case TEGRA_CAMERA_CID_OVERRIDE_ENABLE: + { + struct v4l2_subdev *sd = chan->subdev_on_csi; + struct camera_common_data *s_data = + to_camera_common_data(sd->dev); + + if (!s_data) 
+ break; + if (switch_ctrl_qmenu[ctrl->val] == SWITCH_ON) { + s_data->override_enable = true; + dev_dbg(&chan->video->dev, + "enable override control\n"); + } else { + s_data->override_enable = false; + dev_dbg(&chan->video->dev, + "disable override control\n"); + } + } + break; + case TEGRA_CAMERA_CID_VI_HEIGHT_ALIGN: + chan->height_align = ctrl->val; + tegra_channel_update_format(chan, chan->format.width, + chan->format.height, + chan->format.pixelformat, + &chan->fmtinfo->bpp, 0); + break; + case TEGRA_CAMERA_CID_VI_SIZE_ALIGN: + chan->size_align = size_align_ctrl_qmenu[ctrl->val]; + tegra_channel_update_format(chan, chan->format.width, + chan->format.height, + chan->format.pixelformat, + &chan->fmtinfo->bpp, 0); + break; + case TEGRA_CAMERA_CID_LOW_LATENCY: + chan->low_latency = ctrl->val; + break; + case TEGRA_CAMERA_CID_VI_PREFERRED_STRIDE: + chan->preferred_stride = ctrl->val; + tegra_channel_update_format(chan, chan->format.width, + chan->format.height, + chan->format.pixelformat, + &chan->fmtinfo->bpp, + chan->preferred_stride); + break; + default: + dev_err(&chan->video->dev, "%s: Invalid ctrl %u\n", + __func__, ctrl->id); + err = -EINVAL; + } + + return err; +} + +static const struct v4l2_ctrl_ops channel_ctrl_ops = { + .s_ctrl = tegra_channel_s_ctrl, +}; + +static const struct v4l2_ctrl_config common_custom_ctrls[] = { + { + .ops = &channel_ctrl_ops, + .id = TEGRA_CAMERA_CID_GAIN_TPG, + .name = "TPG Gain Ctrl", + .type = V4L2_CTRL_TYPE_INTEGER, + .min = 1, + .max = 64, + .step = 1, + .def = 1, + }, + { + .ops = &channel_ctrl_ops, + .id = TEGRA_CAMERA_CID_GAIN_TPG_EMB_DATA_CFG, + .name = "TPG embedded data config", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .flags = V4L2_CTRL_FLAG_READ_ONLY, + .min = 0, + .max = 1, + .step = 1, + .def = 0, + }, + { + .ops = &channel_ctrl_ops, + .id = TEGRA_CAMERA_CID_VI_BYPASS_MODE, + .name = "Bypass Mode", + .type = V4L2_CTRL_TYPE_INTEGER_MENU, + .def = 0, + .min = 0, + .max = ARRAY_SIZE(switch_ctrl_qmenu) - 1, + 
.menu_skip_mask = 0, + .qmenu_int = switch_ctrl_qmenu, + }, + { + .ops = &channel_ctrl_ops, + .id = TEGRA_CAMERA_CID_OVERRIDE_ENABLE, + .name = "Override Enable", + .type = V4L2_CTRL_TYPE_INTEGER_MENU, + .def = 0, + .min = 0, + .max = ARRAY_SIZE(switch_ctrl_qmenu) - 1, + .menu_skip_mask = 0, + .qmenu_int = switch_ctrl_qmenu, + }, + { + .ops = &channel_ctrl_ops, + .id = TEGRA_CAMERA_CID_VI_HEIGHT_ALIGN, + .name = "Height Align", + .type = V4L2_CTRL_TYPE_INTEGER, + .min = 1, + .max = 16, + .step = 1, + .def = 1, + }, + { + .ops = &channel_ctrl_ops, + .id = TEGRA_CAMERA_CID_VI_SIZE_ALIGN, + .name = "Size Align", + .type = V4L2_CTRL_TYPE_INTEGER_MENU, + .def = TEGRA_SIZE_ALIGNMENT, + .min = 0, + .max = ARRAY_SIZE(size_align_ctrl_qmenu) - 1, + .menu_skip_mask = 0, + .qmenu_int = size_align_ctrl_qmenu, + }, + { + .ops = &channel_ctrl_ops, + .id = TEGRA_CAMERA_CID_SENSOR_MODES, + .name = "Sensor Modes", + .type = V4L2_CTRL_TYPE_INTEGER, + .flags = V4L2_CTRL_FLAG_READ_ONLY, + .min = 0, + .max = MAX_NUM_SENSOR_MODES, + .def = MAX_NUM_SENSOR_MODES, + .step = 1, + }, + { + .ops = &channel_ctrl_ops, + .id = TEGRA_CAMERA_CID_SENSOR_SIGNAL_PROPERTIES, + .name = "Sensor Signal Properties", + .type = V4L2_CTRL_TYPE_U32, + .flags = V4L2_CTRL_FLAG_HAS_PAYLOAD | + V4L2_CTRL_FLAG_READ_ONLY, + .min = 0, + .max = 0xFFFFFFFF, + .step = 1, + .def = 0, + .dims = { MAX_NUM_SENSOR_MODES, + SENSOR_SIGNAL_PROPERTIES_CID_SIZE }, + }, + { + .ops = &channel_ctrl_ops, + .id = TEGRA_CAMERA_CID_SENSOR_IMAGE_PROPERTIES, + .name = "Sensor Image Properties", + .type = V4L2_CTRL_TYPE_U32, + .flags = V4L2_CTRL_FLAG_HAS_PAYLOAD | + V4L2_CTRL_FLAG_READ_ONLY, + .min = 0, + .max = 0xFFFFFFFF, + .step = 1, + .def = 0, + .dims = { MAX_NUM_SENSOR_MODES, + SENSOR_IMAGE_PROPERTIES_CID_SIZE }, + }, + { + .ops = &channel_ctrl_ops, + .id = TEGRA_CAMERA_CID_SENSOR_CONTROL_PROPERTIES, + .name = "Sensor Control Properties", + .type = V4L2_CTRL_TYPE_U32, + .flags = V4L2_CTRL_FLAG_HAS_PAYLOAD | + 
V4L2_CTRL_FLAG_READ_ONLY, + .min = 0, + .max = 0xFFFFFFFF, + .step = 1, + .def = 0, + .dims = { MAX_NUM_SENSOR_MODES, + SENSOR_CONTROL_PROPERTIES_CID_SIZE }, + }, + { + .ops = &channel_ctrl_ops, + .id = TEGRA_CAMERA_CID_SENSOR_DV_TIMINGS, + .name = "Sensor DV Timings", + .type = V4L2_CTRL_TYPE_U32, + .flags = V4L2_CTRL_FLAG_HAS_PAYLOAD | + V4L2_CTRL_FLAG_READ_ONLY, + .min = 0, + .max = 0xFFFFFFFF, + .step = 1, + .def = 0, + .dims = { MAX_NUM_SENSOR_MODES, + SENSOR_DV_TIMINGS_CID_SIZE }, + }, + { + .ops = &channel_ctrl_ops, + .id = TEGRA_CAMERA_CID_LOW_LATENCY, + .name = "Low Latency Mode", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .def = 0, + .min = 0, + .max = 1, + .step = 1, + }, + { + .ops = &channel_ctrl_ops, + .id = TEGRA_CAMERA_CID_VI_PREFERRED_STRIDE, + .name = "Preferred Stride", + .type = V4L2_CTRL_TYPE_INTEGER, + .min = 0, + .max = 65535, + .step = 1, + .def = 0, + }, +}; + +#define GET_TEGRA_CAMERA_CTRL(id, c) \ +do { \ + c = v4l2_ctrl_find(&chan->ctrl_handler, TEGRA_CAMERA_CID_##id); \ + if (!c) { \ + dev_err(chan->vi->dev, "%s: could not find ctrl %s\n", \ + __func__, "##id"); \ + return -EINVAL; \ + } \ +} while (0) + +static int tegra_channel_sensorprops_setup(struct tegra_channel *chan) +{ + const struct v4l2_subdev *sd = chan->subdev_on_csi; + const struct camera_common_data *s_data = + to_camera_common_data(sd->dev); + const struct sensor_mode_properties *modes; + struct v4l2_ctrl *ctrl_modes; + struct v4l2_ctrl *ctrl_signalprops; + struct v4l2_ctrl *ctrl_imageprops; + struct v4l2_ctrl *ctrl_controlprops; + struct v4l2_ctrl *ctrl_dvtimings; + u32 i; + + if (!s_data) + return 0; + + GET_TEGRA_CAMERA_CTRL(SENSOR_MODES, ctrl_modes); + GET_TEGRA_CAMERA_CTRL(SENSOR_SIGNAL_PROPERTIES, ctrl_signalprops); + GET_TEGRA_CAMERA_CTRL(SENSOR_IMAGE_PROPERTIES, ctrl_imageprops); + GET_TEGRA_CAMERA_CTRL(SENSOR_CONTROL_PROPERTIES, ctrl_controlprops); + GET_TEGRA_CAMERA_CTRL(SENSOR_DV_TIMINGS, ctrl_dvtimings); + + ctrl_modes->val = s_data->sensor_props.num_modes; + 
ctrl_modes->cur.val = s_data->sensor_props.num_modes; + + /* Update the control sizes + * + * Note that the structs have size elems * sizeof(u32) + * So to get the number of structs (elems * sizeof(u32)) / num_modes + */ + ctrl_signalprops->elems = s_data->sensor_props.num_modes * + SENSOR_SIGNAL_PROPERTIES_CID_SIZE; + + ctrl_imageprops->elems = s_data->sensor_props.num_modes * + SENSOR_IMAGE_PROPERTIES_CID_SIZE; + + ctrl_controlprops->elems = s_data->sensor_props.num_modes * + SENSOR_CONTROL_PROPERTIES_CID_SIZE; + + ctrl_dvtimings->elems = s_data->sensor_props.num_modes * + SENSOR_DV_TIMINGS_CID_SIZE; + + modes = s_data->sensor_props.sensor_modes; + for (i = 0; i < s_data->sensor_props.num_modes; i++) { + void *ptr = NULL; + u32 size; + + size = sizeof(struct sensor_signal_properties); + ptr = ctrl_signalprops->p_new.p + (i * size); + memcpy(ptr, &modes[i].signal_properties, size); + + size = sizeof(struct sensor_image_properties); + ptr = ctrl_imageprops->p_new.p + (i * size); + memcpy(ptr, &modes[i].image_properties, size); + + size = sizeof(struct sensor_control_properties); + ptr = ctrl_controlprops->p_new.p + (i * size); + memcpy(ptr, &modes[i].control_properties, size); + + size = sizeof(struct sensor_dv_timings); + ptr = ctrl_dvtimings->p_new.p + (i * size); + memcpy(ptr, &modes[i].dv_timings, size); + } + spec_bar(); + + /* Do not copy memory into p_cur block, reuse p_new */ + ctrl_signalprops->p_cur.p = ctrl_signalprops->p_new.p; + ctrl_imageprops->p_cur.p = ctrl_imageprops->p_new.p; + ctrl_controlprops->p_cur.p = ctrl_controlprops->p_new.p; + ctrl_dvtimings->p_cur.p = ctrl_dvtimings->p_new.p; + + return 0; +} + +static int tegra_channel_setup_controls(struct tegra_channel *chan) +{ + int num_sd = 0; + struct v4l2_subdev *sd = NULL; + struct tegra_mc_vi *vi = chan->vi; + struct v4l2_ctrl *ctrl; + int i; + int ret = 0; + + /* Clear and reinit control handler - Bug 1956853 */ + v4l2_ctrl_handler_free(&chan->ctrl_handler); + 
v4l2_ctrl_handler_init(&chan->ctrl_handler, MAX_CID_CONTROLS); + + /* Initialize the subdev and controls here at first open */ + sd = chan->subdev[num_sd]; + while ((sd = chan->subdev[num_sd++]) && + (num_sd <= chan->num_subdevs)) { + /* Add control handler for the subdevice */ + ret = v4l2_ctrl_add_handler(&chan->ctrl_handler, + sd->ctrl_handler, NULL, false); + if (ret || chan->ctrl_handler.error) + dev_err(chan->vi->dev, + "Failed to add sub-device controls\n"); + } + + /* Add new custom controls */ + for (i = 0; i < ARRAY_SIZE(common_custom_ctrls); i++) { + switch (common_custom_ctrls[i].id) { + case TEGRA_CAMERA_CID_OVERRIDE_ENABLE: + /* don't create override control for pg mode */ + if (chan->pg_mode) + continue; + break; + case TEGRA_CAMERA_CID_GAIN_TPG: + /* Skip the custom control for sensor and + * for TPG which doesn't support gain control + */ + if ((vi->csi == NULL) || (chan->pg_mode && + !vi->csi->tpg_gain_ctrl)) + continue; + break; + case TEGRA_CAMERA_CID_GAIN_TPG_EMB_DATA_CFG: + /* Skip the custom control for sensor and + * for TPG which doesn't support embedded + * data with TPG config data. 
+ */ + if ((vi->csi == NULL) || (chan->pg_mode && + !vi->csi->tpg_emb_data_config)) + continue; + break; + default: + break; + } + ctrl = v4l2_ctrl_new_custom(&chan->ctrl_handler, + &common_custom_ctrls[i], NULL); + if (!ctrl) { + dev_err(chan->vi->dev, + "Failed to add %s ctrl\n", + common_custom_ctrls[i].name); + return chan->ctrl_handler.error; + } + + /* Initialize the sensor arrays to have zero elements + * This should keep accesses to only the modes + * later defined in the DT + */ + if (ctrl->is_array) + ctrl->elems = 0; + } + + vi->fops->vi_add_ctrls(chan); + + if (chan->pg_mode) { + ret = v4l2_ctrl_add_handler(&chan->ctrl_handler, + &chan->vi->ctrl_handler, NULL, false); + if (ret || chan->ctrl_handler.error) + dev_err(chan->vi->dev, + "Failed to add VI controls\n"); + } + + /* setup the controls */ + ret = v4l2_ctrl_handler_setup(&chan->ctrl_handler); + if (ret < 0) + goto error; + + return 0; + +error: + v4l2_ctrl_handler_free(&chan->ctrl_handler); + return ret; +} + +static void tegra_channel_free_sensor_properties( + const struct v4l2_subdev *sensor_sd) +{ + struct camera_common_data *s_data; + struct tegra_csi_device *csi = tegra_get_mc_csi(); + struct tegra_csi_channel *chan; + + if (sensor_sd == NULL) + return; + + s_data = to_camera_common_data(sensor_sd->dev); + if (s_data == NULL) + return; + + if (s_data->sensor_props.sensor_modes) + devm_kfree(s_data->dev, s_data->sensor_props.sensor_modes); + + s_data->sensor_props.sensor_modes = NULL; + + /* remove reference to s_data */ + list_for_each_entry(chan, &csi->csi_chans, list) { + if (chan->sensor_sd == sensor_sd) + chan->s_data = NULL; + } +} + +static int tegra_channel_connect_sensor( + struct tegra_channel *chan, struct v4l2_subdev *sensor_sd) +{ + struct device *sensor_dev; + struct device_node *sensor_of_node; + struct tegra_csi_device *csi_device; + struct device_node *ep_node; + + if (!chan) + return -EINVAL; + + if (!sensor_sd) + return -EINVAL; + + sensor_dev = sensor_sd->dev; + if 
(!sensor_dev) + return -EINVAL; + + sensor_of_node = sensor_dev->of_node; + if (!sensor_of_node) + return -EINVAL; + + csi_device = tegra_get_mc_csi(); + WARN_ON(!csi_device); + if (!csi_device) + return -ENODEV; + + for_each_endpoint_of_node(sensor_of_node, ep_node) { + struct device_node *csi_chan_of_node; + struct tegra_csi_channel *csi_chan; + + csi_chan_of_node = + of_graph_get_remote_port_parent(ep_node); + + list_for_each_entry(csi_chan, &csi_device->csi_chans, list) { + if (csi_chan->of_node == csi_chan_of_node) { + csi_chan->s_data = + to_camera_common_data(chan->subdev_on_csi->dev); + csi_chan->sensor_sd = chan->subdev_on_csi; + break; + } + } + + of_node_put(csi_chan_of_node); + + } + + return 0; +} + +static int map_to_sensor_type(u32 phy_mode) +{ + switch (phy_mode) { + case CSI_PHY_MODE_DPHY: + return SENSORTYPE_DPHY; + case CSI_PHY_MODE_CPHY: + return SENSORTYPE_CPHY; + case SLVS_EC: + return SENSORTYPE_SLVSEC; + default: + return SENSORTYPE_OTHER; + } +} + +static u64 tegra_channel_get_max_pixelclock(struct tegra_channel *chan) +{ + int i = 0; + u64 val = 0, pixelclock = 0; + + struct v4l2_subdev *sd = chan->subdev_on_csi; + struct camera_common_data *s_data = + to_camera_common_data(sd->dev); + struct sensor_mode_properties *sensor_mode; + + if (!s_data) + return 0; + + for (i = 0; i < s_data->sensor_props.num_modes; i++) { + sensor_mode = &s_data->sensor_props.sensor_modes[i]; + if (sensor_mode->signal_properties.serdes_pixel_clock.val != 0ULL) + val = sensor_mode->signal_properties.serdes_pixel_clock.val; + else + val = sensor_mode->signal_properties.pixel_clock.val; + /* Select the mode with largest pixel rate */ + if (pixelclock < val) + pixelclock = val; + } + spec_bar(); + + return pixelclock; +} + +static u32 tegra_channel_get_num_lanes(struct tegra_channel *chan) +{ + u32 num_lanes = 0; + + struct v4l2_subdev *sd = chan->subdev_on_csi; + struct camera_common_data *s_data = + to_camera_common_data(sd->dev); + struct sensor_mode_properties 
*sensor_mode; + + if (!s_data) + return 0; + + sensor_mode = &s_data->sensor_props.sensor_modes[0]; + num_lanes = sensor_mode->signal_properties.num_lanes; + + return num_lanes; +} + +static u32 tegra_channel_get_sensor_type(struct tegra_channel *chan) +{ + u32 phy_mode = 0, sensor_type = 0; + struct v4l2_subdev *sd = chan->subdev_on_csi; + struct camera_common_data *s_data = + to_camera_common_data(sd->dev); + struct sensor_mode_properties *sensor_mode; + + if (!s_data) + return 0; + + /* Select phy mode based on the first mode */ + sensor_mode = &s_data->sensor_props.sensor_modes[0]; + phy_mode = sensor_mode->signal_properties.phy_mode; + sensor_type = map_to_sensor_type(phy_mode); + + return sensor_type; +} + +static u64 tegra_channel_get_max_source_rate(void) +{ + /* WAR: bug 2095503 */ + /* TODO very large hard-coded rate based on 4k@60 fps */ + /* implement proper functionality here. */ + u64 pixelrate = HDMI_IN_RATE; + return pixelrate; +} + +static void tegra_channel_populate_dev_info(struct tegra_camera_dev_info *cdev, + struct tegra_channel *chan) +{ + u64 pixelclock = 0; + struct camera_common_data *s_data = + to_camera_common_data(chan->subdev_on_csi->dev); + + if (s_data != NULL) { + /* camera sensors */ + cdev->sensor_type = tegra_channel_get_sensor_type(chan); + pixelclock = tegra_channel_get_max_pixelclock(chan); + /* Multiply by CPHY symbols to pixels factor. 
*/ + if (cdev->sensor_type == SENSORTYPE_CPHY) + pixelclock *= 16/7; + cdev->lane_num = tegra_channel_get_num_lanes(chan); + } else { + if (chan->pg_mode) { + /* TPG mode */ + cdev->sensor_type = SENSORTYPE_VIRTUAL; + } else if (v4l2_subdev_has_op(chan->subdev_on_csi, + video, g_dv_timings)) { + /* HDMI-IN */ + cdev->sensor_type = SENSORTYPE_OTHER; + pixelclock = tegra_channel_get_max_source_rate(); + } else { + /* Focusers, no pixel clk and ISO BW, just bail out */ + return; + } + } + + cdev->pixel_rate = pixelclock; + cdev->pixel_bit_depth = chan->fmtinfo->width; + cdev->bpp = chan->fmtinfo->bpp.numerator; + /* BW in kBps */ + cdev->bw = cdev->pixel_rate * cdev->bpp / 1024; + cdev->bw /= 8; +} + +void tegra_channel_remove_subdevices(struct tegra_channel *chan) +{ + tegra_channel_free_sensor_properties(chan->subdev_on_csi); + video_unregister_device(chan->video); + chan->video = NULL; + chan->num_subdevs = 0; + chan->subdev_on_csi = NULL; +} +EXPORT_SYMBOL(tegra_channel_remove_subdevices); + +int tegra_channel_init_subdevices(struct tegra_channel *chan) +{ + int ret = 0; + struct media_entity *entity; + struct media_pad *pad; + struct v4l2_subdev *sd; + int index = 0; + u8 num_sd = 0; + struct tegra_camera_dev_info camdev_info; + int grp_id = chan->pg_mode ? (TPG_CSI_GROUP_ID + chan->port[0] + 1) + : chan->port[0] + 1; + int len = 0; + + /* set_stream of CSI */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) + pad = media_entity_remote_pad(&chan->pad); +#else + pad = media_pad_remote_pad_first(&chan->pad); +#endif + if (!pad) + return -ENODEV; + + entity = pad->entity; + sd = media_entity_to_v4l2_subdev(entity); + v4l2_set_subdev_hostdata(sd, chan); + chan->subdev[num_sd++] = sd; + + /* verify if the immediate subdevice is slvsec */ + chan->is_slvsec = (strstr(sd->name, "slvs") != NULL) ? 
1 : 0; + + /* Add subdev name to this video dev name with vi-output tag*/ + len = snprintf(chan->video->name, sizeof(chan->video->name), "%s, %s", + "vi-output", sd->name); + if (len < 0) + return -EINVAL; + + sd->grp_id = grp_id; + chan->grp_id = grp_id; + index = pad->index - 1; + while (index >= 0) { + pad = &entity->pads[index]; + if (!(pad->flags & MEDIA_PAD_FL_SINK)) + break; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) + pad = media_entity_remote_pad(pad); +#else + pad = media_pad_remote_pad_first(pad); +#endif + if (pad == NULL || !tegra_is_v4l2_subdev(pad->entity)) + break; + + if (num_sd >= MAX_SUBDEVICES) + break; + + entity = pad->entity; + sd = media_entity_to_v4l2_subdev(entity); + v4l2_set_subdev_hostdata(sd, chan); + sd->grp_id = grp_id; + chan->subdev[num_sd++] = sd; + /* Add subdev name to this video dev name with vi-output tag*/ + len = snprintf(chan->video->name, sizeof(chan->video->name), "%s, %s", + "vi-output", sd->name); + if (len < 0) + return -EINVAL; + + index = pad->index - 1; + } + spec_bar(); /** for num_sd < MAX_SUBDEVICES */ + + chan->num_subdevs = num_sd; + /* + * Each CSI channel has only one final remote source, + * Mark that subdev as subdev_on_csi + */ + chan->subdev_on_csi = sd; + + /* initialize the available formats */ + if (chan->num_subdevs) + tegra_channel_fmts_bitmap_init(chan); + + ret = tegra_channel_setup_controls(chan); + if (ret < 0) { + dev_err(chan->vi->dev, "%s: failed to setup controls\n", + __func__); + goto fail; + } + + memset(&camdev_info, 0, sizeof(camdev_info)); + + /* + * If subdev on csi is csi or channel is in pg mode + * then don't look for sensor props + */ + if (strstr(chan->subdev_on_csi->name, "nvcsi") != NULL || + chan->pg_mode) { + tegra_channel_populate_dev_info(&camdev_info, chan); + ret = tegra_camera_device_register(&camdev_info, chan); + return ret; + } + + ret = tegra_channel_sensorprops_setup(chan); + if (ret < 0) { + dev_err(chan->vi->dev, "%s: failed to setup sensor props\n", + 
__func__); + goto fail; + } + + /* Add a link for the camera_common_data in the tegra_csi_channel. */ + ret = tegra_channel_connect_sensor(chan, chan->subdev_on_csi); + if (ret < 0) { + dev_err(chan->vi->dev, + "%s: failed to connect sensor to channel\n", __func__); + goto fail; + } + + tegra_channel_populate_dev_info(&camdev_info, chan); + ret = tegra_camera_device_register(&camdev_info, chan); + + return ret; +fail: + tegra_channel_free_sensor_properties(chan->subdev_on_csi); + return ret; +} +EXPORT_SYMBOL(tegra_channel_init_subdevices); + +struct v4l2_subdev *tegra_channel_find_linked_csi_subdev( + struct tegra_channel *chan) +{ + struct tegra_csi_device *csi = tegra_get_mc_csi(); + struct tegra_csi_channel *csi_it; + int i = 0; + + list_for_each_entry(csi_it, &csi->csi_chans, list) { + for (i = 0; i < chan->num_subdevs; i++) + if (chan->subdev[i] == &csi_it->subdev) + return chan->subdev[i]; + } + + return NULL; +} +EXPORT_SYMBOL(tegra_channel_find_linked_csi_subdev); + +static int +tegra_channel_get_format(struct file *file, void *fh, + struct v4l2_format *format) +{ + struct tegra_channel *chan = video_drvdata(file); + struct v4l2_pix_format *pix = &format->fmt.pix; + + *pix = chan->format; + + return 0; +} + +static int +__tegra_channel_try_format(struct tegra_channel *chan, + struct v4l2_pix_format *pix) +{ + const struct tegra_video_format *vfmt; + struct v4l2_subdev_format fmt; + struct v4l2_subdev *sd = chan->subdev_on_csi; + struct v4l2_subdev_state cfg = {}; + int ret = 0; + + /* Use the channel format if pixformat is not supported */ + vfmt = tegra_core_get_format_by_fourcc(chan, pix->pixelformat); + if (!vfmt) { + pix->pixelformat = chan->format.pixelformat; + vfmt = tegra_core_get_format_by_fourcc(chan, pix->pixelformat); + if (!vfmt) + return -EINVAL; + } + + fmt.which = V4L2_SUBDEV_FORMAT_TRY; + fmt.pad = 0; + v4l2_fill_mbus_format(&fmt.format, pix, vfmt->code); + + ret = v4l2_subdev_call(sd, pad, set_fmt, &cfg, &fmt); + if (ret == -ENOIOCTLCMD) 
+ return -ENOTTY; + + v4l2_fill_pix_format(pix, &fmt.format); + + tegra_channel_fmt_align(chan, vfmt, + &pix->width, &pix->height, &pix->bytesperline); + pix->sizeimage = get_aligned_buffer_size(chan, + pix->bytesperline, pix->height); + if (chan->fmtinfo->fourcc == V4L2_PIX_FMT_NV16) + pix->sizeimage *= 2; + + return ret; +} + +static int +tegra_channel_try_format(struct file *file, void *fh, + struct v4l2_format *format) +{ + struct tegra_channel *chan = video_drvdata(file); + + return __tegra_channel_try_format(chan, &format->fmt.pix); +} + +static int +__tegra_channel_set_format(struct tegra_channel *chan, + struct v4l2_pix_format *pix) +{ + const struct tegra_video_format *vfmt; + struct v4l2_subdev_format fmt; + struct v4l2_subdev *sd = chan->subdev_on_csi; + struct v4l2_subdev_state cfg = {}; + int ret = 0; + + vfmt = tegra_core_get_format_by_fourcc(chan, pix->pixelformat); + if (!vfmt) + return -EINVAL; + + fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; + fmt.pad = 0; + v4l2_fill_mbus_format(&fmt.format, pix, vfmt->code); + + ret = v4l2_subdev_call(sd, pad, set_fmt, &cfg, &fmt); + if (ret == -ENOIOCTLCMD) + return -ENOTTY; + + v4l2_fill_pix_format(pix, &fmt.format); + if (!ret) { + chan->format = *pix; + chan->fmtinfo = vfmt; + + if (chan->preferred_stride) + pix->bytesperline = chan->preferred_stride; + + tegra_channel_update_format(chan, pix->width, + pix->height, vfmt->fourcc, &vfmt->bpp, + pix->bytesperline); + + *pix = chan->format; + + if (chan->total_ports > 1) + update_gang_mode(chan); + } + + return ret; +} + +static int +tegra_channel_set_format(struct file *file, void *fh, + struct v4l2_format *format) +{ + struct tegra_channel *chan = video_drvdata(file); + int ret = 0; + + /* get the suppod format by try_fmt */ + ret = __tegra_channel_try_format(chan, &format->fmt.pix); + if (ret) + return ret; + + if (vb2_is_busy(&chan->queue)) + return -EBUSY; + + return __tegra_channel_set_format(chan, &format->fmt.pix); +} + +static int 
tegra_channel_subscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + switch (sub->type) { + case V4L2_EVENT_SOURCE_CHANGE: + return v4l2_event_subscribe(fh, sub, 4, NULL); + } + return v4l2_ctrl_subscribe_event(fh, sub); +} + +static int +tegra_channel_enum_input(struct file *file, void *fh, struct v4l2_input *inp) +{ + struct tegra_channel *chan = video_drvdata(file); + struct v4l2_subdev *sd_on_csi = chan->subdev_on_csi; + int ret, len; + + if (inp->index) + return -EINVAL; + + ret = v4l2_device_call_until_err(chan->video->v4l2_dev, + chan->grp_id, video, g_input_status, &inp->status); + + if (ret == -ENODEV || sd_on_csi == NULL) + return -ENODEV; + + inp->type = V4L2_INPUT_TYPE_CAMERA; + if (v4l2_subdev_has_op(sd_on_csi, video, s_dv_timings)) { + inp->capabilities = V4L2_IN_CAP_DV_TIMINGS; + len = snprintf(inp->name, + sizeof(inp->name), "HDMI %u", + chan->port[0]); + if (len < 0) + return -EINVAL; + } else { + len = snprintf(inp->name, + sizeof(inp->name), "Camera %u", + chan->port[0]); + if (len < 0) + return -EINVAL; + } + + return ret; +} + +static int tegra_channel_g_input(struct file *file, void *priv, unsigned int *i) +{ + *i = 0; + return 0; +} + +static int tegra_channel_s_input(struct file *file, void *priv, unsigned int i) +{ + if (i > 0) + return -EINVAL; + return 0; +} + +static int tegra_channel_log_status(struct file *file, void *priv) +{ + struct tegra_channel *chan = video_drvdata(file); + + v4l2_device_call_all(chan->video->v4l2_dev, + chan->grp_id, core, log_status); + return 0; +} + +static long tegra_channel_default_ioctl(struct file *file, void *fh, + bool use_prio, unsigned int cmd, void *arg) +{ + struct tegra_channel *chan = video_drvdata(file); + struct tegra_mc_vi *vi = chan->vi; + long ret = 0; + + if (vi->fops && vi->fops->vi_default_ioctl) + ret = vi->fops->vi_default_ioctl(file, fh, use_prio, cmd, arg); + + return ret; +} + +static const struct v4l2_ioctl_ops tegra_channel_ioctl_ops = { + 
.vidioc_querycap = tegra_channel_querycap, + .vidioc_enum_framesizes = tegra_channel_enum_framesizes, + .vidioc_enum_frameintervals = tegra_channel_enum_frameintervals, + .vidioc_enum_fmt_vid_cap = tegra_channel_enum_format, + .vidioc_g_fmt_vid_cap = tegra_channel_get_format, + .vidioc_s_fmt_vid_cap = tegra_channel_set_format, + .vidioc_try_fmt_vid_cap = tegra_channel_try_format, + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_expbuf = vb2_ioctl_expbuf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_g_edid = tegra_channel_g_edid, + .vidioc_s_edid = tegra_channel_s_edid, + .vidioc_s_dv_timings = tegra_channel_s_dv_timings, + .vidioc_g_dv_timings = tegra_channel_g_dv_timings, + .vidioc_query_dv_timings = tegra_channel_query_dv_timings, + .vidioc_enum_dv_timings = tegra_channel_enum_dv_timings, + .vidioc_dv_timings_cap = tegra_channel_dv_timings_cap, + .vidioc_subscribe_event = tegra_channel_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, + .vidioc_enum_input = tegra_channel_enum_input, + .vidioc_g_input = tegra_channel_g_input, + .vidioc_s_input = tegra_channel_s_input, + .vidioc_log_status = tegra_channel_log_status, + .vidioc_default = tegra_channel_default_ioctl, +}; + +static int tegra_channel_close(struct file *fp); +static int tegra_channel_open(struct file *fp) +{ + int ret; + struct video_device *vdev = video_devdata(fp); + struct tegra_channel *chan = video_drvdata(fp); + struct tegra_mc_vi *vi; + struct tegra_csi_device *csi; + + trace_tegra_channel_open(vdev->name); + mutex_lock(&chan->video_lock); + ret = v4l2_fh_open(fp); + if (ret || !v4l2_fh_is_singular_file(fp)) { + mutex_unlock(&chan->video_lock); + return ret; + } + + if (chan->subdev[0] == NULL) { + ret = -ENODEV; + goto fail; + 
} + + vi = chan->vi; + csi = vi->csi; + + chan->fh = (struct v4l2_fh *)fp->private_data; + + if (tegra_channel_verify_focuser(chan)) { + ret = tegra_channel_set_power(chan, true); + if (ret < 0) + return ret; + } + + + mutex_unlock(&chan->video_lock); + return 0; + +fail: + _vb2_fop_release(fp, NULL); + mutex_unlock(&chan->video_lock); + return ret; +} + +static int tegra_channel_close(struct file *fp) +{ + int ret = 0; + struct video_device *vdev = video_devdata(fp); + struct tegra_channel *chan = video_drvdata(fp); + struct tegra_mc_vi *vi = chan->vi; + bool is_singular; + + trace_tegra_channel_close(vdev->name); + mutex_lock(&chan->video_lock); + is_singular = v4l2_fh_is_singular_file(fp); + ret = _vb2_fop_release(fp, NULL); + + if (!is_singular) { + mutex_unlock(&chan->video_lock); + return ret; + } + + if (tegra_channel_verify_focuser(chan)) { + ret = tegra_channel_set_power(chan, false); + if (ret < 0) + dev_err(vi->dev, "Failed to power off subdevices\n"); + } + + mutex_unlock(&chan->video_lock); + return ret; +} + +/* ----------------------------------------------------------------------------- + * V4L2 file operations + */ +static const struct v4l2_file_operations tegra_channel_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = video_ioctl2, +#ifdef CONFIG_COMPAT + .compat_ioctl32 = video_ioctl2, +#endif + .open = tegra_channel_open, + .release = tegra_channel_close, + .read = vb2_fop_read, + .poll = vb2_fop_poll, + .mmap = vb2_fop_mmap, +}; + +int tegra_vi_get_port_info(struct tegra_channel *chan, + struct device_node *node, unsigned int index) +{ + struct device_node *ep = NULL; + struct device_node *ports; + struct device_node *port; + int value = 0xFFFF; + int ret = 0; + u32 i = 0; + + ports = of_get_child_by_name(node, "ports"); + if (ports == NULL) + ports = node; + + for_each_child_of_node(ports, port) { + if (!port->name || of_node_cmp(port->name, "port")) + continue; + + ret = of_property_read_u32(port, "reg", &value); + if (ret < 0) + continue; 
+ + if (value != index) + continue; + + for_each_child_of_node(port, ep) { + if (!ep->name || of_node_cmp(ep->name, "endpoint")) + continue; + + /* Get virtual channel id */ + ret = of_property_read_u32(ep, "vc-id", &value); + + /* vc-id is optional, default is 0 */ + chan->virtual_channel = (ret < 0) ? 0 : value; + + /* Consider max simultaneous sensor streams to be 16 */ + if (value > 16) { + dev_err(chan->vi->dev, "vc id >16!\n"); + return -EINVAL; + } + + /* Get CSI port */ + ret = of_property_read_u32(ep, "port-index", &value); + if (ret < 0) + dev_err(chan->vi->dev, "port index error\n"); + chan->port[0] = value; + + if (value > NVCSI_PORT_H) { + dev_err(chan->vi->dev, "port index >%d!\n", + NVCSI_PORT_H); + return -EINVAL; + } + + /* Get number of data lanes for the endpoint */ + ret = of_property_read_u32(ep, "bus-width", &value); + if (ret < 0) + dev_err(chan->vi->dev, "num lanes error\n"); + chan->numlanes = value; + + if (value > 12) { + dev_err(chan->vi->dev, "num lanes >12!\n"); + return -EINVAL; + } + /* + * for numlanes greater than 4 multiple CSI bricks + * are needed to capture the image, the logic below + * checks for numlanes > 4 and add a new CSI brick + * as a valid port. Loops around the three CSI + * bricks to add as many ports necessary. + */ + value -= 4; + for (i = 1; value > 0 && i < TEGRA_CSI_BLOCKS; i++, value -= 4) { + int next_port = chan->port[i-1] + 2; + + next_port = (next_port % (NVCSI_PORT_H + 1)); + chan->port[i] = next_port; + } + } + } + + return ret; +} + +static int tegra_channel_csi_init(struct tegra_channel *chan) +{ + int idx = 0; + struct tegra_mc_vi *vi = chan->vi; + int ret = 0; + + chan->gang_mode = CAMERA_NO_GANG_MODE; + chan->total_ports = 0; + memset(&chan->port[0], INVALID_CSI_PORT, TEGRA_CSI_BLOCKS); + memset(&chan->syncpoint_fifo[0], 0, sizeof(chan->syncpoint_fifo)); + if (chan->pg_mode) { + /* If VI has 4 existing channels, chan->id will start + * from 4 for the first TPG channel, which uses PORT_A(0). 
+ * To get the correct PORT number, subtract existing number of + * channels from chan->id. + */ + chan->port[0] = (chan->id - vi->num_channels) + % NUM_TPG_INSTANCE; + chan->virtual_channel = (chan->id - vi->num_channels) + / NUM_TPG_INSTANCE; + + WARN_ON(chan->port[0] > vi->csi->num_tpg_channels); + chan->numlanes = 2; + } else { + ret = tegra_vi_get_port_info(chan, vi->dev->of_node, chan->id); + if (ret) { + dev_err(vi->dev, "%s:Fail to parse port info\n", + __func__); + return ret; + } + } + + for (idx = 0; idx < TEGRA_CSI_BLOCKS && csi_port_is_valid(chan->port[idx]); idx++) { + chan->total_ports++; + /* maximum of 4 lanes are present per CSI block */ + chan->csibase[idx] = vi->iomem + + TEGRA_VI_CSI_BASE(chan->port[idx]); + } + /* based on gang mode valid ports will be updated - set default to 1 */ + chan->valid_ports = chan->total_ports ? 1 : 0; + return ret; +} + +int tegra_channel_init_video(struct tegra_channel *chan) +{ + struct tegra_mc_vi *vi = chan->vi; + int ret = 0, len = 0; + + if (chan->video) { + dev_err(&chan->video->dev, "video device already allocated\n"); + return 0; + } + + chan->video = video_device_alloc(); + + /* Initialize the media entity... */ + chan->pad.flags = MEDIA_PAD_FL_SINK; + ret = tegra_media_entity_init(&chan->video->entity, 1, + &chan->pad, false, false); + if (ret < 0) { + video_device_release(chan->video); + dev_err(&chan->video->dev, "failed to init video entity\n"); + return ret; + } + + /* init control handler */ + ret = v4l2_ctrl_handler_init(&chan->ctrl_handler, MAX_CID_CONTROLS); + if (chan->ctrl_handler.error) { + dev_err(&chan->video->dev, "failed to init control handler\n"); + goto ctrl_init_error; + } + + /* init video node... */ + chan->video->fops = &tegra_channel_fops; + chan->video->v4l2_dev = &vi->v4l2_dev; + chan->video->queue = &chan->queue; + len = snprintf(chan->video->name, sizeof(chan->video->name), "%s-%s-%u", + dev_name(vi->dev), chan->pg_mode ? "tpg" : "output", + chan->pg_mode ? 
(chan->id - vi->num_channels) : chan->port[0]); + if (len < 0) { + ret = -EINVAL; + goto ctrl_init_error; + } + + chan->video->vfl_type = VFL_TYPE_VIDEO; + chan->video->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + chan->video->device_caps |= V4L2_CAP_EXT_PIX_FORMAT; + chan->video->vfl_dir = VFL_DIR_RX; + chan->video->release = video_device_release_empty; + chan->video->ioctl_ops = &tegra_channel_ioctl_ops; + chan->video->ctrl_handler = &chan->ctrl_handler; + chan->video->lock = &chan->video_lock; + + video_set_drvdata(chan->video, chan); + + return ret; + +ctrl_init_error: + video_device_release(chan->video); + media_entity_cleanup(&chan->video->entity); + v4l2_ctrl_handler_free(&chan->ctrl_handler); + return ret; +} +EXPORT_SYMBOL(tegra_channel_init_video); + +int tegra_channel_init(struct tegra_channel *chan) +{ + int ret; + struct tegra_mc_vi *vi = chan->vi; + struct device *vi_unit_dev; + + ret = tegra_channel_csi_init(chan); + if (ret) + return ret; + + /* + * The VI device instance has to be retrieved after CSI channel + * has been initialized. 
This will make sure the TPG ports are + * setup correctly + */ + vi_unit_dev = tegra_channel_get_vi_unit(chan); + chan->width_align = TEGRA_WIDTH_ALIGNMENT; + chan->stride_align = TEGRA_STRIDE_ALIGNMENT; + chan->height_align = TEGRA_HEIGHT_ALIGNMENT; + chan->size_align = size_align_ctrl_qmenu[TEGRA_SIZE_ALIGNMENT]; + chan->num_subdevs = 0; + mutex_init(&chan->video_lock); + chan->capture_descr_index = 0; + chan->capture_descr_sequence = 0; + INIT_LIST_HEAD(&chan->capture); + INIT_LIST_HEAD(&chan->release); + INIT_LIST_HEAD(&chan->entities); + init_waitqueue_head(&chan->start_wait); + init_waitqueue_head(&chan->release_wait); + atomic_set(&chan->restart_version, 1); + chan->capture_version = 0; + spin_lock_init(&chan->start_lock); + spin_lock_init(&chan->release_lock); + INIT_LIST_HEAD(&chan->dequeue); + init_waitqueue_head(&chan->dequeue_wait); + spin_lock_init(&chan->dequeue_lock); + mutex_init(&chan->stop_kthread_lock); + init_rwsem(&chan->reset_lock); + atomic_set(&chan->is_streaming, DISABLE); + spin_lock_init(&chan->capture_state_lock); + spin_lock_init(&chan->buffer_lock); + + /* Init video format */ + vi->fops->vi_init_video_formats(chan); + chan->fmtinfo = tegra_core_get_default_format(); + tegra_channel_update_format(chan, TEGRA_DEF_WIDTH, + TEGRA_DEF_HEIGHT, + chan->fmtinfo->fourcc, + &chan->fmtinfo->bpp, + chan->preferred_stride); + + chan->buffer_offset[0] = 0; + /* Init bpl factor to 1, will be overidden based on interlace_type */ + chan->interlace_bplfactor = 1; + +#if defined(CONFIG_VIDEOBUF2_DMA_CONTIG) + /* get the buffers queue... 
*/ + ret = tegra_vb2_dma_init(vi_unit_dev, &chan->alloc_ctx, + SZ_64K, &vi->vb2_dma_alloc_refcnt); + if (ret < 0) + goto vb2_init_error; + +#endif + + chan->queue.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + chan->queue.io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ | VB2_USERPTR; + chan->queue.lock = &chan->video_lock; + chan->queue.drv_priv = chan; + chan->queue.buf_struct_size = sizeof(struct tegra_channel_buffer); + chan->queue.ops = &tegra_channel_queue_qops; +#if defined(CONFIG_VIDEOBUF2_DMA_CONTIG) + chan->queue.mem_ops = &vb2_dma_contig_memops; +#endif + chan->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC + | V4L2_BUF_FLAG_TSTAMP_SRC_EOF; + ret = vb2_queue_init(&chan->queue); + if (ret < 0) { + dev_err(chan->vi->dev, "failed to initialize VB2 queue\n"); + goto vb2_queue_error; + } + + chan->deskew_ctx = devm_kzalloc(vi->dev, + sizeof(struct nvcsi_deskew_context), GFP_KERNEL); + if (!chan->deskew_ctx) { + ret = -ENOMEM; + goto deskew_ctx_err; + } + + chan->init_done = true; + + return 0; + +deskew_ctx_err: + devm_kfree(vi->dev, chan->deskew_ctx); +vb2_queue_error: +#if defined(CONFIG_VIDEOBUF2_DMA_CONTIG) + tegra_vb2_dma_cleanup(vi_unit_dev, chan->alloc_ctx, + &vi->vb2_dma_alloc_refcnt); +vb2_init_error: +#endif + v4l2_ctrl_handler_free(&chan->ctrl_handler); + return ret; +} +EXPORT_SYMBOL(tegra_channel_init); + +int tegra_channel_cleanup_video(struct tegra_channel *chan) +{ + v4l2_ctrl_handler_free(&chan->ctrl_handler); + media_entity_cleanup(&chan->video->entity); + video_device_release(chan->video); + return 0; +} +EXPORT_SYMBOL(tegra_channel_cleanup_video); + +int tegra_channel_cleanup(struct tegra_channel *chan) +{ + struct device *vi_unit_dev = tegra_channel_get_vi_unit(chan); + + /* release embedded data buffer */ + if (chan->emb_buf_size > 0) { + dma_free_coherent(vi_unit_dev, + chan->emb_buf_size, + chan->emb_buf_addr, chan->emb_buf); + chan->emb_buf_size = 0; + } + + tegra_channel_dealloc_buffer_queue(chan); + + 
v4l2_ctrl_handler_free(&chan->ctrl_handler); + mutex_lock(&chan->video_lock); + vb2_queue_release(&chan->queue); +#if defined(CONFIG_VIDEOBUF2_DMA_CONTIG) + tegra_vb2_dma_cleanup(vi_unit_dev, chan->alloc_ctx, + &chan->vi->vb2_dma_alloc_refcnt); +#endif + mutex_unlock(&chan->video_lock); + + tegra_camera_device_unregister(chan); + + return 0; +} +EXPORT_SYMBOL(tegra_channel_cleanup); + +void tegra_vi_channels_unregister(struct tegra_mc_vi *vi) +{ + struct tegra_channel *it; + + list_for_each_entry(it, &vi->vi_chans, list) { + if (it->video->cdev != NULL) + video_unregister_device(it->video); + } +} +EXPORT_SYMBOL(tegra_vi_channels_unregister); + +int tegra_vi_mfi_work(struct tegra_mc_vi *vi, int channel) +{ + if (vi->fops) + return vi->fops->vi_mfi_work(vi, channel); + + return 0; +} +EXPORT_SYMBOL(tegra_vi_mfi_work); + +int tegra_vi_channels_init(struct tegra_mc_vi *vi) +{ + int ret = 0; + struct tegra_channel *it; + int count = 0; + + list_for_each_entry(it, &vi->vi_chans, list) { + it->vi = vi; + ret = tegra_channel_init(it); + if (ret < 0) { + dev_err(vi->dev, "channel init failed\n"); + continue; + } + count++; + } + + if (count == 0) { + dev_err(vi->dev, "all channel init failed\n"); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(tegra_vi_channels_init); +int tegra_vi_channels_cleanup(struct tegra_mc_vi *vi) +{ + int ret = 0, err = 0; + struct tegra_channel *it; + + list_for_each_entry(it, &vi->vi_chans, list) { + if (!it->init_done) + continue; + err = tegra_channel_cleanup(it); + if (err < 0) { + ret = err; + dev_err(vi->dev, "channel cleanup failed, err %d\n", + err); + } + } + return ret; +} +EXPORT_SYMBOL(tegra_vi_channels_cleanup); diff --git a/drivers/media/platform/tegra/camera/vi/core.c b/drivers/media/platform/tegra/camera/vi/core.c new file mode 100644 index 00000000..f0bde35a --- /dev/null +++ b/drivers/media/platform/tegra/camera/vi/core.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVIDIA Tegra Video Input Device Driver Core 
Helpers + * + * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include + +static const struct tegra_video_format tegra_default_format[] = { + { + TEGRA_VF_DEF, + 10, + MEDIA_BUS_FMT_SRGGB10_1X10, + {2, 1}, + TEGRA_IMAGE_FORMAT_DEF, + TEGRA_IMAGE_DT_RAW10, + V4L2_PIX_FMT_SRGGB10, + "RGRG.. GBGB..", + }, +}; + +/* ----------------------------------------------------------------------------- + * Helper functions + */ + +/** + * tegra_core_get_fourcc_by_idx - get fourcc of a tegra_video format + * @index: array index of the tegra_video_formats + * + * Return: fourcc code + */ +u32 tegra_core_get_fourcc_by_idx(struct tegra_channel *chan, + unsigned int index) +{ + /* return default fourcc format if the index out of bounds */ + if (index > (chan->num_video_formats - 1)) + return V4L2_PIX_FMT_SGRBG10; + index = array_index_nospec(index, chan->num_video_formats); + + return chan->video_formats[index]->fourcc; +} +EXPORT_SYMBOL(tegra_core_get_fourcc_by_idx); + +/** + * tegra_core_get_word_count - Calculate word count + * @frame_width: number of pixels per line + * @fmt: Tegra Video format struct which has BPP information + * + * Return: word count number + */ +u32 tegra_core_get_word_count(unsigned int frame_width, + const struct tegra_video_format *fmt) +{ + return frame_width * fmt->width / 8; +} + +/** + * tegra_core_get_idx_by_code - Retrieve index for a media bus code + * @code: the format media bus code + * + * Return: a index to the format information structure corresponding to the + * given V4L2 media bus format @code, or -1 if no corresponding format can + * be found. 
+ */ +int tegra_core_get_idx_by_code(struct tegra_channel *chan, + unsigned int code, unsigned offset) +{ + unsigned int i; + + for (i = offset; i < chan->num_video_formats; ++i) { + if (chan->video_formats[i]->code == code) + return i; + } + + return -1; +} +EXPORT_SYMBOL(tegra_core_get_idx_by_code); + +/** + * tegra_core_get_code_by_fourcc - Retrieve media bus code for fourcc + * @fourcc: the format 4CC + * + * Return: media bus code format information structure corresponding to the + * given V4L2 fourcc @fourcc, or -1 if no corresponding format found. + */ +int tegra_core_get_code_by_fourcc(struct tegra_channel *chan, + unsigned int fourcc, unsigned offset) +{ + unsigned int i; + + for (i = offset; i < chan->num_video_formats; ++i) { + if (chan->video_formats[i]->fourcc == fourcc) + return chan->video_formats[i]->code; + } + spec_bar(); + + return -1; +} +EXPORT_SYMBOL(tegra_core_get_code_by_fourcc); + +/** + * tegra_core_get_default_format - Get default format + * + * Return: pointer to the format where the default format needs + * to be filled in. + */ +const struct tegra_video_format *tegra_core_get_default_format(void) +{ + return &tegra_default_format[0]; +} +EXPORT_SYMBOL(tegra_core_get_default_format); + +/** + * tegra_core_get_format_by_code - Retrieve format information for a media + * bus code + * @code: the format media bus code + * + * Return: a pointer to the format information structure corresponding to the + * given V4L2 media bus format @code, or NULL if no corresponding format can + * be found. 
+ */ +const struct tegra_video_format * +tegra_core_get_format_by_code(struct tegra_channel *chan, + unsigned int code, unsigned offset) +{ + unsigned int i; + + for (i = offset; i < chan->num_video_formats; ++i) { + if (chan->video_formats[i]->code == code) + return chan->video_formats[i]; + } + spec_bar(); + + return NULL; +} +EXPORT_SYMBOL(tegra_core_get_format_by_code); + +/** + * tegra_core_get_format_by_fourcc - Retrieve format information for a 4CC + * @fourcc: the format 4CC + * + * Return: a pointer to the format information structure corresponding to the + * given V4L2 format @fourcc, or NULL if no corresponding format can be + * found. + */ +const struct tegra_video_format * +tegra_core_get_format_by_fourcc(struct tegra_channel *chan, u32 fourcc) +{ + unsigned int i; + + for (i = 0; i < chan->num_video_formats; ++i) { + if (chan->video_formats[i]->fourcc == fourcc) + return chan->video_formats[i]; + } + spec_bar(); + + return NULL; +} +EXPORT_SYMBOL(tegra_core_get_format_by_fourcc); + +/** + * tegra_core_bytes_per_line - Calculate bytes per line in one frame + * @width: frame width + * @align: number of alignment bytes + * @fmt: Tegra Video format + * + * Simply calcualte the bytes_per_line and if it's not aligned it + * will be padded to alignment boundary. + */ +u32 tegra_core_bytes_per_line(unsigned int width, unsigned int align, + const struct tegra_video_format *fmt) +{ + u32 value = ((width * fmt->bpp.numerator) / fmt->bpp.denominator); + + return roundup(value, align); +} +EXPORT_SYMBOL(tegra_core_bytes_per_line); diff --git a/drivers/media/platform/tegra/camera/vi/graph.c b/drivers/media/platform/tegra/camera/vi/graph.c new file mode 100644 index 00000000..8e1834cb --- /dev/null +++ b/drivers/media/platform/tegra/camera/vi/graph.c @@ -0,0 +1,657 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVIDIA Media controller graph management + * + * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "nvcsi/nvcsi.h" + +/* ----------------------------------------------------------------------------- + * Graph Management + */ + +static struct tegra_vi_graph_entity * +tegra_vi_graph_find_entity(struct tegra_channel *chan, + const struct device_node *node) +{ + struct tegra_vi_graph_entity *entity; + + list_for_each_entry(entity, &chan->entities, list) { + if (entity->node == node) + return entity; + } + + return NULL; +} + +static int tegra_vi_graph_build_one(struct tegra_channel *chan, + struct tegra_vi_graph_entity *entity) +{ + u32 link_flags = MEDIA_LNK_FL_ENABLED; + struct media_entity *local; + struct media_entity *remote; + struct media_pad *local_pad; + struct media_pad *remote_pad; + struct tegra_vi_graph_entity *ent; + struct v4l2_fwnode_link link; + struct device_node *ep = NULL; + struct device_node *next; + int ret = 0; + + if (!entity->subdev) { + dev_err(chan->vi->dev, "%s:No subdev under entity, skip linking\n", + __func__); + return 0; + } + + local = entity->entity; + dev_dbg(chan->vi->dev, "creating links for entity %s\n", local->name); + + do { + /* Get the next endpoint and parse its link. 
*/ + next = of_graph_get_next_endpoint(entity->node, ep); + if (next == NULL) + break; + + ep = next; + + dev_dbg(chan->vi->dev, "processing endpoint %pOF\n", + ep); + ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link); + if (ret < 0) { + dev_err(chan->vi->dev, + "failed to parse link for %pOF\n", ep); + continue; + } + + if (link.local_port >= local->num_pads) { + dev_err(chan->vi->dev, + "invalid port number %u for %pOF\n", + link.local_port, to_of_node(link.local_node)); + v4l2_fwnode_put_link(&link); + ret = -EINVAL; + break; + } + + local_pad = &local->pads[link.local_port]; + + /* Skip sink ports, they will be processed from the other end of + * the link. + */ + if (local_pad->flags & MEDIA_PAD_FL_SINK) { + dev_dbg(chan->vi->dev, "skipping sink port %pOF:%u\n", + to_of_node(link.local_node), link.local_port); + v4l2_fwnode_put_link(&link); + continue; + } + + /* Skip channel entity , they will be processed separately. */ + if (link.remote_node == of_fwnode_handle(chan->vi->dev->of_node)) { + dev_dbg(chan->vi->dev, "skipping channel port %pOF:%u\n", + to_of_node(link.local_node), link.local_port); + v4l2_fwnode_put_link(&link); + continue; + } + + /* Find the remote entity. */ + ent = tegra_vi_graph_find_entity(chan, to_of_node(link.remote_node)); + if (ent == NULL) { + dev_err(chan->vi->dev, "no entity found for %pOF\n", + to_of_node(link.remote_node)); + v4l2_fwnode_put_link(&link); + ret = -EINVAL; + break; + } + + remote = ent->entity; + + if (link.remote_port >= remote->num_pads) { + dev_err(chan->vi->dev, "invalid port number %u on %pOF\n", + link.remote_port, to_of_node(link.remote_node)); + v4l2_fwnode_put_link(&link); + ret = -EINVAL; + break; + } + + remote_pad = &remote->pads[link.remote_port]; + + v4l2_fwnode_put_link(&link); + + /* Create the media link. 
*/ + dev_dbg(chan->vi->dev, "creating %s:%u -> %s:%u link\n", + local->name, local_pad->index, + remote->name, remote_pad->index); + + ret = tegra_media_create_link(local, local_pad->index, remote, + remote_pad->index, link_flags); + if (ret < 0) { + dev_err(chan->vi->dev, + "failed to create %s:%u -> %s:%u link\n", + local->name, local_pad->index, + remote->name, remote_pad->index); + break; + } + } while (next); + + return ret; +} + +static int tegra_vi_graph_build_links(struct tegra_channel *chan) +{ + u32 link_flags = MEDIA_LNK_FL_ENABLED; + struct media_entity *source; + struct media_entity *sink; + struct media_pad *source_pad; + struct media_pad *sink_pad; + struct tegra_vi_graph_entity *ent; + struct v4l2_fwnode_link link; + struct device_node *ep = NULL; + int ret = 0; + + dev_dbg(chan->vi->dev, "creating links for channels\n"); + + /* Device not registered */ + if (!chan->init_done) + return -EINVAL; + + ep = chan->endpoint_node; + + dev_dbg(chan->vi->dev, "processing endpoint %pOF\n", ep); + ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link); + if (ret < 0) { + dev_err(chan->vi->dev, "failed to parse link for %pOF\n", + ep); + return -EINVAL; + } + + if (link.local_port >= chan->vi->num_channels) { + dev_err(chan->vi->dev, "wrong channel number for port %u\n", + link.local_port); + v4l2_fwnode_put_link(&link); + return -EINVAL; + } + + dev_dbg(chan->vi->dev, "creating link for channel %s\n", + chan->video->name); + + /* Find the remote entity. 
*/ + ent = tegra_vi_graph_find_entity(chan, to_of_node(link.remote_node)); + if (ent == NULL) { + dev_err(chan->vi->dev, "no entity found for %pOF\n", + to_of_node(link.remote_node)); + v4l2_fwnode_put_link(&link); + return -EINVAL; + } + + if (ent->entity == NULL) { + dev_err(chan->vi->dev, "entity not bounded %pOF\n", + to_of_node(link.remote_node)); + v4l2_fwnode_put_link(&link); + return -EINVAL; + } + + source = ent->entity; + source_pad = &source->pads[link.remote_port]; + sink = &chan->video->entity; + sink_pad = &chan->pad; + + v4l2_fwnode_put_link(&link); + + /* Create the media link. */ + dev_dbg(chan->vi->dev, "creating %s:%u -> %s:%u link\n", + source->name, source_pad->index, + sink->name, sink_pad->index); + + ret = tegra_media_create_link(source, source_pad->index, + sink, sink_pad->index, + link_flags); + if (ret < 0) { + dev_err(chan->vi->dev, + "failed to create %s:%u -> %s:%u link\n", + source->name, source_pad->index, + sink->name, sink_pad->index); + return -EINVAL; + } + + ret = tegra_channel_init_subdevices(chan); + if (ret < 0) { + dev_err(chan->vi->dev, "Failed to initialize sub-devices\n"); + return -EINVAL; + } + + return 0; +} + +static void tegra_vi_graph_remove_links(struct tegra_channel *chan) +{ + struct tegra_vi_graph_entity *entity; + + /* remove entity links and subdev for nvcsi */ + entity = list_first_entry(&chan->entities, + struct tegra_vi_graph_entity, list); + if (entity->entity != NULL) { + media_entity_remove_links(entity->entity); + video_unregister_device(entity->subdev->devnode); + } + + /* remove video node for vi */ + tegra_channel_remove_subdevices(chan); +} + +static int tegra_vi_graph_notify_complete(struct v4l2_async_notifier *notifier) +{ + struct tegra_channel *chan = + container_of(notifier, struct tegra_channel, notifier); + struct tegra_vi_graph_entity *entity; + int ret; + + dev_dbg(chan->vi->dev, "notify complete, all subdevs registered\n"); + + /* Allocate video_device */ + ret = 
tegra_channel_init_video(chan); + if (ret < 0) { + dev_err(chan->vi->dev, "failed to allocate video device %s\n", + chan->video->name); + return ret; + } + + ret = video_register_device(chan->video, VFL_TYPE_VIDEO, -1); + if (ret < 0) { + dev_err(chan->vi->dev, "failed to register %s\n", + chan->video->name); + goto register_device_error; + } + + /* Create links for every entity. */ + list_for_each_entry(entity, &chan->entities, list) { + if (entity->entity != NULL) { + ret = tegra_vi_graph_build_one(chan, entity); + if (ret < 0) + goto graph_error; + } + } + + /* Create links for channels */ + ret = tegra_vi_graph_build_links(chan); + if (ret < 0) + goto graph_error; + + ret = v4l2_device_register_subdev_nodes(&chan->vi->v4l2_dev); + if (ret < 0) { + dev_err(chan->vi->dev, "failed to register subdev nodes\n"); + goto graph_error; + } + + chan->link_status++; + + return 0; + +graph_error: + video_unregister_device(chan->video); +register_device_error: + video_device_release(chan->video); + + return ret; +} + +static int tegra_vi_graph_notify_bound(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *subdev, + struct v4l2_async_subdev *asd) +{ + struct tegra_channel *chan = + container_of(notifier, struct tegra_channel, notifier); + struct tegra_vi_graph_entity *entity; + + /* Locate the entity corresponding to the bound subdev and store the + * subdev pointer. 
+ */ + list_for_each_entry(entity, &chan->entities, list) { + if (entity->node != to_of_node(subdev->dev->fwnode) && + entity->node != to_of_node(subdev->fwnode)) + continue; + + if (entity->subdev) { + dev_err(chan->vi->dev, "duplicate subdev for node %pOF\n", + entity->node); + return -EINVAL; + } + + dev_info(chan->vi->dev, "subdev %s bound\n", subdev->name); + entity->entity = &subdev->entity; + entity->subdev = subdev; + chan->subdevs_bound++; + return 0; + } + + dev_err(chan->vi->dev, "no entity for subdev %s\n", subdev->name); + return -EINVAL; +} + +static void tegra_vi_graph_notify_unbind(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *subdev, + struct v4l2_async_subdev *asd) +{ + struct tegra_channel *chan = + container_of(notifier, struct tegra_channel, notifier); + struct tegra_vi_graph_entity *entity; + + /* cleanup for complete */ + if (chan->link_status) { + tegra_vi_graph_remove_links(chan); + tegra_channel_cleanup_video(chan); + chan->link_status--; + } + + /* cleanup for bound */ + list_for_each_entry(entity, &chan->entities, list) { + if (entity->subdev == subdev) { + /* remove subdev node */ + chan->subdevs_bound--; + entity->subdev = NULL; + entity->entity = NULL; + dev_info(chan->vi->dev, "subdev %s unbind\n", + subdev->name); + break; + } + } +} + +void tegra_vi_graph_cleanup(struct tegra_mc_vi *vi) +{ + struct tegra_vi_graph_entity *entityp; + struct tegra_vi_graph_entity *entity; + struct tegra_channel *chan; + + list_for_each_entry(chan, &vi->vi_chans, list) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) + v4l2_async_notifier_unregister(&chan->notifier); +#else + v4l2_async_nf_unregister(&chan->notifier); +#endif + list_for_each_entry_safe(entity, entityp, + &chan->entities, list) { + of_node_put(entity->node); + list_del(&entity->list); + } + } +} +EXPORT_SYMBOL(tegra_vi_graph_cleanup); + +static int tegra_vi_graph_parse_one(struct tegra_channel *chan, + struct device_node *node) +{ + struct device_node *ep = NULL; + 
struct device_node *next; + struct device_node *remote = NULL; + struct tegra_vi_graph_entity *entity; + int ret = 0; + + dev_dbg(chan->vi->dev, "parsing node %s\n", node->full_name); + /* Parse all the remote entities and put them into the list */ + do { + next = of_graph_get_next_endpoint(node, ep); + if (next == NULL || !of_device_is_available(next)) + break; + ep = next; + + dev_dbg(chan->vi->dev, "handling endpoint %s\n", ep->full_name); + + remote = of_graph_get_remote_port_parent(ep); + if (!remote) { + ret = -EINVAL; + break; + } + + /* skip the vi of_node and duplicated entities */ + if (remote == chan->vi->dev->of_node || + tegra_vi_graph_find_entity(chan, remote) || + !of_device_is_available(remote)) + continue; + + entity = devm_kzalloc(chan->vi->dev, sizeof(*entity), + GFP_KERNEL); + if (entity == NULL) { + ret = -ENOMEM; + break; + } + + entity->node = remote; + entity->asd.match_type = V4L2_ASYNC_MATCH_FWNODE; + entity->asd.match.fwnode = of_fwnode_handle(remote); + + list_add_tail(&entity->list, &chan->entities); + chan->num_subdevs++; + + /* Find remote entities, which are linked to this entity */ + ret = tegra_vi_graph_parse_one(chan, entity->node); + if (ret < 0) + break; + } while (next); + + return ret; +} + +int tegra_vi_tpg_graph_init(struct tegra_mc_vi *mc_vi) +{ + int err = 0; + u32 link_flags = MEDIA_LNK_FL_ENABLED; + struct tegra_csi_device *csi = mc_vi->csi; + struct tegra_channel *vi_it; + struct tegra_csi_channel *csi_it; + + if (!csi) { + dev_err(mc_vi->dev, "CSI is NULL\n"); + return -EINVAL; + } + mc_vi->num_subdevs = mc_vi->num_channels; + vi_it = mc_vi->tpg_start; + csi_it = csi->tpg_start; + + list_for_each_entry_from(vi_it, &mc_vi->vi_chans, list) { + /* Device not registered */ + if (!vi_it->init_done) + continue; + + list_for_each_entry_from(csi_it, &csi->csi_chans, list) { + struct media_entity *source = &csi_it->subdev.entity; + struct media_entity *sink = &vi_it->video->entity; + struct media_pad *source_pad = csi_it->pads; 
+ struct media_pad *sink_pad = &vi_it->pad; + + vi_it->bypass = 0; + err = v4l2_device_register_subdev(&mc_vi->v4l2_dev, + &csi_it->subdev); + if (err) { + dev_err(mc_vi->dev, + "%s:Fail to register subdev\n", + __func__); + goto register_fail; + } + dev_dbg(mc_vi->dev, "creating %s:%u -> %s:%u link\n", + source->name, source_pad->index, + sink->name, sink_pad->index); + + err = tegra_media_create_link(source, source_pad->index, + sink, sink_pad->index, link_flags); + if (err < 0) { + dev_err(mc_vi->dev, + "failed to create %s:%u -> %s:%u link\n", + source->name, source_pad->index, + sink->name, sink_pad->index); + goto register_fail; + } + err = tegra_channel_init_subdevices(vi_it); + if (err) { + dev_err(mc_vi->dev, + "%s:Init subdevice error\n", __func__); + goto register_fail; + } + csi_it = list_next_entry(csi_it, list); + break; + } + } + + return 0; +register_fail: + csi_it = csi->tpg_start; + list_for_each_entry_from(csi_it, &csi->csi_chans, list) + v4l2_device_unregister_subdev(&csi_it->subdev); + return err; +} +EXPORT_SYMBOL(tegra_vi_tpg_graph_init); + +int tegra_vi_graph_init(struct tegra_mc_vi *vi) +{ + struct tegra_vi_graph_entity *entity; + unsigned int num_subdevs = 0; + int ret = 0, i; + struct device_node *ep = NULL; + struct device_node *next; + struct device_node *remote = NULL; + struct tegra_channel *chan; + static const struct v4l2_async_notifier_operations vi_chan_notify_ops = { + .bound = tegra_vi_graph_notify_bound, + .complete = tegra_vi_graph_notify_complete, + .unbind = tegra_vi_graph_notify_unbind, + }; + + /* + * Walk the links to parse the full graph. Each struct tegra_channel + * in vi->vi_chans points to each endpoint of the composite node. + * Thus parse the remote entity for each endpoint in turn. + * Each channel will register a v4l2 async notifier, this makes graph + * init independent between vi_chans. There we can skip the current + * channel in case of something wrong during graph parsing and try + * the next channel. 
Return error only if memory allocation is failed. + */ + chan = list_first_entry(&vi->vi_chans, struct tegra_channel, list); + do { + /* Get the next endpoint and parse its entities. */ + next = of_graph_get_next_endpoint(vi->dev->of_node, ep); + if (next == NULL) + break; + + ep = next; + + if (!of_device_is_available(ep)) { + dev_info(vi->dev, "ep of_device is not enabled %s.\n", + ep->full_name); + if (list_is_last(&chan->list, &vi->vi_chans)) + break; + /* Try the next channel */ + chan = list_next_entry(chan, list); + continue; + } + + chan->endpoint_node = ep; + entity = devm_kzalloc(vi->dev, sizeof(*entity), GFP_KERNEL); + if (entity == NULL) { + ret = -ENOMEM; + goto done; + } + + dev_dbg(vi->dev, "handling endpoint %s\n", ep->full_name); + remote = of_graph_get_remote_port_parent(ep); + if (!remote) { + dev_info(vi->dev, "cannot find remote port parent\n"); + if (list_is_last(&chan->list, &vi->vi_chans)) + break; + /* Try the next channel */ + chan = list_next_entry(chan, list); + continue; + } + + if (!of_device_is_available(remote)) { + dev_info(vi->dev, "remote of_device is not enabled %s.\n", + ep->full_name); + if (list_is_last(&chan->list, &vi->vi_chans)) + break; + /* Try the next channel */ + chan = list_next_entry(chan, list); + continue; + } + + /* Add the remote entity of this endpoint */ + entity->node = remote; + entity->asd.match_type = V4L2_ASYNC_MATCH_FWNODE; + entity->asd.match.fwnode = of_fwnode_handle(remote); + list_add_tail(&entity->list, &chan->entities); + chan->num_subdevs++; + chan->notifier.ops = chan->notifier.ops ? 
chan->notifier.ops : &vi_chan_notify_ops; + + /* Parse and add entities on this enpoint/channel */ + ret = tegra_vi_graph_parse_one(chan, entity->node); + if (ret < 0) { + dev_info(vi->dev, "graph parse error: %s.\n", + entity->node->full_name); + if (list_is_last(&chan->list, &vi->vi_chans)) + break; + /* Try the next channel */ + chan = list_next_entry(chan, list); + continue; + } + + num_subdevs = chan->num_subdevs; + + i = 0; +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) + v4l2_async_notifier_init(&chan->notifier); + list_for_each_entry(entity, &chan->entities, list) + __v4l2_async_notifier_add_subdev(&chan->notifier, &entity->asd); +#else + v4l2_async_nf_init(&chan->notifier); + list_for_each_entry(entity, &chan->entities, list) + __v4l2_async_nf_add_subdev(&chan->notifier, &entity->asd); +#endif + + chan->link_status = 0; + chan->subdevs_bound = 0; + + /* Register the async notifier for this channel */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) + ret = v4l2_async_notifier_register(&vi->v4l2_dev, + &chan->notifier); +#else + ret = v4l2_async_nf_register(&vi->v4l2_dev, + &chan->notifier); +#endif + if (ret < 0) { + dev_err(vi->dev, "notifier registration failed\n"); + goto done; + } + + if (list_is_last(&chan->list, &vi->vi_chans)) + break; + /* One endpoint for each vi channel, go with the next channel */ + chan = list_next_entry(chan, list); + } while (next); + +done: + if (ret == -ENOMEM) { + dev_err(vi->dev, "graph init failed\n"); + tegra_vi_graph_cleanup(vi); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(tegra_vi_graph_init); diff --git a/drivers/media/platform/tegra/camera/vi/mc_common.c b/drivers/media/platform/tegra/camera/vi/mc_common.c new file mode 100644 index 00000000..160eb5df --- /dev/null +++ b/drivers/media/platform/tegra/camera/vi/mc_common.c @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Tegra Video Input device common APIs + * + * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +static struct tegra_mc_vi *tegra_mcvi; + +struct tegra_mc_vi *tegra_get_mc_vi(void) +{ + return tegra_mcvi; +} +EXPORT_SYMBOL(tegra_get_mc_vi); + +/* In TPG mode, VI only support 2 formats */ +static void vi_tpg_fmts_bitmap_init(struct tegra_channel *chan) +{ + int index; + + bitmap_zero(chan->fmts_bitmap, MAX_FORMAT_NUM); + + index = tegra_core_get_idx_by_code(chan, + MEDIA_BUS_FMT_SRGGB10_1X10, 0); + bitmap_set(chan->fmts_bitmap, index, 1); + + index = tegra_core_get_idx_by_code(chan, + MEDIA_BUS_FMT_RGB888_1X32_PADHI, 0); + bitmap_set(chan->fmts_bitmap, index, 1); +} + +/* ----------------------------------------------------------------------------- + * Media Controller and V4L2 + */ + +static const char *const vi_pattern_strings[] = { + "Disabled", + "Black/White Direct Mode", + "Color Patch Mode", +}; + +static int vi_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct tegra_mc_vi *vi = container_of(ctrl->handler, struct tegra_mc_vi, + ctrl_handler); + + switch (ctrl->id) { + case V4L2_CID_TEST_PATTERN: + /* + * TPG control is only avaiable to TPG driver, + * it can't be changed to 0 to disable TPG mode. 
+ */ + if (ctrl->val) { + dev_info(&vi->ndev->dev, "Set TPG mode to %d\n", + ctrl->val); + vi->pg_mode = ctrl->val; + } + break; + default: + dev_err(vi->dev, "%s:Not valid ctrl\n", __func__); + return -EINVAL; + } + + return 0; +} + +static const struct v4l2_ctrl_ops vi_ctrl_ops = { + .s_ctrl = vi_s_ctrl, +}; + +void tegra_vi_v4l2_cleanup(struct tegra_mc_vi *vi) +{ + v4l2_ctrl_handler_free(&vi->ctrl_handler); + v4l2_device_unregister(&vi->v4l2_dev); + if (!vi->pg_mode) + media_device_unregister(&vi->media_dev); +} +EXPORT_SYMBOL(tegra_vi_v4l2_cleanup); + +static void tegra_vi_notify(struct v4l2_subdev *sd, + unsigned int notification, void *arg) +{ + struct tegra_mc_vi *vi = container_of(sd->v4l2_dev, + struct tegra_mc_vi, v4l2_dev); + const struct v4l2_event *ev = arg; + unsigned i; + struct tegra_channel *chan; + + if (notification != V4L2_DEVICE_NOTIFY_EVENT) + return; + + list_for_each_entry(chan, &vi->vi_chans, list) { + for (i = 0; i < chan->num_subdevs; i++) + if (sd == chan->subdev[i]) { + v4l2_event_queue(chan->video, arg); + if (ev->type == V4L2_EVENT_SOURCE_CHANGE && + vb2_is_streaming(&chan->queue)) + vb2_queue_error(&chan->queue); + } + } +} + +int tegra_vi_v4l2_init(struct tegra_mc_vi *vi) +{ + int ret; + + vi->media_dev.dev = vi->dev; + strlcpy(vi->media_dev.model, "NVIDIA Tegra Video Input Device", + sizeof(vi->media_dev.model)); + vi->media_dev.hw_revision = 3; + + media_device_init(&vi->media_dev); + + ret = media_device_register(&vi->media_dev); + if (ret < 0) { + dev_err(vi->dev, + "media device registration failed (%d)\n", + ret); + return ret; + } + + mutex_init(&vi->bw_update_lock); + vi->v4l2_dev.mdev = &vi->media_dev; + vi->v4l2_dev.notify = tegra_vi_notify; + ret = v4l2_device_register(vi->dev, &vi->v4l2_dev); + if (ret < 0) { + dev_err(vi->dev, "V4L2 device registration failed (%d)\n", + ret); + goto register_error; + } + + return 0; + +register_error: + media_device_cleanup(&vi->media_dev); + media_device_unregister(&vi->media_dev); + 
return ret; +} + +static int vi_parse_dt(struct tegra_mc_vi *vi, struct platform_device *dev) +{ + int err = 0; + int num_channels = 0; + int i; + struct tegra_channel *item; + struct device_node *node = dev->dev.of_node; + + err = of_property_read_u32(node, "num-channels", &num_channels); + if (err) { + dev_dbg(&dev->dev, + "Failed to find num of channels, set to 0\n"); + num_channels = 0; + } + vi->num_channels = num_channels; + for (i = 0; i < num_channels; i++) { + item = devm_kzalloc(vi->dev, sizeof(*item), GFP_KERNEL); + if (!item) + return -ENOMEM; + item->id = i; + list_add_tail(&item->list, &vi->vi_chans); + } + + return 0; +} + +static void set_vi_register_base(struct tegra_mc_vi *mc_vi, + void __iomem *regbase) +{ + mc_vi->iomem = regbase; +} +int tpg_vi_media_controller_init(struct tegra_mc_vi *mc_vi, int pg_mode) +{ + int err = 0, i; + struct tegra_channel *item; + const unsigned int num_pre_channels = mc_vi->num_channels; + + /* Allocate TPG channel */ + v4l2_ctrl_handler_init(&mc_vi->ctrl_handler, 1); + mc_vi->pattern = v4l2_ctrl_new_std_menu_items(&mc_vi->ctrl_handler, + &vi_ctrl_ops, V4L2_CID_TEST_PATTERN, + ARRAY_SIZE(vi_pattern_strings) - 1, + 0, mc_vi->pg_mode, vi_pattern_strings); + + if (mc_vi->ctrl_handler.error) { + dev_err(mc_vi->dev, "failed to add controls\n"); + err = mc_vi->ctrl_handler.error; + goto ctrl_error; + } + + mc_vi->tpg_start = NULL; + for (i = 0; i < mc_vi->csi->num_tpg_channels; i++) { + item = devm_kzalloc(mc_vi->dev, sizeof(*item), GFP_KERNEL); + if (!item) + goto channel_init_error; + + item->id = num_pre_channels + i; + item->pg_mode = pg_mode; + item->vi = mc_vi; + + err = tegra_channel_init(item); + if (err) { + devm_kfree(mc_vi->dev, item); + goto channel_init_error; + } + + /* Allocate video_device */ + err = tegra_channel_init_video(item); + if (err < 0) { + devm_kfree(mc_vi->dev, item); + dev_err(&item->video->dev, "failed to allocate video device %s\n", + item->video->name); + goto channel_init_error; + } + + err 
= video_register_device(item->video, VFL_TYPE_VIDEO, -1); + if (err < 0) { + devm_kfree(mc_vi->dev, item); + video_device_release(item->video); + dev_err(&item->video->dev, "failed to register %s\n", + item->video->name); + goto channel_init_error; + } + + vi_tpg_fmts_bitmap_init(item); + /* only inited tpg channels are added */ + list_add_tail(&item->list, &mc_vi->vi_chans); + if (mc_vi->tpg_start == NULL) + mc_vi->tpg_start = item; + } + mc_vi->num_channels += mc_vi->csi->num_tpg_channels; + + err = tegra_vi_tpg_graph_init(mc_vi); + if (err) + goto channel_init_error; + + return err; + +channel_init_error: + dev_err(mc_vi->dev, "%s: channel init failed\n", __func__); + if (!mc_vi->tpg_start) + tpg_vi_media_controller_cleanup(mc_vi); + return err; +ctrl_error: + v4l2_ctrl_handler_free(&mc_vi->ctrl_handler); + dev_err(mc_vi->dev, "%s: v2l4_ctl error\n", __func__); + return err; +} +EXPORT_SYMBOL(tpg_vi_media_controller_init); + +void tpg_vi_media_controller_cleanup(struct tegra_mc_vi *mc_vi) +{ + struct tegra_channel *item; + struct tegra_channel *itemn; + + list_for_each_entry_safe(item, itemn, &mc_vi->vi_chans, list) { + if (!item->pg_mode) + continue; + if (item->video->cdev != NULL) + video_unregister_device(item->video); + tegra_channel_cleanup(item); + list_del(&item->list); + devm_kfree(mc_vi->dev, item); + mc_vi->num_channels--; + } + mc_vi->tpg_start = NULL; + v4l2_ctrl_handler_free(&mc_vi->ctrl_handler); +} +EXPORT_SYMBOL(tpg_vi_media_controller_cleanup); + +static int tegra_vi_media_controller_init_int(struct tegra_mc_vi *mc_vi, + struct platform_device *pdev) +{ + int err = 0; + mc_vi->ndev = pdev; + mc_vi->dev = &pdev->dev; + INIT_LIST_HEAD(&mc_vi->vi_chans); + mutex_init(&mc_vi->mipical_lock); + + err = vi_parse_dt(mc_vi, pdev); + if (err) + goto mc_init_fail; + + tegra_mcvi = mc_vi; + + err = tegra_vi_v4l2_init(mc_vi); + if (err < 0) + goto mc_init_fail; + + /* + * if there is no vi channels listed in DT, + * no need to init the channel and graph + 
*/ + if (mc_vi->num_channels == 0) + return 0; + + /* Init Tegra VI channels */ + err = tegra_vi_channels_init(mc_vi); + if (err < 0) { + dev_err(&pdev->dev, "Init channel failed\n"); + goto channels_error; + } + + /* Setup media links between VI and external sensor subdev. */ + err = tegra_vi_graph_init(mc_vi); + if (err < 0) + goto graph_error; + + return 0; + +graph_error: + tegra_vi_channels_cleanup(mc_vi); +channels_error: + tegra_vi_v4l2_cleanup(mc_vi); +mc_init_fail: + dev_err(&pdev->dev, "%s: failed\n", __func__); + return err; +} + +int tegra_vi_media_controller_init(struct tegra_mc_vi *mc_vi, + struct platform_device *pdev) +{ + struct nvhost_device_data *pdata = (struct nvhost_device_data *) + platform_get_drvdata(pdev); + + if (!pdata) + return -EINVAL; + set_vi_register_base(mc_vi, pdata->aperture[0]); + + return tegra_vi_media_controller_init_int(mc_vi, pdev); +} +EXPORT_SYMBOL(tegra_vi_media_controller_init); + +int tegra_capture_vi_media_controller_init(struct tegra_mc_vi *mc_vi, + struct platform_device *pdev) +{ + return tegra_vi_media_controller_init_int(mc_vi, pdev); +} +EXPORT_SYMBOL(tegra_capture_vi_media_controller_init); + +void tegra_vi_media_controller_cleanup(struct tegra_mc_vi *mc_vi) +{ + tegra_vi_channels_unregister(mc_vi); + tegra_vi_graph_cleanup(mc_vi); + tegra_vi_channels_cleanup(mc_vi); + tegra_vi_v4l2_cleanup(mc_vi); + tegra_mcvi = NULL; +} +EXPORT_SYMBOL(tegra_vi_media_controller_cleanup); diff --git a/drivers/media/platform/tegra/camera/vi/vi5_fops.c b/drivers/media/platform/tegra/camera/vi/vi5_fops.c new file mode 100644 index 00000000..53b6ec2d --- /dev/null +++ b/drivers/media/platform/tegra/camera/vi/vi5_fops.c @@ -0,0 +1,994 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Tegra Video Input 5 device common APIs + * + * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "vi5_formats.h" +#include "vi5_fops.h" +#include + +#define DEFAULT_FRAMERATE 30 +#define BPP_MEM 2 +#define VI_CSI_CLK_SCALE 110 +#define PG_BITRATE 32 +#define SLVSEC_STREAM_MAIN 0U + +#define CAPTURE_TIMEOUT_MS 2500 + +static const struct vi_capture_setup default_setup = { + .channel_flags = 0 + | CAPTURE_CHANNEL_FLAG_VIDEO + | CAPTURE_CHANNEL_FLAG_RAW + | CAPTURE_CHANNEL_FLAG_EMBDATA + | CAPTURE_CHANNEL_FLAG_LINETIMER + , + + .vi_channel_mask = ~0ULL, + .vi2_channel_mask = ~0ULL, + + .queue_depth = CAPTURE_MIN_BUFFERS, + .request_size = sizeof(struct capture_descriptor), + .mem = 0, /* fill in later */ +}; + +static const struct capture_descriptor capture_template = { + .sequence = 0, + + .capture_flags = 0 + | CAPTURE_FLAG_STATUS_REPORT_ENABLE + | CAPTURE_FLAG_ERROR_REPORT_ENABLE + , + + .ch_cfg = { + .pixfmt_enable = 0, /* no output */ + .match = { + .stream = 0, /* one-hot bit encoding */ + .stream_mask = 0x3f, + .vc = (1u << 0), /* one-hot bit encoding */ + .vc_mask = 0xffff, + }, + }, +}; + +static void vi5_init_video_formats(struct tegra_channel *chan) +{ + int i; + + chan->num_video_formats = ARRAY_SIZE(vi5_video_formats); + for (i = 0; i < chan->num_video_formats; i++) + chan->video_formats[i] = &vi5_video_formats[i]; +} + +static int tegra_vi5_g_volatile_ctrl(struct v4l2_ctrl *ctrl) +{ + struct tegra_channel *chan = container_of(ctrl->handler, + struct tegra_channel, ctrl_handler); + struct v4l2_subdev *sd = chan->subdev_on_csi; + struct camera_common_data *s_data = + to_camera_common_data(sd->dev); + struct tegracam_ctrl_handler *handler; + struct tegracam_sensor_data *sensor_data; + + if (!s_data) + return -EINVAL; + handler = s_data->tegracam_ctrl_hdl; + if (!handler) + return -EINVAL; + sensor_data = &handler->sensor_data; + + /* TODO: Support reading blobs for multiple devices */ + switch (ctrl->id) { + case 
TEGRA_CAMERA_CID_SENSOR_CONFIG: { + struct sensor_cfg *cfg = &s_data->sensor_props.cfg; + + memcpy(ctrl->p_new.p, cfg, sizeof(struct sensor_cfg)); + break; + } + case TEGRA_CAMERA_CID_SENSOR_MODE_BLOB: { + struct sensor_blob *blob = &sensor_data->mode_blob; + + memcpy(ctrl->p_new.p, blob, sizeof(struct sensor_blob)); + break; + } + case TEGRA_CAMERA_CID_SENSOR_CONTROL_BLOB: { + struct sensor_blob *blob = &sensor_data->ctrls_blob; + + memcpy(ctrl->p_new.p, blob, sizeof(struct sensor_blob)); + break; + } + default: + pr_err("%s: unknown ctrl id.\n", __func__); + return -EINVAL; + } + + return 0; +} + +static int tegra_vi5_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct tegra_channel *chan = container_of(ctrl->handler, + struct tegra_channel, ctrl_handler); + int err = 0; + + switch (ctrl->id) { + case TEGRA_CAMERA_CID_WRITE_ISPFORMAT: + chan->write_ispformat = ctrl->val; + break; + default: + dev_err(&chan->video->dev, "%s:Not valid ctrl\n", __func__); + return -EINVAL; + } + + return err; +} + +static const struct v4l2_ctrl_ops vi5_ctrl_ops = { + .s_ctrl = tegra_vi5_s_ctrl, + .g_volatile_ctrl = tegra_vi5_g_volatile_ctrl, +}; + +static const struct v4l2_ctrl_config vi5_custom_ctrls[] = { + { + .ops = &vi5_ctrl_ops, + .id = TEGRA_CAMERA_CID_WRITE_ISPFORMAT, + .name = "Write ISP format", + .type = V4L2_CTRL_TYPE_INTEGER, + .def = 1, + .min = 1, + .max = 1, + .step = 1, + }, + { + .ops = &vi5_ctrl_ops, + .id = TEGRA_CAMERA_CID_SENSOR_CONFIG, + .name = "Sensor configuration", + .type = V4L2_CTRL_TYPE_U32, + .flags = V4L2_CTRL_FLAG_READ_ONLY | + V4L2_CTRL_FLAG_HAS_PAYLOAD | + V4L2_CTRL_FLAG_VOLATILE, + .min = 0, + .max = 0xFFFFFFFF, + .def = 0, + .step = 1, + .dims = { SENSOR_CONFIG_SIZE }, + }, + { + .ops = &vi5_ctrl_ops, + .id = TEGRA_CAMERA_CID_SENSOR_MODE_BLOB, + .name = "Sensor mode I2C packet", + .type = V4L2_CTRL_TYPE_U32, + .flags = V4L2_CTRL_FLAG_READ_ONLY | + V4L2_CTRL_FLAG_HAS_PAYLOAD | + V4L2_CTRL_FLAG_VOLATILE, + .min = 0, + .max = 0xFFFFFFFF, + .def = 0, + .step 
= 1, + .dims = { SENSOR_MODE_BLOB_SIZE }, + }, + { + .ops = &vi5_ctrl_ops, + .id = TEGRA_CAMERA_CID_SENSOR_CONTROL_BLOB, + .name = "Sensor control I2C packet", + .type = V4L2_CTRL_TYPE_U32, + .flags = V4L2_CTRL_FLAG_READ_ONLY | + V4L2_CTRL_FLAG_HAS_PAYLOAD | + V4L2_CTRL_FLAG_VOLATILE, + .min = 0, + .max = 0xFFFFFFFF, + .def = 0, + .step = 1, + .dims = { SENSOR_CTRL_BLOB_SIZE }, + }, +}; + +static int vi5_add_ctrls(struct tegra_channel *chan) +{ + int i; + + /* Add vi5 custom controls */ + for (i = 0; i < ARRAY_SIZE(vi5_custom_ctrls); i++) { + v4l2_ctrl_new_custom(&chan->ctrl_handler, + &vi5_custom_ctrls[i], NULL); + if (chan->ctrl_handler.error) { + dev_err(chan->vi->dev, + "Failed to add %s ctrl\n", + vi5_custom_ctrls[i].name); + return chan->ctrl_handler.error; + } + } + + return 0; +} + +static int vi5_channel_setup_queue(struct tegra_channel *chan, + unsigned int *nbuffers) +{ + int ret = 0; + + *nbuffers = clamp(*nbuffers, CAPTURE_MIN_BUFFERS, CAPTURE_MAX_BUFFERS); + + ret = tegra_channel_alloc_buffer_queue(chan, *nbuffers); + if (ret < 0) + goto done; + + chan->capture_reqs_enqueued = 0; + +done: + return ret; +} + +static struct tegra_csi_channel *find_linked_csi_channel( + struct tegra_channel *chan) +{ + struct tegra_csi_channel *csi_it; + struct tegra_csi_channel *csi_chan = NULL; + int i; + + struct tegra_csi_device *csi = tegra_get_mc_csi(); + if (csi == NULL) + { + dev_err(chan->vi->dev, "csi mc not found"); + return NULL; + } + /* Find connected csi_channel */ + list_for_each_entry(csi_it, &csi->csi_chans, list) { + for (i = 0; i < chan->num_subdevs; i++) { + if (chan->subdev[i] == &csi_it->subdev) { + csi_chan = csi_it; + break; + } + } + } + return csi_chan; +} + +static int tegra_channel_capture_setup(struct tegra_channel *chan, unsigned int vi_port) +{ + struct vi_capture_setup setup = default_setup; + long err; + + setup.queue_depth = chan->capture_queue_depth; + + trace_tegra_channel_capture_setup(chan, 0); + + chan->request[vi_port] = 
dma_alloc_coherent(chan->tegra_vi_channel[vi_port]->rtcpu_dev, + setup.queue_depth * setup.request_size, + &setup.iova, GFP_KERNEL); + if (chan->request[vi_port] == NULL) { + dev_err(chan->vi->dev, "dma_alloc_coherent failed\n"); + return -ENOMEM; + } + + if (chan->is_slvsec) { + setup.channel_flags |= CAPTURE_CHANNEL_FLAG_SLVSEC; + setup.slvsec_stream_main = SLVSEC_STREAM_MAIN; + setup.slvsec_stream_sub = SLVSEC_STREAM_DISABLED; + } + + /* Set the NVCSI PixelParser index (Stream ID) and VC ID*/ + setup.csi_stream_id = chan->port[vi_port]; + setup.virtual_channel_id = chan->virtual_channel; + /* Set CSI port info */ + if (chan->pg_mode) { + setup.csi_port = NVCSI_PORT_UNSPECIFIED; + } else { + struct tegra_csi_channel *csi_chan = find_linked_csi_channel(chan); + + if (csi_chan == NULL) + { + dev_err(chan->vi->dev, "csi_chan not found"); + return -EINVAL; + } + + setup.csi_port = csi_chan->ports[vi_port].csi_port; + } + + if (chan->fmtinfo->fourcc == V4L2_PIX_FMT_NV16) + setup.channel_flags |= CAPTURE_CHANNEL_FLAG_SEMI_PLANAR; + + err = vi_capture_setup(chan->tegra_vi_channel[vi_port], &setup); + if (err) { + dev_err(chan->vi->dev, "vi capture setup failed\n"); + dma_free_coherent(chan->tegra_vi_channel[vi_port]->rtcpu_dev, + setup.queue_depth * setup.request_size, + chan->request, setup.iova); + return err; + } + + return 0; +} + +static void vi5_setup_surface(struct tegra_channel *chan, + struct tegra_channel_buffer *buf, unsigned int descr_index, unsigned int vi_port) +{ + dma_addr_t offset = buf->addr + chan->buffer_offset[vi_port]; + u32 height = chan->format.height; + u32 width = chan->format.width; + u32 format = chan->fmtinfo->img_fmt; + u32 bpl = chan->format.bytesperline; + u32 data_type = chan->fmtinfo->img_dt; + u32 nvcsi_stream = chan->port[vi_port]; + struct capture_descriptor_memoryinfo *desc_memoryinfo = + &chan->tegra_vi_channel[vi_port]-> + capture_data->requests_memoryinfo[descr_index]; + struct capture_descriptor *desc = 
&chan->request[vi_port][descr_index]; + + if (chan->valid_ports > NVCSI_STREAM_1) { + height = chan->gang_height; + width = chan->gang_width; + offset = buf->addr + chan->buffer_offset[1 - vi_port]; + } + + memcpy(desc, &capture_template, sizeof(capture_template)); + memset(desc_memoryinfo, 0, sizeof(*desc_memoryinfo)); + + desc->sequence = chan->capture_descr_sequence; + desc->ch_cfg.match.stream = (1u << nvcsi_stream); /* one-hot bit encoding */ + desc->ch_cfg.match.vc = (1u << chan->virtual_channel); /* one-hot bit encoding */ + desc->ch_cfg.frame.frame_x = width; + desc->ch_cfg.frame.frame_y = height; + desc->ch_cfg.match.datatype = data_type; + desc->ch_cfg.match.datatype_mask = 0x3f; + desc->ch_cfg.pixfmt_enable = 1; + desc->ch_cfg.pixfmt.format = format; + + desc_memoryinfo->surface[0].base_address = offset; + desc_memoryinfo->surface[0].size = chan->format.bytesperline * height; + desc->ch_cfg.atomp.surface_stride[0] = bpl; + if (chan->fmtinfo->fourcc == V4L2_PIX_FMT_NV16) { + desc_memoryinfo->surface[1].base_address = offset + chan->format.sizeimage / 2; + desc_memoryinfo->surface[1].size = chan->format.bytesperline * height; + desc->ch_cfg.atomp.surface_stride[1] = bpl; + } + + if (chan->embedded_data_height > 0) { + desc->ch_cfg.embdata_enable = 1; + desc->ch_cfg.frame.embed_x = chan->embedded_data_width * BPP_MEM; + desc->ch_cfg.frame.embed_y = chan->embedded_data_height; + + desc_memoryinfo->surface[VI_ATOMP_SURFACE_EMBEDDED].base_address + = chan->emb_buf; + desc_memoryinfo->surface[VI_ATOMP_SURFACE_EMBEDDED].size + = desc->ch_cfg.frame.embed_x * desc->ch_cfg.frame.embed_y; + + desc->ch_cfg.atomp.surface_stride[VI_ATOMP_SURFACE_EMBEDDED] + = chan->embedded_data_width * BPP_MEM; + } + + chan->capture_descr_sequence += 1; +} + +static void vi5_release_buffer(struct tegra_channel *chan, + struct tegra_channel_buffer *buf) +{ + struct vb2_v4l2_buffer *vbuf = &buf->buf; + + vbuf->sequence = chan->sequence++; + vbuf->field = V4L2_FIELD_NONE; + 
vb2_set_plane_payload(&vbuf->vb2_buf, 0, chan->format.sizeimage); + + vb2_buffer_done(&vbuf->vb2_buf, buf->vb2_state); +} + +static void vi5_capture_enqueue(struct tegra_channel *chan, + struct tegra_channel_buffer *buf) +{ + int err = 0; + unsigned int vi_port; + unsigned long flags; + struct tegra_mc_vi *vi = chan->vi; + struct vi_capture_req request[2] = {{ + .buffer_index = 0, + }, { + .buffer_index = 0, + }}; + + for (vi_port = 0; vi_port < chan->valid_ports; vi_port++) { + vi5_setup_surface(chan, buf, chan->capture_descr_index, vi_port); + request[vi_port].buffer_index = chan->capture_descr_index; + + err = vi_capture_request(chan->tegra_vi_channel[vi_port], &request[vi_port]); + + if (err) { + dev_err(vi->dev, "uncorr_err: request dispatch err %d\n", err); + goto uncorr_err; + } + + spin_lock_irqsave(&chan->capture_state_lock, flags); + if (chan->capture_state != CAPTURE_ERROR) { + chan->capture_state = CAPTURE_GOOD; + chan->capture_reqs_enqueued += 1; + } + spin_unlock_irqrestore(&chan->capture_state_lock, flags); + buf->capture_descr_index[vi_port] = chan->capture_descr_index; + } + chan->capture_descr_index = ((chan->capture_descr_index + 1) + % (chan->capture_queue_depth)); + + spin_lock(&chan->dequeue_lock); + list_add_tail(&buf->queue, &chan->dequeue); + spin_unlock(&chan->dequeue_lock); + + wake_up_interruptible(&chan->dequeue_wait); + + return; + +uncorr_err: + spin_lock_irqsave(&chan->capture_state_lock, flags); + chan->capture_state = CAPTURE_ERROR; + spin_unlock_irqrestore(&chan->capture_state_lock, flags); +} + +static void vi5_capture_dequeue(struct tegra_channel *chan, + struct tegra_channel_buffer *buf) +{ + int err = 0; + unsigned int vi_port = 0; + int gang_prev_frame_id = 0; + unsigned long flags; + struct tegra_mc_vi *vi = chan->vi; + struct vb2_v4l2_buffer *vb = &buf->buf; + struct timespec64 ts; + struct capture_descriptor *descr = NULL; + + for (vi_port = 0; vi_port < chan->valid_ports; vi_port++) { + descr = 
&chan->request[vi_port][buf->capture_descr_index[vi_port]]; + + if (buf->vb2_state != VB2_BUF_STATE_ACTIVE) + goto rel_buf; + + /* Dequeue a frame and check its capture status */ + err = vi_capture_status(chan->tegra_vi_channel[vi_port], CAPTURE_TIMEOUT_MS); + if (err) { + if (err == -ETIMEDOUT) { + dev_err(vi->dev, + "uncorr_err: request timed out after %d ms\n", + CAPTURE_TIMEOUT_MS); + } else { + dev_err(vi->dev, "uncorr_err: request err %d\n", err); + } + goto uncorr_err; + } else if (descr->status.status != CAPTURE_STATUS_SUCCESS) { + if ((descr->status.flags + & CAPTURE_STATUS_FLAG_CHANNEL_IN_ERROR) != 0) { + chan->queue_error = true; + dev_err(vi->dev, "uncorr_err: flags %d, err_data %d\n", + descr->status.flags, descr->status.err_data); + } else { + dev_warn(vi->dev, + "corr_err: discarding frame %d, flags: %d, " + "err_data %d\n", + descr->status.frame_id, descr->status.flags, + descr->status.err_data); + buf->vb2_state = VB2_BUF_STATE_ERROR; + goto done; + } + } else if (!vi_port) { + gang_prev_frame_id = descr->status.frame_id; + } else if (descr->status.frame_id != gang_prev_frame_id) { + dev_err(vi->dev, "frame_id out of sync: ch2 %d vs ch1 %d\n", + gang_prev_frame_id, descr->status.frame_id); + goto uncorr_err; + } + + spin_lock_irqsave(&chan->capture_state_lock, flags); + if (chan->capture_state != CAPTURE_ERROR) { + chan->capture_reqs_enqueued -= 1; + chan->capture_state = CAPTURE_GOOD; + } + spin_unlock_irqrestore(&chan->capture_state_lock, flags); + } + + wake_up_interruptible(&chan->start_wait); + /* Read SOF from capture descriptor */ + ts = ns_to_timespec64((s64)descr->status.sof_timestamp); + trace_tegra_channel_capture_frame("sof", &ts); + vb->vb2_buf.timestamp = descr->status.sof_timestamp; + + buf->vb2_state = VB2_BUF_STATE_DONE; + /* Read EOF from capture descriptor */ + ts = ns_to_timespec64((s64)descr->status.eof_timestamp); + trace_tegra_channel_capture_frame("eof", &ts); + +done: + goto rel_buf; + +uncorr_err: + 
spin_lock_irqsave(&chan->capture_state_lock, flags); + chan->capture_state = CAPTURE_ERROR; + spin_unlock_irqrestore(&chan->capture_state_lock, flags); + + buf->vb2_state = VB2_BUF_STATE_ERROR; + +rel_buf: + vi5_release_buffer(chan, buf); +} + +static int vi5_channel_error_recover(struct tegra_channel *chan, + bool queue_error) +{ + int err = 0; + unsigned int vi_port = 0; + struct tegra_channel_buffer *buf; + struct tegra_mc_vi *vi = chan->vi; + struct v4l2_subdev *csi_subdev; + + /* stop vi channel */ + for (vi_port = 0; vi_port < chan->valid_ports; vi_port++) { + err = vi_capture_release(chan->tegra_vi_channel[vi_port], + CAPTURE_CHANNEL_RESET_FLAG_IMMEDIATE); + if (err) { + dev_err(&chan->video->dev, "vi capture release failed\n"); + goto done; + } + vi_channel_close_ex(chan->id + vi_port, chan->tegra_vi_channel[vi_port]); + chan->tegra_vi_channel[vi_port] = NULL; + } + + + /* release all previously-enqueued capture buffers to v4l2 */ + while (!list_empty(&chan->capture)) { + buf = dequeue_buffer(chan, false); + if (!buf) + break; + vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR); + } + while (!list_empty(&chan->dequeue)) { + buf = dequeue_dequeue_buffer(chan); + if (!buf) + break; + buf->vb2_state = VB2_BUF_STATE_ERROR; + vi5_capture_dequeue(chan, buf); + } + + /* report queue error to application */ + if (queue_error) + vb2_queue_error(&chan->queue); + + /* reset nvcsi stream */ + csi_subdev = tegra_channel_find_linked_csi_subdev(chan); + if (!csi_subdev) { + dev_err(vi->dev, "unable to find linked csi subdev\n"); + err = -1; + goto done; + } + +#if 0 /* disable for Canonical kernel */ + v4l2_subdev_call(csi_subdev, core, sync, + V4L2_SYNC_EVENT_SUBDEV_ERROR_RECOVER); +#endif + + /* restart vi channel */ + for (vi_port = 0; vi_port < chan->valid_ports; vi_port++) { + chan->tegra_vi_channel[vi_port] = vi_channel_open_ex(chan->id + vi_port, false); + if (IS_ERR(chan->tegra_vi_channel[vi_port])) { + err = PTR_ERR(chan); + goto done; + } + err = 
tegra_channel_capture_setup(chan, vi_port); + if (err < 0) + goto done; + } + + chan->sequence = 0; + tegra_channel_init_ring_buffer(chan); + + chan->capture_reqs_enqueued = 0; + + /* clear capture channel error state */ + chan->capture_state = CAPTURE_IDLE; + +done: + return err; +} + +static int tegra_channel_kthread_capture_enqueue(void *data) +{ + struct tegra_channel *chan = data; + struct tegra_channel_buffer *buf; + unsigned long flags; + set_freezable(); + + while (1) { + try_to_freeze(); + + wait_event_interruptible(chan->start_wait, + (kthread_should_stop() || !list_empty(&chan->capture))); + + while (!(kthread_should_stop() || list_empty(&chan->capture))) { + spin_lock_irqsave(&chan->capture_state_lock, flags); + if ((chan->capture_state == CAPTURE_ERROR) + || !(chan->capture_reqs_enqueued + < (chan->capture_queue_depth * chan->valid_ports))) { + spin_unlock_irqrestore( + &chan->capture_state_lock, flags); + break; + } + spin_unlock_irqrestore(&chan->capture_state_lock, + flags); + + buf = dequeue_buffer(chan, false); + if (!buf) + break; + + buf->vb2_state = VB2_BUF_STATE_ACTIVE; + + vi5_capture_enqueue(chan, buf); + } + + if (kthread_should_stop()) + break; + } + + return 0; +} + +static int tegra_channel_kthread_capture_dequeue(void *data) +{ + int err = 0; + unsigned long flags; + struct tegra_channel *chan = data; + struct tegra_channel_buffer *buf; + + set_freezable(); + + while (1) { + try_to_freeze(); + + wait_event_interruptible(chan->dequeue_wait, + (kthread_should_stop() + || !list_empty(&chan->dequeue) + || (chan->capture_state == CAPTURE_ERROR))); + + while (!(kthread_should_stop() || list_empty(&chan->dequeue) + || (chan->capture_state == CAPTURE_ERROR))) { + + buf = dequeue_dequeue_buffer(chan); + if (!buf) + break; + + vi5_capture_dequeue(chan, buf); + } + + spin_lock_irqsave(&chan->capture_state_lock, flags); + if (chan->capture_state == CAPTURE_ERROR) { + spin_unlock_irqrestore(&chan->capture_state_lock, + flags); + err = 
tegra_channel_error_recover(chan, false); + if (err) { + dev_err(chan->vi->dev, + "fatal: error recovery failed\n"); + break; + } + } else + spin_unlock_irqrestore(&chan->capture_state_lock, + flags); + if (kthread_should_stop()) + break; + } + + return 0; +} + +static int vi5_channel_start_kthreads(struct tegra_channel *chan) +{ + int err = 0; + + /* Start the kthread for capture enqueue */ + if (chan->kthread_capture_start) { + dev_err(chan->vi->dev, "enqueue kthread already initialized\n"); + err = -1; + goto done; + } + chan->kthread_capture_start = kthread_run( + tegra_channel_kthread_capture_enqueue, chan, chan->video->name); + if (IS_ERR(chan->kthread_capture_start)) { + dev_err(&chan->video->dev, + "failed to run kthread for capture enqueue\n"); + err = PTR_ERR(chan->kthread_capture_start); + goto done; + } + + /* Start the kthread for capture dequeue */ + if (chan->kthread_capture_dequeue) { + dev_err(chan->vi->dev, "dequeue kthread already initialized\n"); + err = -1; + goto done; + } + chan->kthread_capture_dequeue = kthread_run( + tegra_channel_kthread_capture_dequeue, chan, chan->video->name); + if (IS_ERR(chan->kthread_capture_dequeue)) { + dev_err(&chan->video->dev, + "failed to run kthread for capture dequeue\n"); + err = PTR_ERR(chan->kthread_capture_dequeue); + goto done; + } + +done: + return err; +} + +static void vi5_channel_stop_kthreads(struct tegra_channel *chan) +{ + mutex_lock(&chan->stop_kthread_lock); + + /* Stop the kthread for capture enqueue */ + if (chan->kthread_capture_start) { + kthread_stop(chan->kthread_capture_start); + chan->kthread_capture_start = NULL; + } + + /* Stop the kthread for capture dequeue */ + if (chan->kthread_capture_dequeue) { + kthread_stop(chan->kthread_capture_dequeue); + chan->kthread_capture_dequeue = NULL; + } + + mutex_unlock(&chan->stop_kthread_lock); +} + +static void vi5_unit_get_device_handle(struct platform_device *pdev, + uint32_t csi_stream_id, struct device **dev) +{ + if (dev) + *dev = 
vi_csi_stream_to_nvhost_device(pdev, csi_stream_id); + else + dev_err(&pdev->dev, "dev pointer is NULL\n"); +} + +static int vi5_channel_start_streaming(struct vb2_queue *vq, u32 count) +{ + struct tegra_channel *chan = vb2_get_drv_priv(vq); + /* WAR: With newer version pipe init has some race condition */ + /* TODO: resolve this issue to block userspace not to cleanup media */ + int ret = 0; + int vi_port = 0; + unsigned long flags; + struct v4l2_subdev *sd; + struct device_node *node; + struct sensor_mode_properties *sensor_mode; + struct camera_common_data *s_data; + unsigned int emb_buf_size = 0; + + /* Skip in bypass mode */ + if (!chan->bypass) { + for (vi_port = 0; vi_port < chan->valid_ports; vi_port++) { + chan->tegra_vi_channel[vi_port] = + vi_channel_open_ex(chan->id + vi_port, false); + if (IS_ERR(chan->tegra_vi_channel[vi_port])) { + ret = PTR_ERR(chan); + goto err_open_ex; + } + spin_lock_irqsave(&chan->capture_state_lock, flags); + chan->capture_state = CAPTURE_IDLE; + spin_unlock_irqrestore(&chan->capture_state_lock, flags); + + if (!chan->pg_mode) { + sd = chan->subdev_on_csi; + node = sd->dev->of_node; + s_data = to_camera_common_data(sd->dev); + + /* get sensor properties from DT */ + if (s_data != NULL && node != NULL) { + int idx = s_data->mode_prop_idx; + + emb_buf_size = 0; + if (idx < s_data->sensor_props.\ + num_modes) { + sensor_mode = + &s_data->sensor_props.\ + sensor_modes[idx]; + + chan->embedded_data_width = + sensor_mode->\ + image_properties.width; + chan->embedded_data_height = + sensor_mode->\ + image_properties.\ + embedded_metadata_height; + /* rounding up to page size */ + emb_buf_size = + round_up(chan->\ + embedded_data_width * + chan->\ + embedded_data_height * + BPP_MEM, PAGE_SIZE); + } + } + /* Allocate buffer for Embedded Data if need to*/ + if (emb_buf_size > chan->emb_buf_size) { + struct device *vi_unit_dev; + + vi5_unit_get_device_handle(\ + chan->vi->ndev, chan->port[0],\ + &vi_unit_dev); + /* + * if old buffer is 
smaller than what we need, + * release the old buffer and re-allocate a + * bigger one below. + */ + if (chan->emb_buf_size > 0) { + dma_free_coherent(vi_unit_dev, + chan->emb_buf_size, + chan->emb_buf_addr, + chan->emb_buf); + chan->emb_buf_size = 0; + } + + chan->emb_buf_addr = + dma_alloc_coherent(vi_unit_dev, + emb_buf_size, + &chan->emb_buf, GFP_KERNEL); + if (!chan->emb_buf_addr) { + dev_err(&chan->video->dev, + "Can't allocate memory" + "for embedded data\n"); + goto err_setup; + } + chan->emb_buf_size = emb_buf_size; + } + } + ret = tegra_channel_capture_setup(chan, vi_port); + if (ret < 0) + goto err_setup; + } + chan->sequence = 0; + tegra_channel_init_ring_buffer(chan); + + ret = vi5_channel_start_kthreads(chan); + if (ret != 0) + goto err_start_kthreads; + } + + /* csi stream/sensor devices should be streamon post vi channel setup */ + ret = tegra_channel_set_stream(chan, true); + if (ret < 0) + goto err_set_stream; + + ret = tegra_channel_write_blobs(chan); + if (ret < 0) + goto err_write_blobs; + + return 0; + +err_write_blobs: + tegra_channel_set_stream(chan, false); + +err_set_stream: + if (!chan->bypass) + vi5_channel_stop_kthreads(chan); + +err_start_kthreads: + if (!chan->bypass) + for (vi_port = 0; vi_port < chan->valid_ports; vi_port++) + vi_capture_release(chan->tegra_vi_channel[vi_port], + CAPTURE_CHANNEL_RESET_FLAG_IMMEDIATE); + +err_setup: + if (!chan->bypass) + for (vi_port = 0; vi_port < chan->valid_ports; vi_port++) { + vi_channel_close_ex(chan->id + vi_port, chan->tegra_vi_channel[vi_port]); + chan->tegra_vi_channel[vi_port] = NULL; + } + +err_open_ex: + vq->start_streaming_called = 0; + tegra_channel_queued_buf_done(chan, VB2_BUF_STATE_QUEUED, false); + + return ret; +} + +static int vi5_channel_stop_streaming(struct vb2_queue *vq) +{ + struct tegra_channel *chan = vb2_get_drv_priv(vq); + long err; + int vi_port = 0; + if (!chan->bypass) + vi5_channel_stop_kthreads(chan); + + /* csi stream/sensor(s) devices to be closed before vi 
channel */ + tegra_channel_set_stream(chan, false); + + if (!chan->bypass) { + for (vi_port = 0; vi_port < chan->valid_ports; vi_port++) { + err = vi_capture_release(chan->tegra_vi_channel[vi_port], + CAPTURE_CHANNEL_RESET_FLAG_IMMEDIATE); + + if (err) + dev_err(&chan->video->dev, + "vi capture release failed\n"); + + vi_channel_close_ex(chan->id + vi_port, chan->tegra_vi_channel[vi_port]); + chan->tegra_vi_channel[vi_port] = NULL; + } + + /* release all remaining buffers to v4l2 */ + tegra_channel_queued_buf_done(chan, VB2_BUF_STATE_ERROR, false); + } + + return 0; +} + +int tegra_vi5_enable(struct tegra_mc_vi *vi) +{ + int ret; + + ret = tegra_camera_emc_clk_enable(); + if (ret) + goto err_emc_enable; + + return 0; + +err_emc_enable: + return ret; +} + +void tegra_vi5_disable(struct tegra_mc_vi *vi) +{ + tegra_channel_ec_close(vi); + tegra_camera_emc_clk_disable(); +} + +static int vi5_power_on(struct tegra_channel *chan) +{ + int ret = 0; + struct tegra_mc_vi *vi; + struct tegra_csi_device *csi; + + vi = chan->vi; + csi = vi->csi; + + ret = tegra_vi5_enable(vi); + if (ret < 0) + return ret; + + ret = tegra_channel_set_power(chan, 1); + if (ret < 0) { + dev_err(vi->dev, "Failed to power on subdevices\n"); + return ret; + } + + return 0; +} + +static void vi5_power_off(struct tegra_channel *chan) +{ + int ret = 0; + struct tegra_mc_vi *vi; + struct tegra_csi_device *csi; + + vi = chan->vi; + csi = vi->csi; + + ret = tegra_channel_set_power(chan, 0); + if (ret < 0) + dev_err(vi->dev, "Failed to power off subdevices\n"); + + tegra_vi5_disable(vi); +} + +struct tegra_vi_fops vi5_fops = { + .vi_power_on = vi5_power_on, + .vi_power_off = vi5_power_off, + .vi_start_streaming = vi5_channel_start_streaming, + .vi_stop_streaming = vi5_channel_stop_streaming, + .vi_setup_queue = vi5_channel_setup_queue, + .vi_error_recover = vi5_channel_error_recover, + .vi_add_ctrls = vi5_add_ctrls, + .vi_init_video_formats = vi5_init_video_formats, + .vi_unit_get_device_handle = 
vi5_unit_get_device_handle, +}; +EXPORT_SYMBOL(vi5_fops); diff --git a/drivers/media/platform/tegra/camera/vi/vi5_fops.h b/drivers/media/platform/tegra/camera/vi/vi5_fops.h new file mode 100644 index 00000000..761a7cc4 --- /dev/null +++ b/drivers/media/platform/tegra/camera/vi/vi5_fops.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Tegra Video Input 5 device common APIs + * + * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved. + */ + +#ifndef __T186_VI5_H__ +#define __T186_VI5_H__ + +extern struct tegra_vi_fops vi5_fops; + +#endif diff --git a/drivers/media/platform/tegra/camera/vi/vi5_formats.h b/drivers/media/platform/tegra/camera/vi/vi5_formats.h new file mode 100644 index 00000000..a41f6317 --- /dev/null +++ b/drivers/media/platform/tegra/camera/vi/vi5_formats.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * NVIDIA Tegra Video Input Device Driver VI5 formats + * + * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved. + */ + +#ifndef __VI5_FORMATS_H_ +#define __VI5_FORMATS_H_ + +#include + +/* + * These go into the VI_CHn_PIXFMT_FORMAT register bits 7:0 + * Output pixel memory format for the VI channel. 
+ */ +enum tegra_image_format { + TEGRA_IMAGE_FORMAT_T_R5G6B5 = 1, + TEGRA_IMAGE_FORMAT_T_B5G6R5, + + TEGRA_IMAGE_FORMAT_T_R8 = 5, + + TEGRA_IMAGE_FORMAT_T_A8B8G8R8 = 8, + TEGRA_IMAGE_FORMAT_T_A8R8G8B8, + TEGRA_IMAGE_FORMAT_T_B8G8R8A8, + TEGRA_IMAGE_FORMAT_T_R8G8B8A8, + + TEGRA_IMAGE_FORMAT_T_Y8_U8__Y8_V8 = 16, + TEGRA_IMAGE_FORMAT_T_Y8_V8__Y8_U8, + TEGRA_IMAGE_FORMAT_T_V8_Y8__U8_Y8, + TEGRA_IMAGE_FORMAT_T_U8_Y8__V8_Y8, + + TEGRA_IMAGE_FORMAT_T_Y8__U8V8_N420 = 34, + TEGRA_IMAGE_FORMAT_T_Y8__V8U8_N420, + + TEGRA_IMAGE_FORMAT_T_B5G5R5A1 = 42, + TEGRA_IMAGE_FORMAT_T_R5G5B5A1, + TEGRA_IMAGE_FORMAT_T_Y8__U8V8_N422, + TEGRA_IMAGE_FORMAT_T_Y8__V8U8_N422, + TEGRA_IMAGE_FORMAT_T_Y8__U8__V8_N422, + TEGRA_IMAGE_FORMAT_T_Y8__U8__V8_N420, + + TEGRA_IMAGE_FORMAT_T_DPCM_RAW10 = 64, + + TEGRA_IMAGE_FORMAT_T_A2B10G10R10 = 68, + TEGRA_IMAGE_FORMAT_T_A2R10G10B10, + TEGRA_IMAGE_FORMAT_T_B10G10R10A2, + TEGRA_IMAGE_FORMAT_T_R10G10B10A2, + + TEGRA_IMAGE_FORMAT_T_A4B4G4R4 = 80, + TEGRA_IMAGE_FORMAT_T_A4R4G4B4, + TEGRA_IMAGE_FORMAT_T_B4G4R4A4, + TEGRA_IMAGE_FORMAT_T_R4G4B4A4, + TEGRA_IMAGE_FORMAT_T_A1B5G5R5, + TEGRA_IMAGE_FORMAT_T_A1R5G5B5, + + TEGRA_IMAGE_FORMAT_T_Y10__V10U10_N420 = 98, + TEGRA_IMAGE_FORMAT_T_Y10__U10V10_N420, + TEGRA_IMAGE_FORMAT_T_Y10__U10__V10_N420, + TEGRA_IMAGE_FORMAT_T_Y10__V10U10_N422, + TEGRA_IMAGE_FORMAT_T_Y10__U10V10_N422, + TEGRA_IMAGE_FORMAT_T_Y10__U10__V10_N422, + + TEGRA_IMAGE_FORMAT_T_DPCM_RAW12 = 128, + + TEGRA_IMAGE_FORMAT_T_R16_ISP = 194, + TEGRA_IMAGE_FORMAT_T_R16_F, + TEGRA_IMAGE_FORMAT_T_R16, + TEGRA_IMAGE_FORMAT_T_R16_I, + + TEGRA_IMAGE_FORMAT_T_R32 = 230, + + TEGRA_IMAGE_FORMAT_T_R32_F = 232, + + TEGRA_IMAGE_FORMAT_T_DPCM_RAW16 = 254, + TEGRA_IMAGE_FORMAT_T_DPCM_RAW20, +}; + +static const struct tegra_video_format vi5_video_formats[] = { + /* RAW 6: TODO */ + + /* RAW 7: TODO */ + + /* RAW 8 */ + TEGRA_VIDEO_FORMAT(RAW8, 8, SRGGB8_1X8, 1, 1, T_R8, + RAW8, SRGGB8, "RGRG.. 
GBGB.."), + TEGRA_VIDEO_FORMAT(RAW8, 8, SGRBG8_1X8, 1, 1, T_R8, + RAW8, SGRBG8, "GRGR.. BGBG.."), + TEGRA_VIDEO_FORMAT(RAW8, 8, SGBRG8_1X8, 1, 1, T_R8, + RAW8, SGBRG8, "GBGB.. RGRG.."), + TEGRA_VIDEO_FORMAT(RAW8, 8, SBGGR8_1X8, 1, 1, T_R8, + RAW8, SBGGR8, "BGBG.. GRGR.."), + + /* RAW 10 */ + TEGRA_VIDEO_FORMAT(RAW10, 10, SRGGB10_1X10, 2, 1, T_R16, + RAW10, SRGGB10, "RGRG.. GBGB.."), + TEGRA_VIDEO_FORMAT(RAW10, 10, SGRBG10_1X10, 2, 1, T_R16, + RAW10, SGRBG10, "GRGR.. BGBG.."), + TEGRA_VIDEO_FORMAT(RAW10, 10, SGBRG10_1X10, 2, 1, T_R16, + RAW10, SGBRG10, "GBGB.. RGRG.."), + TEGRA_VIDEO_FORMAT(RAW10, 10, SBGGR10_1X10, 2, 1, T_R16, + RAW10, SBGGR10, "BGBG.. GRGR.."), + + /* RAW 12 */ + TEGRA_VIDEO_FORMAT(RAW12, 12, SRGGB12_1X12, 2, 1, T_R16, + RAW12, SRGGB12, "RGRG.. GBGB.."), + TEGRA_VIDEO_FORMAT(RAW12, 12, SGRBG12_1X12, 2, 1, T_R16, + RAW12, SGRBG12, "GRGR.. BGBG.."), + TEGRA_VIDEO_FORMAT(RAW12, 12, SGBRG12_1X12, 2, 1, T_R16, + RAW12, SGBRG12, "GBGB.. RGRG.."), + TEGRA_VIDEO_FORMAT(RAW12, 12, SBGGR12_1X12, 2, 1, T_R16, + RAW12, SBGGR12, "BGBG.. 
GRGR.."), + + /* RGB888 */ + TEGRA_VIDEO_FORMAT(RGB888, 24, RGB888_1X24, 4, 1, T_A8R8G8B8, + RGB888, ABGR32, "BGRA-8-8-8-8"), + TEGRA_VIDEO_FORMAT(RGB888, 24, RGB888_1X32_PADHI, 4, 1, T_A8B8G8R8, + RGB888, RGB32, "RGB-8-8-8-8"), + + /* YUV422 */ + TEGRA_VIDEO_FORMAT(YUV422, 16, UYVY8_1X16, 2, 1, T_U8_Y8__V8_Y8, + YUV422_8, UYVY, "YUV 4:2:2"), + TEGRA_VIDEO_FORMAT(YUV422, 16, VYUY8_1X16, 2, 1, T_V8_Y8__U8_Y8, + YUV422_8, VYUY, "YUV 4:2:2"), + TEGRA_VIDEO_FORMAT(YUV422, 16, YUYV8_1X16, 2, 1, T_Y8_U8__Y8_V8, + YUV422_8, YUYV, "YUV 4:2:2"), + TEGRA_VIDEO_FORMAT(YUV422, 16, YVYU8_1X16, 2, 1, T_Y8_V8__Y8_U8, + YUV422_8, YVYU, "YUV 4:2:2"), + TEGRA_VIDEO_FORMAT(YUV422, 16, UYVY8_1X16, 1, 1, T_Y8__V8U8_N422, + YUV422_8, NV16, "NV16"), + TEGRA_VIDEO_FORMAT(YUV422, 16, UYVY8_2X8, 2, 1, T_U8_Y8__V8_Y8, + YUV422_8, UYVY, "YUV 4:2:2 UYVY"), + TEGRA_VIDEO_FORMAT(YUV422, 16, VYUY8_2X8, 2, 1, T_V8_Y8__U8_Y8, + YUV422_8, VYUY, "YUV 4:2:2 VYUY"), + TEGRA_VIDEO_FORMAT(YUV422, 16, YUYV8_2X8, 2, 1, T_Y8_U8__Y8_V8, + YUV422_8, YUYV, "YUV 4:2:2 YUYV"), + TEGRA_VIDEO_FORMAT(YUV422, 16, YVYU8_2X8, 2, 1, T_Y8_V8__Y8_U8, + YUV422_8, YVYU, "YUV 4:2:2 YVYU"), +}; + +#endif diff --git a/drivers/media/platform/tegra/mipical/mipi_cal.h b/drivers/media/platform/tegra/mipical/mipi_cal.h new file mode 100644 index 00000000..229c834a --- /dev/null +++ b/drivers/media/platform/tegra/mipical/mipi_cal.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2022, NVIDIA CORPORATION, All rights reserved. 
+ */ + +#ifndef MIPI_CAL_H +#define MIPI_CAL_H + +#define DSID (1 << 31) +#define DSIC (1 << 30) +#define DSIB (1 << 29) +#define DSIA (1 << 28) +#define CSIH (1 << 27) +#define CSIG (1 << 26) +#define CSIF (1 << 25) +#define CSIE (1 << 24) +#define CSID (1 << 23) +#define CSIC (1 << 22) +#define CSIB (1 << 21) +#define CSIA (1 << 20) +#define CPHY_MASK 1 + +#ifdef CONFIG_TEGRA_MIPI_CAL +int tegra_mipi_bias_pad_enable(void); +int tegra_mipi_bias_pad_disable(void); +int tegra_mipi_calibration(int lanes); +int tegra_mipi_poweron(bool enable); +#else +static inline int tegra_mipi_bias_pad_enable(void) +{ + return 0; +} +static inline int tegra_mipi_bias_pad_disable(void) +{ + return 0; +} +static inline int tegra_mipi_calibration(int lanes) +{ + return 0; +} +static inline int tegra_mipi_poweron(bool enable) +{ + return 0; +} +#endif +#endif diff --git a/drivers/video/tegra/Makefile b/drivers/video/tegra/Makefile index bdb4be9c..a81bf6d0 100644 --- a/drivers/video/tegra/Makefile +++ b/drivers/video/tegra/Makefile @@ -6,3 +6,4 @@ obj-m += host/nvdla/ obj-m += host/pva/ obj-m += tsec/ obj-m += dc/bridge/ +obj-m += camera/ diff --git a/drivers/video/tegra/camera/Makefile b/drivers/video/tegra/camera/Makefile new file mode 100644 index 00000000..af2ada4f --- /dev/null +++ b/drivers/video/tegra/camera/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +LINUXINCLUDE += -I$(srctree.nvidia-oot)/drivers/video/tegra/host +LINUXINCLUDE += -DCONFIG_TEGRA_HOST1X + +obj-m += tegra_camera_platform.o diff --git a/drivers/video/tegra/camera/tegra_camera_dev_mfi.c b/drivers/video/tegra/camera/tegra_camera_dev_mfi.c new file mode 100644 index 00000000..91bc2a40 --- /dev/null +++ b/drivers/video/tegra/camera/tegra_camera_dev_mfi.c @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved. 
+ +#include + +#include + +static LIST_HEAD(cmfidev_list); +static DEFINE_MUTEX(cmfidev_mutex); + +static int tegra_camera_dev_mfi_init(void) +{ + INIT_LIST_HEAD(&cmfidev_list); + + return 0; +} + +void tegra_camera_dev_mfi_cb(void *stub) +{ + u32 idx = 0; + struct camera_mfi_dev *itr = NULL; + int err = 0; + mutex_lock(&cmfidev_mutex); + list_for_each_entry(itr, &cmfidev_list, list) { + if (itr->regmap) { + /* MFI driver has to delay the focuser writes by one + * frame, which is required to get sync in focus + * position and sharpness. + * So write previous frame focuser settings in current + * frame's callback, and then save current frame focuser + * writes for next callback. + */ + for (idx = 0; idx < itr->prev_num_used; idx++) { + err = regmap_write(itr->regmap, + itr->prev_reg[idx].addr, + itr->prev_reg[idx].val); + if (err) + pr_err("%s: [%s] regmap_write failed\n", + __func__, itr->name); + } + /* Consume current settings, which would be programmed + * in next frame callback. + */ + for (idx = 0; idx < itr->num_used; idx++) { + itr->prev_reg[idx].addr = itr->reg[idx].addr; + itr->prev_reg[idx].val = itr->reg[idx].val; + } + itr->prev_num_used = itr->num_used; + } else if (itr->i2c_client) { + for (idx = 0; idx < itr->num_used; idx++) { + err = i2c_transfer(itr->i2c_client->adapter, + &itr->msg[idx].msg, 1); + if (err != 1) + pr_err("%s: [%s] i2c_transfer failed\n", + __func__, itr->name); + } + } else { + pr_err("%s [%s] Unknown device mechanism\n", + __func__, itr->name); + } + itr->num_used = 0; + } + mutex_unlock(&cmfidev_mutex); +} +EXPORT_SYMBOL(tegra_camera_dev_mfi_cb); + +int tegra_camera_dev_mfi_wr_add_i2c( + struct camera_mfi_dev *cmfidev, + struct i2c_msg *msg, int num) +{ + int err = -ENODEV; + int i = 0; + struct camera_mfi_dev *itr = NULL; + + if (!strcmp(cmfidev->name, "")) { + err = -EINVAL; + goto cmfi_wr_add_i2c_end; + } + + mutex_lock(&cmfidev_mutex); + list_for_each_entry(itr, &cmfidev_list, list) { + if (!strcmp(itr->name, cmfidev->name)) 
{ + if (itr->num_used == CAMERA_REGCACHE_MAX) + err = -ENOSPC; + else { + for (i = 0; i < num; i++) { + itr->msg[itr->num_used].msg = msg[i]; + memcpy(itr->msg[itr->num_used].buf, + msg[i].buf, + msg[i].len); + itr->msg[itr->num_used].msg.buf = + itr->msg[itr->num_used].buf; + itr->num_used++; + } + err = 0; + } + } + } + mutex_unlock(&cmfidev_mutex); + +cmfi_wr_add_i2c_end: + return err; +} +EXPORT_SYMBOL(tegra_camera_dev_mfi_wr_add_i2c); + +int tegra_camera_dev_mfi_wr_add( + struct camera_mfi_dev *cmfidev, + u32 offset, u32 val) +{ + int err = -ENODEV; + struct camera_mfi_dev *itr = NULL; + + if (!strcmp(cmfidev->name, "")) { + err = -EINVAL; + goto cmfi_wr_add_end; + } + + mutex_lock(&cmfidev_mutex); + list_for_each_entry(itr, &cmfidev_list, list) { + if (!strcmp(itr->name, cmfidev->name)) { + if (itr->num_used == CAMERA_REGCACHE_MAX) { + err = -ENOSPC; + } else { + itr->reg[itr->num_used].addr = offset; + itr->reg[itr->num_used].val = val; + itr->num_used++; + err = 0; + } + } + } + mutex_unlock(&cmfidev_mutex); + +cmfi_wr_add_end: + return err; +} +EXPORT_SYMBOL(tegra_camera_dev_mfi_wr_add); + +int tegra_camera_dev_mfi_clear(struct camera_mfi_dev *cmfidev) +{ + int err = -ENODEV; + struct camera_mfi_dev *itr = NULL; + + if (cmfidev == NULL) { + err = -EINVAL; + goto cmfidev_clear_end; + } + + if (!strcmp(cmfidev->name, "")) { + err = -EINVAL; + goto cmfidev_clear_end; + } + + mutex_lock(&cmfidev_mutex); + list_for_each_entry(itr, &cmfidev_list, list) { + if (!strcmp(itr->name, cmfidev->name)) { + if (itr->num_used > 0) + pr_info("%s [%s] force clear Q pending writes\n", + __func__, itr->name); + itr->num_used = 0; + err = 0; + } + } + mutex_unlock(&cmfidev_mutex); + +cmfidev_clear_end: + return err; +} +EXPORT_SYMBOL(tegra_camera_dev_mfi_clear); + +int tegra_camera_dev_mfi_add_i2cclient( + struct camera_mfi_dev **cmfidev, + u8 *name, + struct i2c_client *i2c_client) +{ + int err = 0; + struct camera_mfi_dev *itr = NULL; + struct camera_mfi_dev *new_cmfidev = 
NULL; + + if (name == NULL || !strcmp(name, "")) + return -EINVAL; + + mutex_lock(&cmfidev_mutex); + list_for_each_entry(itr, &cmfidev_list, list) { + if (!strcmp(itr->name, name)) { + err = -EEXIST; + goto cmfidev_add_i2c_unlock; + } + } + if (!err) { + new_cmfidev = + kzalloc(sizeof(struct camera_mfi_dev), GFP_KERNEL); + if (!new_cmfidev) { + pr_err("%s memory low!\n", __func__); + err = -ENOMEM; + goto cmfidev_add_i2c_unlock; + } + memset(new_cmfidev, 0, sizeof(struct camera_mfi_dev)); + strncpy(new_cmfidev->name, name, sizeof(new_cmfidev->name)-1); + INIT_LIST_HEAD(&new_cmfidev->list); + new_cmfidev->i2c_client = i2c_client; + new_cmfidev->num_used = 0; + list_add(&new_cmfidev->list, &cmfidev_list); + } + + *cmfidev = new_cmfidev; + +cmfidev_add_i2c_unlock: + mutex_unlock(&cmfidev_mutex); + + return err; +} +EXPORT_SYMBOL(tegra_camera_dev_mfi_add_i2cclient); + +int tegra_camera_dev_mfi_add_regmap( + struct camera_mfi_dev **cmfidev, + u8 *name, + struct regmap *regmap) +{ + int err = 0; + struct camera_mfi_dev *itr = NULL; + struct camera_mfi_dev *new_cmfidev = NULL; + + if (name == NULL || !strcmp(name, "")) + return -EINVAL; + + mutex_lock(&cmfidev_mutex); + list_for_each_entry(itr, &cmfidev_list, list) { + if (!strcmp(itr->name, name)) { + err = -EEXIST; + goto cmfidev_add_regmap_unlock; + } + } + if (!err) { + new_cmfidev = + kzalloc(sizeof(struct camera_mfi_dev), GFP_KERNEL); + if (!new_cmfidev) { + pr_err("%s memory low!\n", __func__); + err = -ENOMEM; + goto cmfidev_add_regmap_unlock; + } + memset(new_cmfidev, 0, sizeof(struct camera_mfi_dev)); + strncpy(new_cmfidev->name, name, sizeof(new_cmfidev->name)-1); + INIT_LIST_HEAD(&new_cmfidev->list); + new_cmfidev->regmap = regmap; + new_cmfidev->num_used = 0; + new_cmfidev->prev_num_used = 0; + if (list_empty(&cmfidev_list)) + tegra_camera_dev_mfi_init(); + list_add(&new_cmfidev->list, &cmfidev_list); + } + + *cmfidev = new_cmfidev; + +cmfidev_add_regmap_unlock: + mutex_unlock(&cmfidev_mutex); + + return err; 
+} +EXPORT_SYMBOL(tegra_camera_dev_mfi_add_regmap); +MODULE_LICENSE("GPL"); diff --git a/drivers/video/tegra/camera/tegra_camera_platform.c b/drivers/video/tegra/camera/tegra_camera_platform.c new file mode 100644 index 00000000..c8d3b705 --- /dev/null +++ b/drivers/video/tegra/camera/tegra_camera_platform.c @@ -0,0 +1,1189 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2015-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#if IS_ENABLED(CONFIG_INTERCONNECT) && IS_ENABLED(CONFIG_TEGRA_T23X_GRHOST) +#include +#include +#endif +#define CAMDEV_NAME "tegra_camera_ctrl" + +/* Peak BPP for any of the YUV/Bayer formats */ +#define CAMERA_PEAK_BPP 2 + +#define LANE_SPEED_1_GBPS 1000000000 +#define LANE_SPEED_1_5_GBPS 1500000000 + +#if defined(CONFIG_TEGRA_BWMGR) +#include +#endif + +struct tegra_camera_info { + char devname[64]; + atomic_t in_use; + struct device *dev; +#if defined(CONFIG_TEGRA_BWMGR) + /* bandwidth manager handle */ + struct tegra_bwmgr_client *bwmgr_handle; +#endif + struct clk *emc; + struct clk *iso_emc; +#if defined(CONFIG_TEGRA_ISOMGR) + tegra_isomgr_handle isomgr_handle; + u64 max_bw; +#endif +#if defined(CONFIG_INTERCONNECT) + int icc_iso_id; + struct icc_path *icc_iso_path_handle; + int icc_noniso_id; + struct icc_path *icc_noniso_path_handle; + struct mutex icc_noniso_path_handle_lock; +#endif + struct mutex update_bw_lock; + u64 vi_mode_isobw; + u64 bypass_mode_isobw; + /* set max bw by default */ + bool en_max_bw; + + u64 phy_pixel_rate; + u64 active_pixel_rate; + u64 active_iso_bw; + u32 max_pixel_depth; + u32 ppc_divider; + u32 num_active_streams; + u32 num_device_lanes; + u32 sensor_type; + u32 memory_latency; + bool pg_mode; + struct list_head device_list; + struct mutex device_list_mutex; +}; + +static const struct of_device_id tegra_camera_of_ids[] = { + 
{ .compatible = "nvidia, tegra-camera-platform" }, + { }, +}; +MODULE_DEVICE_TABLE(of, tegra_camera_of_ids); + +static struct miscdevice tegra_camera_misc; + +static int tegra_camera_isomgr_register(struct tegra_camera_info *info, + struct device *dev) +{ +#if defined(CONFIG_TEGRA_ISOMGR) || \ + (IS_ENABLED(CONFIG_INTERCONNECT) && IS_ENABLED(CONFIG_TEGRA_T23X_GRHOST)) + int ret = 0; + u32 num_csi_lanes = 0; + u32 max_lane_speed = 0; + u32 bits_per_pixel = 0; + u32 vi_bpp = 0; + u64 vi_iso_bw = 0; + u32 vi_margin_pct = 0; + u32 max_pixel_rate = 0; + u32 isp_bpp = 0; + u64 isp_iso_bw = 0; + u32 isp_margin_pct = 0; + u32 tpg_max_iso = 0; + struct device_node *np = dev->of_node; + + dev_dbg(info->dev, "%s++\n", __func__); + + ret |= of_property_read_u32(np, "num_csi_lanes", &num_csi_lanes); + ret |= of_property_read_u32(np, "max_lane_speed", &max_lane_speed); + ret |= of_property_read_u32(np, "min_bits_per_pixel", &bits_per_pixel); + ret |= of_property_read_u32(np, "vi_peak_byte_per_pixel", &vi_bpp); + ret |= of_property_read_u32(np, "vi_bw_margin_pct", &vi_margin_pct); + ret |= of_property_read_u32(np, "max_pixel_rate", &max_pixel_rate); + ret |= of_property_read_u32(np, "isp_peak_byte_per_pixel", &isp_bpp); + ret |= of_property_read_u32(np, "isp_bw_margin_pct", &isp_margin_pct); + + if (ret) + dev_info(info->dev, "%s: some fields not in DT.\n", __func__); + + /* + * Use per-camera specifics to calculate ISO BW needed, + * which is smaller than the per-asic max. + * + * The formula for VI ISO BW is based on total number + * of active csi lanes when all cameras on the camera + * board are active. + * + * The formula for ISP ISO BW is based on max number + * of ISP's used in ISO mode given number of camera(s) + * on the camera board and the number of ISP's on the ASIC. + * + * The final ISO BW is based on the max of the two. 
+ */ + if (!bits_per_pixel) { + dev_err(info->dev, "bits_per_pixel is invalid\n"); + return -EINVAL; + } + vi_iso_bw = ((num_csi_lanes * max_lane_speed) / bits_per_pixel) + * vi_bpp * (100 + vi_margin_pct) / 100; + isp_iso_bw = max_pixel_rate * isp_bpp * (100 + isp_margin_pct) / 100; + if (vi_iso_bw > isp_iso_bw) + info->max_bw = vi_iso_bw; + else + info->max_bw = isp_iso_bw; + + if (!info->max_bw) { + dev_err(info->dev, "%s: BW must be non-zero\n", __func__); + return -EINVAL; + } + + ret = of_property_read_u32(np, "tpg_max_iso", &tpg_max_iso); + if (ret) + tpg_max_iso = 0; + else { + dev_info(info->dev, "%s tpg_max_iso = %uKBs\n", __func__, + tpg_max_iso); + info->max_bw = max_t(u64, info->max_bw, tpg_max_iso); + } +#endif + +#if IS_ENABLED(CONFIG_INTERCONNECT) && IS_ENABLED(CONFIG_TEGRA_T23X_GRHOST) + /* For T194 and earlier chips Interconnect is not supported. */ + if (tegra_get_chip_id() == TEGRA234) { + if (info->icc_iso_id) { + info->icc_iso_path_handle = + icc_get(dev, info->icc_iso_id, TEGRA_ICC_PRIMARY); + if (IS_ERR_OR_NULL(info->icc_iso_path_handle)) { + dev_err(info->dev, + "%s unable to get icc path (err=%ld)\n", + __func__, PTR_ERR(info->icc_iso_path_handle)); + return -ENOMEM; + } + } + dev_info(info->dev, "%s vi_iso_bw=%llu, max_bw=%llu\n", + __func__, vi_iso_bw, info->max_bw); + return 0; + } +#endif + +#if defined(CONFIG_TEGRA_ISOMGR) + /* Register with max possible BW for CAMERA usecases.*/ + info->isomgr_handle = tegra_isomgr_register( + TEGRA_ISO_CLIENT_TEGRA_CAMERA, + info->max_bw, + NULL, /* tegra_isomgr_renegotiate */ + NULL); /* *priv */ + + if (IS_ERR(info->isomgr_handle)) { + /* Defer probe if isomgr is not up */ + if (info->isomgr_handle == ERR_PTR(-EAGAIN)) + return -EPROBE_DEFER; + dev_err(info->dev, + "%s: unable to register to isomgr\n", + __func__); + return -ENOMEM; + } +#endif + +#if defined(CONFIG_TEGRA_ISOMGR) || \ + (IS_ENABLED(CONFIG_INTERCONNECT) && IS_ENABLED(CONFIG_TEGRA_T23X_GRHOST)) + dev_info(info->dev, "%s 
isp_iso_bw=%llu, vi_iso_bw=%llu, max_bw=%llu\n", + __func__, isp_iso_bw, vi_iso_bw, info->max_bw); +#endif + + return 0; +} + +static int tegra_camera_isomgr_unregister(struct tegra_camera_info *info) +{ +#if IS_ENABLED(CONFIG_INTERCONNECT) && IS_ENABLED(CONFIG_TEGRA_T23X_GRHOST) + if (tegra_get_chip_id() == TEGRA234) { + icc_put(info->icc_iso_path_handle); + info->icc_iso_path_handle = NULL; + } +#endif + +#if defined(CONFIG_TEGRA_ISOMGR) + tegra_isomgr_unregister(info->isomgr_handle); + info->isomgr_handle = NULL; +#endif + + return 0; +} + +static int tegra_camera_isomgr_request( + struct tegra_camera_info *info, uint iso_bw, uint lt) +{ + int ret = 0; + + dev_dbg(info->dev, + "%s++ bw=%u, lt=%u\n", __func__, iso_bw, lt); + +#if IS_ENABLED(CONFIG_INTERCONNECT) && IS_ENABLED(CONFIG_TEGRA_T23X_GRHOST) + if (tegra_get_chip_id() == TEGRA234) { + /* VI6 does not tolerate DVFS, so we need to request max DRAM floor */ + ret = icc_set_bw(info->icc_iso_path_handle, + iso_bw, UINT_MAX); + if (ret) { + dev_err(info->dev, + "%s: ICC failed to reserve %u KBps\n", + __func__, iso_bw); + } + + return ret; + } +#endif + +#if defined(CONFIG_TEGRA_ISOMGR) + if (!info->isomgr_handle) { + dev_err(info->dev, + "%s: isomgr_handle is NULL\n", + __func__); + return -EINVAL; + } + + /* return value of tegra_isomgr_reserve is dvfs latency in usec */ + ret = tegra_isomgr_reserve(info->isomgr_handle, + iso_bw, /* KB/sec */ + lt); /* usec */ + if (!ret) { + dev_err(info->dev, + "%s: failed to reserve %u KBps\n", __func__, iso_bw); + return -ENOMEM; + } + + /* return value of tegra_isomgr_realize is dvfs latency in usec */ + ret = tegra_isomgr_realize(info->isomgr_handle); + if (ret) + dev_dbg(info->dev, + "%s: tegra_camera isomgr latency is %d usec", + __func__, ret); + else { + dev_err(info->dev, + "%s: failed to realize %u KBps\n", __func__, iso_bw); + return -ENOMEM; + } +#endif + + return 0; +} + +int tegra_camera_emc_clk_enable(void) +{ +#if defined(CONFIG_TEGRA_BWMGR) + return 0; 
+#else + struct tegra_camera_info *info; + int ret = 0; + + info = dev_get_drvdata(tegra_camera_misc.parent); + if (!info) + return -EINVAL; + ret = clk_prepare_enable(info->emc); + if (ret) { + dev_err(info->dev, "Cannot enable camera.emc\n"); + return ret; + } + + ret = clk_prepare_enable(info->iso_emc); + if (ret) { + dev_err(info->dev, "Cannot enable camera_iso.emc\n"); + goto err_iso_emc; + } + + return 0; +err_iso_emc: + clk_disable_unprepare(info->emc); + return ret; +#endif +} +EXPORT_SYMBOL(tegra_camera_emc_clk_enable); + +int tegra_camera_emc_clk_disable(void) +{ +#if defined(CONFIG_TEGRA_BWMGR) + return 0; +#else + struct tegra_camera_info *info; + + info = dev_get_drvdata(tegra_camera_misc.parent); + if (!info) + return -EINVAL; + clk_disable_unprepare(info->emc); + clk_disable_unprepare(info->iso_emc); + return 0; +#endif +} +EXPORT_SYMBOL(tegra_camera_emc_clk_disable); + +static int tegra_camera_open(struct inode *inode, struct file *file) +{ + struct tegra_camera_info *info; + struct miscdevice *mdev; + + mdev = file->private_data; + info = dev_get_drvdata(mdev->parent); + file->private_data = info; + +#if IS_ENABLED(CONFIG_INTERCONNECT) && IS_ENABLED(CONFIG_TEGRA_T23X_GRHOST) + /* For T194 and earlier chips Interconnect is not supported. 
*/ + if (tegra_get_chip_id() == TEGRA234) { + mutex_lock(&info->icc_noniso_path_handle_lock); + info->icc_noniso_id = TEGRA_ICC_ISP; + info->icc_noniso_path_handle = + icc_get(info->dev, + info->icc_noniso_id, TEGRA_ICC_PRIMARY); + mutex_unlock(&info->icc_noniso_path_handle_lock); + + if (IS_ERR_OR_NULL(info->icc_noniso_path_handle)) { + dev_err(info->dev, + "%s unable to get icc path (err=%ld)\n", + __func__, PTR_ERR(info->icc_noniso_path_handle)); + + return -ENOMEM; + } + + return 0; + } +#endif + +#if defined(CONFIG_TEGRA_BWMGR) + /* get bandwidth manager handle if needed */ + info->bwmgr_handle = + tegra_bwmgr_register(TEGRA_BWMGR_CLIENT_CAMERA_NON_ISO); + + /* set the initial rate */ + if (IS_ERR_OR_NULL(info->bwmgr_handle)) { + info->bwmgr_handle = NULL; + return -ENODEV; + } + tegra_bwmgr_set_emc(info->bwmgr_handle, 0, + TEGRA_BWMGR_SET_EMC_SHARED_BW); + return 0; +#else + return tegra_camera_emc_clk_enable(); +#endif +} + +static int tegra_camera_release(struct inode *inode, struct file *file) +{ + + struct tegra_camera_info *info; + + info = file->private_data; +#if IS_ENABLED(CONFIG_INTERCONNECT) && IS_ENABLED(CONFIG_TEGRA_T23X_GRHOST) + if (tegra_get_chip_id() == TEGRA234) { + mutex_lock(&info->icc_noniso_path_handle_lock); + icc_put(info->icc_noniso_path_handle); + info->icc_noniso_path_handle = NULL; + mutex_unlock(&info->icc_noniso_path_handle_lock); + return 0; + } +#endif +#if defined(CONFIG_TEGRA_BWMGR) + tegra_bwmgr_unregister(info->bwmgr_handle); +#else + tegra_camera_emc_clk_disable(); +#endif + return 0; +} + +#ifdef CONFIG_DEBUG_FS +static u64 vi_mode_d; +static u64 bypass_mode_d; + +static int dbgfs_tegra_camera_init(void) +{ + struct dentry *dir; + + dir = debugfs_create_dir("tegra_camera_platform", NULL); + if (!dir) + return -ENOMEM; + + debugfs_create_u64("vi", S_IRUGO, dir, &vi_mode_d); + debugfs_create_u64("scf", S_IRUGO, dir, &bypass_mode_d); + + return 0; +} +#endif + +/* + * submits total aggregated iso bw request to isomgr. 
+ */ +int tegra_camera_update_isobw(void) +{ + struct tegra_camera_info *info; + unsigned long total_khz; + unsigned long bw; +#ifdef CONFIG_TEGRA_MC + unsigned long bw_mbps; +#endif + int ret = 0; + + if (tegra_camera_misc.parent == NULL) { + pr_info("driver not enabled, cannot update bw\n"); + return -ENODEV; + } + + info = dev_get_drvdata(tegra_camera_misc.parent); + if (!info) + return -ENODEV; + mutex_lock(&info->update_bw_lock); + + bw = info->active_iso_bw; + if (info->bypass_mode_isobw > info->active_iso_bw) + bw = info->bypass_mode_isobw; + + if (info->bypass_mode_isobw > 0) + info->num_active_streams++; + +#if defined(CONFIG_TEGRA_ISOMGR) + /* Bug 200323801 consider iso bw of both vi mode and vi-bypass mode */ + if (bw >= info->max_bw) { + dev_info(info->dev, + "%s: Warning, Requested ISO BW %lu has been capped to VI's max BW %llu\n", + __func__, bw, info->max_bw); + bw = info->max_bw; + } + + if (info->pg_mode) + bw = info->max_bw; +#endif + if (info->num_active_streams == 0) + bw = 0; + +#ifdef CONFIG_NV_TEGRA_MC + /* + * Different chip versions use different APIs to set LA for VI. + * If one fails, try another, and fail if both of them don't work. + * Convert bw from kbps to mbps, and round up to the next mbps to + * guarantee it's larger than the requested for LA/PTSA setting. 
+ */ + bw_mbps = (bw / 1000U) + 1; + ret = tegra_set_camera_ptsa(TEGRA_LA_VI_W, bw_mbps, 1); + if (ret) { + ret = tegra_set_latency_allowance(TEGRA_LA_VI_W, bw_mbps); + if (ret) { + dev_err(info->dev, "%s: set la failed: %d\n", + __func__, ret); + mutex_unlock(&info->update_bw_lock); + return ret; + } + } +#endif + + /* Use Khz to prevent overflow */ + total_khz = 0U; + total_khz = min(ULONG_MAX / 1000, total_khz); + + dev_dbg(info->dev, "%s:Set iso bw %lu kbyteps at %lu KHz\n", + __func__, bw, total_khz); +#if !defined(CONFIG_TEGRA_BWMGR) + ret = clk_set_rate(info->iso_emc, total_khz * 1000); + if (ret) + dev_err(info->dev, "%s:Failed to set iso bw\n", + __func__); +#endif + /* + * Request to ISOMGR or ICC depending on chip version. + */ + ret = tegra_camera_isomgr_request(info, bw, info->memory_latency); + if (ret) { + dev_err(info->dev, + "%s: failed to reserve %lu KBps with isomgr\n", + __func__, bw); + mutex_unlock(&info->update_bw_lock); + return -ENOMEM; + } + + info->vi_mode_isobw = bw; +#ifdef CONFIG_DEBUG_FS + vi_mode_d = bw; + bypass_mode_d = info->bypass_mode_isobw; +#endif + + if (info->bypass_mode_isobw > 0) + info->num_active_streams--; + + mutex_unlock(&info->update_bw_lock); + return ret; +} +EXPORT_SYMBOL(tegra_camera_update_isobw); + +static long tegra_camera_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + int ret = 0; + struct tegra_camera_info *info; + + info = file->private_data; + + switch (_IOC_NR(cmd)) { + case _IOC_NR(TEGRA_CAMERA_IOCTL_SET_BW): + { + struct bw_info kcopy; + unsigned long mc_khz = 0; + + memset(&kcopy, 0, sizeof(kcopy)); + + if (copy_from_user(&kcopy, (const void __user *)arg, + sizeof(struct bw_info))) { + dev_err(info->dev, "%s:Failed to get data from user\n", + __func__); + return -EFAULT; + } + + /* Use Khz to prevent overflow */ + mc_khz = 0; + mc_khz = min(ULONG_MAX / 1000, mc_khz); + + if (kcopy.is_iso) { + info->bypass_mode_isobw = kcopy.bw; + ret = tegra_camera_update_isobw(); + } else { + 
dev_dbg(info->dev, "%s:Set bw %llu at %lu KHz\n",
+				__func__, kcopy.bw, mc_khz);
+#if IS_ENABLED(CONFIG_INTERCONNECT) && IS_ENABLED(CONFIG_TEGRA_T23X_GRHOST)
+			if (tegra_get_chip_id() == TEGRA234) {
+				mutex_lock(&info->icc_noniso_path_handle_lock);
+				ret = icc_set_bw(info->icc_noniso_path_handle,
+					(u32)(kcopy.bw & 0xFFFFFFFF),
+					(u32)(kcopy.bw & 0xFFFFFFFF));
+				mutex_unlock(&info->icc_noniso_path_handle_lock);
+				if (ret) {
+					dev_err(info->dev,
+						"%s: ICC failed to reserve %u KBps\n",
+						__func__, (u32)(kcopy.bw & 0xFFFFFFFF));
+				}
+				break;
+			}
+#endif
+			ret = clk_set_rate(info->emc, mc_khz * 1000);
+		}
+		break;
+	}
+
+	case _IOC_NR(TEGRA_CAMERA_IOCTL_GET_BW):
+	{
+#if IS_ENABLED(CONFIG_INTERCONNECT) && IS_ENABLED(CONFIG_TEGRA_T23X_GRHOST)
+		if (tegra_get_chip_id() == TEGRA234) {
+			dev_err(info->dev,
+				"%s:ioctl TEGRA_CAMERA_IOCTL_GET_BW not supported\n",
+				__func__);
+			return -EFAULT;
+
+		}
+#endif
+
+		/*
+		 * Not implemented on any chip in this port. The unused
+		 * mc_hz/bw locals and the unreachable break that followed
+		 * this unconditional return were removed.
+		 */
+		return -EFAULT;
+	}
+
+	case _IOC_NR(TEGRA_CAMERA_IOCTL_GET_CURR_REQ_ISO_BW):
+	{
+		struct tegra_camera_info *state;
+		u64 bw;
+
+		state = dev_get_drvdata(tegra_camera_misc.parent);
+		if (!state)
+			return -ENODEV;
+
+		mutex_lock(&state->update_bw_lock);
+		bw = state->vi_mode_isobw;
+		mutex_unlock(&state->update_bw_lock);
+
+		if (copy_to_user((void __user *)arg, (const void *)&bw,
+			sizeof(bw))) {
+			dev_err(info->dev,
+				"%s:Failed to copy data to user\n",
+				__func__);
+			return -EFAULT;
+		}
+
+		break;
+	}
+
+	default:
+		break;
+	}
+	return ret;
+}
+
+static const struct file_operations tegra_camera_ops = {
+	.owner = THIS_MODULE,
+	.open = tegra_camera_open,
+	.unlocked_ioctl = tegra_camera_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = tegra_camera_ioctl,
+#endif
+	.release = tegra_camera_release,
+};
+
+static bool is_isomgr_up(struct device *dev)
+{
+#if (IS_ENABLED(CONFIG_INTERCONNECT) && IS_ENABLED(CONFIG_TEGRA_T23X_GRHOST))
+	if (tegra_get_chip_id() == TEGRA234) {
+		bool ret = false;
+		struct icc_path 
*icc_iso_path_handle = + icc_get(dev, TEGRA_ICC_VI, TEGRA_ICC_PRIMARY); + struct icc_path *icc_niso_path_handle = + icc_get(dev, TEGRA_ICC_ISP, TEGRA_ICC_PRIMARY); + + if (PTR_ERR(icc_iso_path_handle) == -EPROBE_DEFER || + PTR_ERR(icc_niso_path_handle) == -EPROBE_DEFER) { + dev_dbg(dev, + "%s unable to get icc path, as icc driver not up yet\n", + __func__); + ret = false; + } else if (icc_iso_path_handle == NULL || + icc_niso_path_handle == NULL) { + dev_err(dev, "%s ICC disabled\n", __func__); + ret = false; + } else if (IS_ERR(icc_iso_path_handle) || + IS_ERR(icc_niso_path_handle)) { + dev_err(dev, "%s icc error, iso : %ld non-iso : %ld\n", + __func__, PTR_ERR(icc_iso_path_handle), + PTR_ERR(icc_niso_path_handle)); + ret = false; + } else { + ret = true; + } + + if (!IS_ERR_OR_NULL(icc_iso_path_handle)) + icc_put(icc_iso_path_handle); + if (!IS_ERR_OR_NULL(icc_niso_path_handle)) + icc_put(icc_niso_path_handle); + + return ret; + } +#endif + +#if defined(CONFIG_TEGRA_ISOMGR) + if (tegra_get_chip_id() != TEGRA234) + return tegra_isomgr_init_status(); +#endif + +return true; +} + +static int tegra_camera_probe(struct platform_device *pdev) +{ + int ret; + struct tegra_camera_info *info; + + /* Defer the probe till isomgr is initialized */ + if (!is_isomgr_up(&pdev->dev)) { + dev_dbg(&pdev->dev, + "%s:camera_platform_driver probe deferred as isomgr not up\n", + __func__); + return -EPROBE_DEFER; + } + + dev_dbg(&pdev->dev, + "%s:tegra_camera_platform driver probe\n", __func__); + + tegra_camera_misc.minor = MISC_DYNAMIC_MINOR; + tegra_camera_misc.name = CAMDEV_NAME; + tegra_camera_misc.fops = &tegra_camera_ops; + tegra_camera_misc.parent = &pdev->dev; + + ret = misc_register(&tegra_camera_misc); + if (ret) { + dev_err(tegra_camera_misc.this_device, + "register failed for %s\n", tegra_camera_misc.name); + return ret; + } + + info = devm_kzalloc(tegra_camera_misc.this_device, + sizeof(struct tegra_camera_info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + 
strcpy(info->devname, tegra_camera_misc.name); + info->dev = tegra_camera_misc.this_device; + +#if !defined(CONFIG_TEGRA_BWMGR) + info->emc = devm_clk_get(info->dev, "emc"); + if (IS_ERR(info->emc)) { + dev_err(info->dev, "Failed to get camera.emc\n"); + return -EINVAL; + } + clk_set_rate(info->emc, 0); + info->iso_emc = devm_clk_get(info->dev, "iso.emc"); + if (IS_ERR(info->iso_emc)) { + dev_err(info->dev, "Failed to get camera_iso.emc\n"); + return -EINVAL; + } + clk_set_rate(info->iso_emc, 0); +#endif + mutex_init(&info->update_bw_lock); +#if IS_ENABLED(CONFIG_INTERCONNECT) && IS_ENABLED(CONFIG_TEGRA_T23X_GRHOST) + info->icc_iso_id = TEGRA_ICC_VI; + mutex_init(&info->icc_noniso_path_handle_lock); +#endif + /* Register Camera as isomgr client. */ + ret = tegra_camera_isomgr_register(info, &pdev->dev); + if (ret) { + dev_err(info->dev, + "%s: failed to register CAMERA as isomgr client\n", + __func__); + return -ENOMEM; + } + + info->en_max_bw = of_property_read_bool(pdev->dev.of_node, + "default-max-bw"); + if (info->en_max_bw == true) { +#if defined(CONFIG_TEGRA_ISOMGR) || \ + (IS_ENABLED(CONFIG_INTERCONNECT) && IS_ENABLED(CONFIG_TEGRA_T23X_GRHOST)) + ret = tegra_camera_isomgr_request(info, info->max_bw, + info->memory_latency); + if (ret) { + dev_err(info->dev, + "%s: failed to request max bw\n", __func__); + tegra_camera_isomgr_unregister(info); + return -EFAULT; + } +#endif + } + info->phy_pixel_rate = 0; + info->active_pixel_rate = 0; + info->active_iso_bw = 0; + info->max_pixel_depth = 0; + info->ppc_divider = 1; + info->num_active_streams = 0; + info->num_device_lanes = 0; + info->sensor_type = 0; + info->memory_latency = 0; + info->pg_mode = false; + mutex_init(&info->device_list_mutex); + INIT_LIST_HEAD(&info->device_list); + + platform_set_drvdata(pdev, info); +#ifdef CONFIG_DEBUG_FS + ret = dbgfs_tegra_camera_init(); + if (ret) + dev_err(info->dev, "Fail to create debugfs"); +#endif + return 0; +} + +static void update_platform_data(struct 
tegra_camera_dev_info *cdev, + struct tegra_camera_info *info, bool dev_registered) +{ + /* TPG: handled differently based on + * throughput calculations. + */ + static u64 phy_pixel_rate_aggregated; + + if (cdev->sensor_type == SENSORTYPE_VIRTUAL) + info->pg_mode = dev_registered; + + if (cdev->sensor_type != SENSORTYPE_NONE) + info->sensor_type = cdev->sensor_type; + + if (cdev->hw_type != HWTYPE_NONE) { + if (info->num_device_lanes < cdev->lane_num) + info->num_device_lanes = cdev->lane_num; + } + + if (dev_registered) { + /* if bytes per pixel is greater than 2, then num_ppc + * is 4 for VI. Set divider for this case. + */ + if (cdev->bpp > 2) + info->ppc_divider = 2; + if (cdev->lane_num < info->num_device_lanes) { + // temp variable to store aggregated rate + phy_pixel_rate_aggregated += cdev->pixel_rate; + } else if (cdev->lane_num == info->num_device_lanes) { + if (info->phy_pixel_rate < cdev->pixel_rate) + info->phy_pixel_rate = cdev->pixel_rate; + } + + if (info->phy_pixel_rate < phy_pixel_rate_aggregated) + info->phy_pixel_rate = phy_pixel_rate_aggregated; + + if (info->max_pixel_depth < cdev->pixel_bit_depth) + info->max_pixel_depth = cdev->pixel_bit_depth; + if (info->memory_latency < cdev->memory_latency) + info->memory_latency = cdev->memory_latency; + } +} + +static int add_nvhost_client(struct tegra_camera_dev_info *cdev) +{ + int ret = 0; + + if (cdev->hw_type == HWTYPE_NONE) + return 0; + + ret = nvhost_module_add_client(cdev->pdev, &cdev->hw_type); + return ret; + return 0; +} + +static int remove_nvhost_client(struct tegra_camera_dev_info *cdev) +{ + if (cdev->hw_type == HWTYPE_NONE) + return 0; + + nvhost_module_remove_client(cdev->pdev, &cdev->hw_type); + return 0; +} + +int tegra_camera_device_register(struct tegra_camera_dev_info *cdev_info, + void *priv) +{ + int err = 0; + struct tegra_camera_dev_info *cdev; + struct tegra_camera_info *info; + + /* + * If tegra_camera_platform is not enabled, devices + * cannot be registered, but the HW 
engines will still be probed. + * So just return without registering. + */ + if (tegra_camera_misc.parent == NULL) { + pr_info("driver not enabled, cannot register any devices\n"); + return 0; + } + + if (!cdev_info || !priv) + return -EINVAL; + + info = dev_get_drvdata(tegra_camera_misc.parent); + if (!info) + return -EINVAL; + + cdev = kzalloc(sizeof(struct tegra_camera_dev_info), GFP_KERNEL); + if (!cdev) + return -ENOMEM; + + *cdev = *cdev_info; + + INIT_LIST_HEAD(&cdev->device_node); + cdev->priv = priv; + + mutex_lock(&info->device_list_mutex); + list_add(&cdev->device_node, &info->device_list); + err = add_nvhost_client(cdev); + if (err) { + mutex_unlock(&info->device_list_mutex); + dev_err(info->dev, "%s could not add %d to nvhost\n", + __func__, cdev->hw_type); + return err; + } + update_platform_data(cdev, info, true); + mutex_unlock(&info->device_list_mutex); + + return err; +} +EXPORT_SYMBOL(tegra_camera_device_register); + +int tegra_camera_device_unregister(void *priv) +{ + struct tegra_camera_dev_info *cdev, *tmp; + int found = 0; + struct tegra_camera_info *info; + + /* + * If tegra_camera_platform is not enabled, devices + * were not registered, so return here. 
+ */ + if (tegra_camera_misc.parent == NULL) { + pr_info("driver not enabled, no devices were registered\n"); + return 0; + } + + info = dev_get_drvdata(tegra_camera_misc.parent); + if (!info) + return -EINVAL; + + mutex_lock(&info->device_list_mutex); + list_for_each_entry_safe(cdev, tmp, &info->device_list, device_node) { + if (priv == cdev->priv) { + list_del(&cdev->device_node); + found = 1; + break; + } + } + + if (found) { + remove_nvhost_client(cdev); + update_platform_data(cdev, info, false); + kfree(cdev); + } + mutex_unlock(&info->device_list_mutex); + + return 0; +} +EXPORT_SYMBOL(tegra_camera_device_unregister); + +int tegra_camera_get_device_list_entry(const u32 hw_type, const void *priv, + struct tegra_camera_dev_info *cdev_info) +{ + struct tegra_camera_dev_info *cdev; + struct tegra_camera_info *info; + int found = 0; + int ret = 0; + + if (tegra_camera_misc.parent == NULL) + return -EINVAL; + + info = dev_get_drvdata(tegra_camera_misc.parent); + if (!info) + return -EINVAL; + + mutex_lock(&info->device_list_mutex); + list_for_each_entry(cdev, &info->device_list, device_node) { + if (hw_type == cdev->hw_type) { + /* + * If priv is NULL yet the hw_type is set to that of a + * sensor the first sensor in the device list will be + * returned. Otherwise, a NULL priv and a matching + * hw_type will return the hw unit (VI, CSI etc.). + * + * Otherwise, a non NULL priv is used as an additional + * constraint to return specific devices who may or + * may not share common hw_types. 
+ */ + if (!priv) { + found = 1; + break; + } else if (priv == cdev->priv) { + found = 1; + break; + } + } + } + + if (found && (cdev_info != NULL)) + *cdev_info = *cdev; + + mutex_unlock(&info->device_list_mutex); + + if (!found) + return -ENOENT; + + return ret; +} +EXPORT_SYMBOL(tegra_camera_get_device_list_entry); + +int tegra_camera_get_device_list_stats(u32 *n_sensors, u32 *n_hwtypes) +{ + struct tegra_camera_dev_info *cdev; + struct tegra_camera_info *info; + int ret = 0; + + if (!n_sensors || !n_hwtypes) + return -EINVAL; + + if (tegra_camera_misc.parent == NULL) + return -EINVAL; + + info = dev_get_drvdata(tegra_camera_misc.parent); + if (!info) + return -EINVAL; + + *n_sensors = 0; + *n_hwtypes = 0; + + mutex_lock(&info->device_list_mutex); + list_for_each_entry(cdev, &info->device_list, device_node) { + if (cdev->hw_type == HWTYPE_NONE) + (*n_sensors)++; + else + (*n_hwtypes)++; + } + mutex_unlock(&info->device_list_mutex); + + return ret; +} +EXPORT_SYMBOL(tegra_camera_get_device_list_stats); + +static int calculate_and_set_device_clock(struct tegra_camera_info *info, + struct tegra_camera_dev_info *cdev) +{ + int ret = 0; + u64 active_pr = info->active_pixel_rate; + u64 phy_pr = info->phy_pixel_rate; + u32 overhead = cdev->overhead + 100; + u32 max_depth = info->max_pixel_depth; + u32 bus_width = cdev->bus_width; + u32 lane_num = cdev->lane_num; + u64 lane_speed = cdev->lane_speed; + u32 ppc = (cdev->ppc) ? cdev->ppc : 1; + u32 ppc_divider = (ppc > 1) ? info->ppc_divider : 1; + u64 nr = 0; + u64 dr = 0; + u64 clk_rate = 0; + u64 final_pr = (cdev->use_max) ? 
phy_pr : active_pr; + bool set_clk = true; + + if (cdev->hw_type == HWTYPE_NONE) + return 0; + + switch (cdev->hw_type) { + case HWTYPE_CSI: + if (info->sensor_type == SENSORTYPE_SLVSEC) + set_clk = false; + nr = max_depth * final_pr * overhead; + dr = bus_width * 100; + if (dr == 0) + return -EINVAL; + break; + case HWTYPE_VI: + nr = final_pr * overhead; + dr = 100 * (ppc / ppc_divider); + break; + case HWTYPE_ISPA: + case HWTYPE_ISPB: + nr = final_pr * overhead; + dr = 100 * ppc; + break; + case HWTYPE_SLVSEC: + if (info->sensor_type != SENSORTYPE_SLVSEC) + set_clk = false; + nr = lane_speed * lane_num * overhead; + dr = bus_width * 100; + if (dr == 0) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* avoid rounding errors by adding dr to nr */ + clk_rate = (nr + dr) / dr; + + /* Use special rates based on throughput + * for TPG. + */ + if (info->pg_mode) { + clk_rate = (cdev->pg_clk_rate) ? + cdev->pg_clk_rate : DEFAULT_PG_CLK_RATE; + } + + /* no stream active, set to 0 */ + if (info->num_active_streams == 0) + clk_rate = 0; + + if (clk_rate != cdev->clk_rate) + cdev->clk_rate = clk_rate; + /*TODO OOT nvhost_module_set_rate, nvhost_module_get_rate + else + set_clk = false; + + if (set_clk) { + ret = nvhost_module_set_rate(cdev->pdev, &cdev->hw_type, + cdev->clk_rate, 0, NVHOST_CLOCK); + if (ret) + return ret; + + // save the actual rate set by nvhost + ret = nvhost_module_get_rate(cdev->pdev, + &cdev->actual_clk_rate, 0); + }*/ + + return ret; +} + +int tegra_camera_update_clknbw(void *priv, bool stream_on) +{ + struct tegra_camera_dev_info *cdev; + struct tegra_camera_info *info; + int ret = 0; + + info = dev_get_drvdata(tegra_camera_misc.parent); + if (!info) + return -EINVAL; + + mutex_lock(&info->device_list_mutex); + /* Need to traverse the list twice, first to make sure that + * stream on is set for the active stream and then to + * update clocks and BW. + * Needed as devices could have been added in any order in the list. 
+ */ + list_for_each_entry(cdev, &info->device_list, device_node) { + if (priv == cdev->priv) { + /* set stream on */ + cdev->stream_on = stream_on; + if (stream_on) { + info->active_pixel_rate += cdev->pixel_rate; + info->active_iso_bw += cdev->bw; + info->num_active_streams++; + } else { + info->active_pixel_rate -= cdev->pixel_rate; + info->active_iso_bw -= cdev->bw; + info->num_active_streams--; + } + break; + } + } + + /* update clocks */ + list_for_each_entry(cdev, &info->device_list, device_node) { + ret = calculate_and_set_device_clock(info, cdev); + if (ret) { + mutex_unlock(&info->device_list_mutex); + return -EINVAL; + } + } + mutex_unlock(&info->device_list_mutex); + + /* set BW */ + tegra_camera_update_isobw(); + + return ret; +} +EXPORT_SYMBOL(tegra_camera_update_clknbw); + +static int tegra_camera_remove(struct platform_device *pdev) +{ + struct tegra_camera_info *info = platform_get_drvdata(pdev); + + dev_info(&pdev->dev, "%s:camera_platform_driver remove\n", __func__); + + /* deallocate isomgr bw */ + if (info->en_max_bw) + tegra_camera_isomgr_request(info, 0, info->memory_latency); + + tegra_camera_isomgr_unregister(info); + misc_deregister(&tegra_camera_misc); + + return 0; +} + +static struct platform_driver tegra_camera_driver = { + .probe = tegra_camera_probe, + .remove = tegra_camera_remove, + .driver = { + .owner = THIS_MODULE, + .name = "tegra_camera_platform", + .of_match_table = tegra_camera_of_ids + } +}; +static int __init tegra_camera_init(void) +{ + return platform_driver_register(&tegra_camera_driver); +} +static void __exit tegra_camera_exit(void) +{ + platform_driver_unregister(&tegra_camera_driver); +} + +module_init(tegra_camera_init); +module_exit(tegra_camera_exit); +MODULE_LICENSE("GPL"); diff --git a/drivers/video/tegra/host/host1x/host1x.h b/drivers/video/tegra/host/host1x/host1x.h new file mode 100644 index 00000000..879fc37b --- /dev/null +++ b/drivers/video/tegra/host/host1x/host1x.h @@ -0,0 +1,245 @@ +/* 
SPDX-License-Identifier: GPL-2.0-only */ +/* + * Tegra Graphics Host Driver Entrypoint + * + * Copyright (c) 2010-2022, NVIDIA Corporation. All rights reserved. + */ + +#ifndef __NVHOST_HOST1X_H +#define __NVHOST_HOST1X_H + +#include +#include +#include + +#include "nvhost_syncpt.h" +#include "nvhost_channel.h" +#include "nvhost_intr.h" + +#define TRACE_MAX_LENGTH 128U +#define IFACE_NAME "nvhost" + +struct nvhost_chip_support; +struct nvhost_channel; +struct mem_mgr; + +extern long linsim_cl; + +/* + * Policy determines how do we store the syncpts, + * i.e. either per channel (in struct nvhost_channel) + * or per channel instance (in struct nvhost_channel_userctx) + */ +enum nvhost_syncpt_policy { + SYNCPT_PER_CHANNEL = 0, + SYNCPT_PER_CHANNEL_INSTANCE, +}; + +/* + * Policy determines when to map HW channel to device, + * i.e. either on channel device node open time + * or on work submission time + */ +enum nvhost_channel_policy { + MAP_CHANNEL_ON_OPEN = 0, + MAP_CHANNEL_ON_SUBMIT, +}; + +struct host1x_device_info { + /* Channel info */ + int nb_channels; /* host1x: num channels supported */ + int ch_base; /* host1x: channel base */ + int ch_limit; /* host1x: channel limit */ + enum nvhost_channel_policy channel_policy; /* host1x: channel policy */ + + /* Syncpoint info */ + int nb_hw_pts; /* host1x: num syncpoints supported + in h/w */ + int nb_pts; /* host1x: num syncpoints supported + in s/w where nb_pts <= nb_hw_pts */ + int pts_base; /* host1x: syncpoint base */ + int pts_limit; /* host1x: syncpoint limit */ + int nb_syncpt_irqs; /* host1x: number of syncpoint irqs */ + enum nvhost_syncpt_policy syncpt_policy; /* host1x: syncpoint policy */ + int nb_mlocks; /* host1x: number of mlocks */ + int (*initialize_chip_support)(struct nvhost_master *, + struct nvhost_chip_support *); + int nb_actmons; + /* true if host1x access direct but engines are not owned */ + bool vmserver_owns_engines; + /* true if hw supports remote syncpoint interrupts */ + bool 
use_cross_vm_interrupts; + /* host1x: reg resources */ + char *resources[NVHOST_MODULE_MAX_IORESOURCE_MEM]; + /* host1x: number of resources */ + int nb_resources; + /* cmdfifo only accessible from hypervisor? */ + bool secure_cmdfifo; + /* ctrl device node name if not default */ + const char *ctrl_name; + /* Size of a syncpoint page in the syncpoint<->mss interface */ + uint32_t syncpt_page_size; + /* If MLOCK locked-state can be written through register */ + bool rw_mlock_register; +}; + +struct nvhost_master { + void __iomem *aperture; + void __iomem *sync_aperture; + void __iomem *actmon_aperture; + struct class *nvhost_class; + struct cdev cdev; + struct device *ctrl; + struct nvhost_syncpt syncpt; + struct nvhost_intr intr; + struct platform_device *dev; + atomic_t clientid; + struct host1x_device_info info; + struct nvhost_characteristics nvhost_char; + struct kobject *caps_kobj; + struct nvhost_capability_node *caps_nodes; + int major; + int next_minor; + struct mutex chrdev_mutex; + struct mutex timeout_mutex; + + struct nvhost_channel **chlist; /* channel list */ + struct mutex chlist_mutex; /* mutex for channel list */ + struct mutex ch_alloc_mutex; /* mutex for channel allocation */ + struct semaphore free_channels; /* Semaphore tracking free channels */ + unsigned long allocated_channels[2]; + + /* nvhost vm specific structures */ + struct list_head vm_list; + struct mutex vm_mutex; + struct mutex vm_alloc_mutex; + + /* for nvhost_masters list */ + struct list_head list; + + struct rb_root syncpt_backing_head; +}; + +#ifdef CONFIG_DEBUG_FS +void nvhost_debug_init(struct nvhost_master *master); +void nvhost_device_debug_init(struct platform_device *dev); +void nvhost_device_debug_deinit(struct platform_device *dev); +void nvhost_debug_dump(struct nvhost_master *master); +#else +static inline void nvhost_debug_init(struct nvhost_master *master) +{ +} +static inline void nvhost_device_debug_init(struct platform_device *dev) +{ +} +static inline void 
nvhost_device_debug_deinit(struct platform_device *dev) +{ +} +static inline void nvhost_debug_dump(struct nvhost_master *master) +{ +} +#endif + +int nvhost_host1x_finalize_poweron(struct platform_device *dev); +int nvhost_host1x_prepare_poweroff(struct platform_device *dev); + +void nvhost_set_chanops(struct nvhost_channel *ch); + +int nvhost_gather_filter_enabled(struct nvhost_syncpt *sp); + +int nvhost_update_characteristics(struct platform_device *dev); + +static inline void *nvhost_get_falcon_data(struct platform_device *_dev) +{ + struct nvhost_device_data *pdata = + (struct nvhost_device_data *)platform_get_drvdata(_dev); + WARN_ON(!pdata); + return pdata ? pdata->falcon_data : NULL; +} + +static inline void nvhost_set_falcon_data(struct platform_device *_dev, + void *priv_data) +{ + struct nvhost_device_data *pdata = + (struct nvhost_device_data *)platform_get_drvdata(_dev); + WARN_ON(!pdata); + pdata->falcon_data = priv_data; +} + + +static inline void *nvhost_get_private_data(struct platform_device *_dev) +{ + struct nvhost_device_data *pdata = + (struct nvhost_device_data *)platform_get_drvdata(_dev); + WARN_ON(!pdata); + return pdata ? pdata->private_data : NULL; +} + +static inline void *nvhost_get_private_data_nowarn(struct platform_device *_dev) +{ + struct nvhost_device_data *pdata = + (struct nvhost_device_data *)platform_get_drvdata(_dev); + return pdata ? 
pdata->private_data : NULL; +} + +static inline void nvhost_set_private_data(struct platform_device *_dev, + void *priv_data) +{ + struct nvhost_device_data *pdata = + (struct nvhost_device_data *)platform_get_drvdata(_dev); + WARN_ON(!pdata); + pdata->private_data = priv_data; +} + +struct nvhost_master *nvhost_get_prim_host(void); + +static inline struct nvhost_master *nvhost_get_host( + struct platform_device *_dev) +{ + struct device *parent = _dev->dev.parent; + struct device *dev = &_dev->dev; + + /* + * host1x has no parent dev on non-DT configuration or has + * platform_bus on DT configuration. So search for a device + * whose parent is NULL or platform_bus + */ + while (parent && parent != &platform_bus) { + dev = parent; + parent = parent->parent; + } + + return nvhost_get_private_data(to_platform_device(dev)); +} + +static inline struct nvhost_master *nvhost_get_host_nowarn( + struct platform_device *_dev) +{ + struct device *parent = _dev->dev.parent; + struct device *dev = &_dev->dev; + + /* + * host1x has no parent dev on non-DT configuration or has + * platform_bus on DT configuration. So search for a device + * whose parent is NULL or platform_bus + */ + while (parent && parent != &platform_bus) { + dev = parent; + parent = parent->parent; + } + + return nvhost_get_private_data_nowarn(to_platform_device(dev)); +} + +static inline struct platform_device *nvhost_get_parent( + struct platform_device *_dev) +{ + return (_dev->dev.parent && _dev->dev.parent != &platform_bus) + ? 
to_platform_device(_dev->dev.parent) : NULL; +} + +struct nvhost_master *nvhost_get_syncpt_owner(u32 id); +struct nvhost_syncpt *nvhost_get_syncpt_owner_struct(u32 id, + struct nvhost_syncpt *default_syncpt); + +#endif diff --git a/drivers/video/tegra/host/nvcsi/deskew.h b/drivers/video/tegra/host/nvcsi/deskew.h new file mode 100644 index 00000000..c55f2257 --- /dev/null +++ b/drivers/video/tegra/host/nvcsi/deskew.h @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Deskew driver + * + * Copyright (c) 2018-2022 NVIDIA Corporation. All rights reserved. + */ + + +#ifndef __DESKEW_H__ +#define __DESKEW_H__ + +#include +#include +#include + + +//////////////////////////////////////////////////////////////// +// STREAM REGISTERS +//////////////////////////////////////////////////////////////// + +#define NVCSI_STREAM_0_ERROR_STATUS2VI_MASK regs[0] +#define NVCSI_STREAM_1_ERROR_STATUS2VI_MASK regs[1] +#define CFG_ERR_STATUS2VI_MASK_ALL regs[2] + +//////////////////////////////////////////////////////////////// +// PHY REGISTERS +//////////////////////////////////////////////////////////////// + +// PHY INTERRUPTS REGISTERS +#define NVCSI_PHY_0_CILA_INTR_STATUS regs[3] +// bits in register NVCSI_PHY_0_CILA_INTR_STATUS +#define intr_dphy_cil_deskew_calib_err_ctrl (1 << 27) +#define intr_dphy_cil_deskew_calib_err_lane1 (1 << 26) +#define intr_dphy_cil_deskew_calib_err_lane0 (1 << 25) +#define intr_dphy_cil_deskew_calib_done_ctrl (1 << 24) +#define intr_dphy_cil_deskew_calib_done_lane1 (1 << 23) +#define intr_dphy_cil_deskew_calib_done_lane0 (1 << 22) +#define NVCSI_PHY_0_CILA_INTR_MASK regs[4] +#define NVCSI_PHY_0_CILB_INTR_STATUS regs[5] +#define NVCSI_PHY_0_CILB_INTR_MASK regs[6] +// new registers in T194 +#define T194_NVCSI_PHY_0_CILA_INTR_1_STATUS 0x10404 +#define T194_NVCSI_PHY_0_CILA_INTR_1_MASK 0x1040c +#define T194_NVCSI_PHY_0_CILB_INTR_1_STATUS 0x10804 +#define T194_NVCSI_PHY_0_CILB_INTR_1_MASK 0x1080c + 
+//////////////////////////////////////////////////////////////// +// PHY DESKEW REGISTERS +//////////////////////////////////////////////////////////////// + +// XXX_OFFSET: address offset from NVCSI_CIL_PHY_CTRL_0 +#define NVCSI_PHY_0_NVCSI_CIL_PHY_CTRL_0 regs[7] +#define NVCSI_CIL_A_SW_RESET_0_OFFSET regs[8] +#define NVCSI_CIL_A_CLK_DESKEW_CTRL_0_OFFSET regs[9] +// bits in register NVCSI_CIL_A_CLK_DESKEW_CTRL_0 +#define CLK_INADJ_SWEEP_CTRL (0x1 << 15) +#define CLK_INADJ_LIMIT_HIGH (0x3f << 8) +#define CLK_INADJ_LIMIT_LOW 0x3f +#define NVCSI_CIL_A_DPHY_INADJ_CTRL_0_OFFSET regs[10] +// bits in register NVCSI_CIL_A_DPHY_INADJ_CTRL_0 +#define SW_SET_DPHY_INADJ_CLK (0x1 << 22) +#define DPHY_INADJ_CLK (0x3f << 16) +#define DPHY_INADJ_CLK_SHIFT 16 +#define SW_SET_DPHY_INADJ_IO1 (0x1 << 14) +#define DPHY_INADJ_IO1 (0x3f << 8) +#define DPHY_INADJ_IO1_SHIFT 8 +#define SW_SET_DPHY_INADJ_IO0 (0x1 << 6) +#define DPHY_INADJ_IO0 0x3f +#define DPHY_INADJ_IO0_SHIFT 0 + +#define NVCSI_CIL_A_DATA_DESKEW_CTRL_0_OFFSET regs[11] +// bits in register NVCSI_CIL_A_DATA_DESKEW_CTRL_0 +#define DATA_INADJ_SWEEP_CTRL1 (0x1 << 31) +#define DATA_INADJ_SWEEP_CTRL0 (0x1 << 15) +#define DATA_INADJ_LIMIT_HIGH1 (0x3f << 23) +#define DATA_INADJ_LIMIT_HIGH0 (0x3f << 8) + +#define NVCSI_CIL_A_DPHY_DESKEW_STATUS_0_OFFSET regs[12] +// bits in register NVCSI_CIL_A_DPHY_DESKEW_STATUS_0 +#define DPHY_CALIB_ERR_IO1 (0x1 << 15) +#define DPHY_CALIB_DONE_IO1 (0x1 << 14) +#define DPHY_CALIB_ERR_IO0 (0x1 << 7) +#define DPHY_CALIB_DONE_IO0 (0x1 << 6) + +#define NVCSI_CIL_A_DPHY_DESKEW_DATA_CALIB_STATUS_LOW_0_0_OFFSET regs[13] +#define NVCSI_CIL_A_DPHY_DESKEW_DATA_CALIB_STATUS_HIGH_0_0_OFFSET regs[14] +#define NVCSI_CIL_A_DPHY_DESKEW_CLK_CALIB_STATUS_LOW_0_0_OFFSET regs[15] +#define NVCSI_CIL_A_DPHY_DESKEW_CLK_CALIB_STATUS_HIGH_0_0_OFFSET regs[16] + +// only for t194+ +#define NVCSI_CIL_A_DPHY_DESKEW_RESULT_STATUS_OFFSET 0x64 +#define NVCSI_CIL_B_DPHY_DESKEW_RESULT_STATUS_OFFSET 0xf0 + +/* + * 
NVCSI_PHY_0_NVCSI_CIL_A_DESKEW_CONTROL_0 was introduced in T194 + * Use this register for DESKEW_COMPARE and DESKEW_SETTLE + */ +#define NVCSI_CIL_A_DESKEW_CONTROL_0_OFFSET regs[17] +#define NVCSI_CIL_A_CONTROL_0_OFFSET regs[18] + +/* + * bits in NVCSI_CIL_A_DESKEW_CONTROL_0/NVCSI_CIL_A_CONTROL_0 + * For T194, the THS_SETTLE control was split into + * THS_SETTLE0 and THS_SETTLE1 for per-lane control + */ +#define DESKEW_COMPARE regs[19] +#define DESKEW_COMPARE_SHIFT regs[20] +#define DESKEW_SETTLE regs[21] +#define DESKEW_SETTLE_SHIFT regs[22] +#define CLK_SETTLE regs[23] +#define CLK_SETTLE_SHIFT0 regs[24] +#define THS_SETTLE0 regs[25] +#define THS_SETTLE1 regs[26] +#define THS_SETTLE0_SHIFT regs[27] +#define THS_SETTLE1_SHIFT regs[28] + +#define NVCSI_CIL_B_DPHY_INADJ_CTRL_0_OFFSET regs[29] +#define NVCSI_CIL_B_CLK_DESKEW_CTRL_0_OFFSET regs[30] +#define NVCSI_CIL_B_DATA_DESKEW_CTRL_0_OFFSET regs[31] +#define NVCSI_CIL_B_DPHY_DESKEW_STATUS_0_OFFSET regs[32] +// same note as above for NVCSI_CIL_A_DESKEW_CONTROL_0 +#define NVCSI_CIL_B_DESKEW_CONTROL_0_OFFSET regs[33] +#define NVCSI_CIL_B_CONTROL_0_OFFSET regs[34] + +#define NVCSI_DPHY_CALIB_STATUS_IO_OFFSET 0x8 +#define NVCSI_PHY_OFFSET 0x10000 +#define NVCSI_CIL_B_OFFSET regs[35] + +#define REGS_COUNT 36 + +//////// + +#define DESKEW_TIMEOUT_MSEC 100 + +struct nvcsi_deskew_context { + unsigned int deskew_lanes; + struct task_struct *deskew_kthread; + struct completion thread_done; +}; + +#if IS_ENABLED(CONFIG_TEGRA_GRHOST_NVCSI) +int nvcsi_deskew_apply_check(struct nvcsi_deskew_context *ctx); +int nvcsi_deskew_setup(struct nvcsi_deskew_context *ctx); +#else +static int inline nvcsi_deskew_apply_check(struct nvcsi_deskew_context *ctx) +{ + return 0; +} +static int inline nvcsi_deskew_setup(struct nvcsi_deskew_context *ctx) +{ + return 0; +} +#endif + +void nvcsi_deskew_platform_setup(struct tegra_csi_device *dev, bool is_t19x); + +void deskew_dbgfs_calc_bound(struct seq_file *s, long long input_stats); +void 
deskew_dbgfs_deskew_stats(struct seq_file *s); + +#endif diff --git a/drivers/video/tegra/host/nvcsi/nvcsi.h b/drivers/video/tegra/host/nvcsi/nvcsi.h new file mode 100644 index 00000000..66fea12b --- /dev/null +++ b/drivers/video/tegra/host/nvcsi/nvcsi.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Tegra Graphics Host NVCSI + * + * Copyright (c) 2015-2022 NVIDIA Corporation. All rights reserved. + */ + +#ifndef __NVHOST_NVCSI_H__ +#define __NVHOST_NVCSI_H__ + +#define CFG_ERR_STATUS2VI_MASK_VC3 (0x1 << 24) +#define CFG_ERR_STATUS2VI_MASK_VC2 (0x1 << 16) +#define CFG_ERR_STATUS2VI_MASK_VC1 (0x1 << 8) +#define CFG_ERR_STATUS2VI_MASK_VC0 (0x1 << 0) + +extern const struct file_operations tegra_nvcsi_ctrl_ops; + +int nvcsi_finalize_poweron(struct platform_device *pdev); +int nvcsi_prepare_poweroff(struct platform_device *pdev); + +#if IS_ENABLED(CONFIG_TEGRA_GRHOST_NVCSI) +int nvcsi_cil_sw_reset(int lanes, int enable); +#else +static int inline nvcsi_cil_sw_reset(int lanes, int enable) +{ + return 0; +} +#endif + +struct tegra_csi_device *tegra_get_mc_csi(void); +#endif diff --git a/include/linux/arm64-barrier.h b/include/linux/arm64-barrier.h new file mode 100644 index 00000000..ed3d5381 --- /dev/null +++ b/include/linux/arm64-barrier.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + */ + +#include + +/* + * Upstream Linux commit bd4fb6d270bc ("arm64: Add support for SB + * barrier and patch in over DSB; ISB sequences") added speculation + * macro 'spec_bar' to inhibit speculation. This has since been removed + * from the upstream kernel starting with Linux v5.13, because there are + * no current users. Define this macro here for NVIDIA drivers to use. 
+ */ +#define spec_bar() asm volatile(ALTERNATIVE("dsb nsh\nisb\n", \ + SB_BARRIER_INSN"nop\n", \ + ARM64_HAS_SB)) diff --git a/include/linux/platform/tegra/bwmgr_mc.h b/include/linux/platform/tegra/bwmgr_mc.h new file mode 100644 index 00000000..3992f094 --- /dev/null +++ b/include/linux/platform/tegra/bwmgr_mc.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved. + */ + +#ifndef __BWMGR_MC_H +#define __BWMGR_MC_H + +#include +#include + +unsigned long bwmgr_apply_efficiency( + unsigned long bw, unsigned long iso_bw, + unsigned long emc_max, u64 usage_flags, + unsigned long *iso_bw_min, unsigned long iso_bw_nvdis, + unsigned long iso_bw_vi); + +void bwmgr_eff_init(void); + +unsigned long bwmgr_freq_to_bw(unsigned long freq); +unsigned long bwmgr_bw_to_freq(unsigned long bw); +unsigned long bwmgr_get_lowest_iso_emc_freq(long iso_bw, + long iso_bw_nvdis, long iso_bw_vi); +u32 tegra_bwmgr_get_max_iso_bw(enum tegra_iso_client); + +u32 bwmgr_dvfs_latency(u32 ufreq); +int bwmgr_iso_bw_percentage_max(void); +int bwmgr_get_emc_to_dram_freq_factor(void); +#endif /* __BWMGR_MC_H */ diff --git a/include/linux/platform/tegra/isomgr.h b/include/linux/platform/tegra/isomgr.h new file mode 100644 index 00000000..f5763d79 --- /dev/null +++ b/include/linux/platform/tegra/isomgr.h @@ -0,0 +1,204 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2022, NVIDIA CORPORATION. All rights reserved. 
+ */ + +#ifndef _INCLUDE_MACH_ISOMGR_H +#define _INCLUDE_MACH_ISOMGR_H + +#include +#include + +#define ISOMGR_MAGIC 0x150A1C + +/* handle to identify registered client */ +#define tegra_isomgr_handle void * + +/* callback to client to renegotiate ISO BW allocation */ +typedef void (*tegra_isomgr_renegotiate)(void *priv, + u32 avail_bw); /* KB/sec */ + +struct isoclient_info { + enum tegra_iso_client client; + char *name; + char *dev_name; + char *emc_clk_name; + enum tegra_bwmgr_client_id bwmgr_id; +}; + +struct isomgr_client { + u32 magic; /* magic to identify handle */ + struct kref kref; /* ref counting */ + s32 dedi_bw; /* BW dedicated to this client (KB/sec) */ + s32 rsvd_bw; /* BW reserved for this client (KB/sec) */ + s32 real_bw; /* BW realized for this client (KB/sec) */ + s32 lti; /* Client spec'd Latency Tolerance (usec) */ + s32 lto; /* MC calculated Latency Tolerance (usec) */ + s32 rsvd_mf; /* reserved minimum freq in support of LT */ + s32 real_mf; /* realized minimum freq in support of LT */ + s32 real_mf_rq; /* real_mf requested */ + tegra_isomgr_renegotiate renegotiate; /* ask client to renegotiate */ + bool realize; /* bw realization in progress */ + s32 sleep_bw; /* sleeping for realize */ + s32 margin_bw; /* BW set aside for this client (KB/sec) */ + u8 limit_bw_percentage; /* Insufficient HW buffers cause BW to be + * limited to this percentage of DRAM BW + */ + void *priv; /* client driver's private data */ + struct completion cmpl; /* so we can sleep waiting for delta BW */ + +#ifdef CONFIG_COMMON_CLK + struct tegra_bwmgr_client *bwmgr_handle; +#else + struct clk *emc_clk; /* client emc clk for bw */ +#endif + +#ifdef CONFIG_TEGRA_ISOMGR_SYSFS + struct kobject *client_kobj; + struct isomgr_client_attrs { + struct kobj_attribute dedi_bw; + struct kobj_attribute rsvd_bw; + struct kobj_attribute real_bw; + struct kobj_attribute lti; + struct kobj_attribute lto; + struct kobj_attribute rsvd_mf; + struct kobj_attribute real_mf; + struct 
kobj_attribute sleep_bw; + struct kobj_attribute margin_bw; + } client_attrs; +#endif /* CONFIG_TEGRA_ISOMGR_SYSFS */ +}; + +struct isomgr { + struct mutex lock; /* to lock ALL isomgr state */ + struct task_struct *task; /* check reentrant/mismatched locks */ + +#ifdef CONFIG_COMMON_CLK + struct tegra_bwmgr_client *bwmgr_handle; +#else + struct clk *emc_clk; /* isomgr emc clock for floor freq */ +#endif + + s32 lt_mf; /* min freq to support worst LT */ + s32 lt_mf_rq; /* requested lt_mf */ + s32 avail_bw; /* globally available MC BW */ + s32 dedi_bw; /* total BW 'dedicated' to clients */ + s32 sleep_bw; /* pending bw requirement */ + u32 max_iso_bw; /* max ISO BW MC can accommodate */ + struct kobject *kobj; /* for sysfs linkage */ + struct isomgr_ops *ops; /* ops structure for isomgr*/ +}; + +extern struct isoclient_info *isoclient_info; +/*platform specific flag for requesting max emc floor req for camera client*/ +extern u8 isomgr_camera_max_floor_req; +extern int isoclients; +extern bool client_valid[TEGRA_ISO_CLIENT_COUNT]; +extern struct isomgr_client isomgr_clients[TEGRA_ISO_CLIENT_COUNT]; +extern struct isomgr isomgr; +extern char *cname[]; + +struct isomgr_ops { + void (*isomgr_plat_init)(void); + bool (*isomgr_plat_register)(u32 dedi_bw, + enum tegra_iso_client client); + void (*isomgr_plat_unregister)(struct isomgr_client *cp); + bool (*isomgr_plat_reserve)(struct isomgr_client *cp, + u32 bw, enum tegra_iso_client client); + bool (*isomgr_plat_realize)(struct isomgr_client *cp); + u32 (*isomgr_max_iso_bw)(enum tegra_iso_client client); +}; + +struct isomgr_ops *pre_t19x_isomgr_init(void); +struct isomgr_ops *t19x_isomgr_init(void); + +#if defined(CONFIG_TEGRA_ISOMGR) +/* Register an ISO BW client */ +tegra_isomgr_handle tegra_isomgr_register(enum tegra_iso_client client, + u32 dedicated_bw, /* KB/sec */ + tegra_isomgr_renegotiate renegotiate, + void *priv); + +/* Unregister an ISO BW client */ +void tegra_isomgr_unregister(tegra_isomgr_handle handle); + 
+/* Return the initialization status of isomgr */ +bool tegra_isomgr_init_status(void); + +/* Reserve ISO BW on behalf of client - don't apply, rval is dvfs thresh usec */ +u32 tegra_isomgr_reserve(tegra_isomgr_handle handle, + u32 bw, /* KB/sec */ + u32 lt); /* usec */ + +/* Realize client reservation - apply settings, rval is dvfs thresh usec */ +u32 tegra_isomgr_realize(tegra_isomgr_handle handle); + +/* This sets bw aside for the client specified. */ +int tegra_isomgr_set_margin(enum tegra_iso_client client, u32 bw, bool wait); + +int tegra_isomgr_get_imp_time(enum tegra_iso_client, u32 bw); + +/* returns available in iso bw in KB/sec */ +u32 tegra_isomgr_get_available_iso_bw(void); + +/* returns total iso bw in KB/sec */ +u32 tegra_isomgr_get_total_iso_bw(enum tegra_iso_client client); + +/* Initialize isomgr. + * This api would be called by .init_machine during boot. + * isomgr clients, don't call this api. + */ +int __init isomgr_init(void); +#else +static inline tegra_isomgr_handle tegra_isomgr_register( + enum tegra_iso_client client, + u32 dedicated_bw, + tegra_isomgr_renegotiate renegotiate, + void *priv) +{ + /* return a dummy handle to allow client function + * as if isomgr were enabled. 
+ */ + return (tegra_isomgr_handle)1; +} + +static inline void tegra_isomgr_unregister(tegra_isomgr_handle handle) {} + +static inline u32 tegra_isomgr_reserve(tegra_isomgr_handle handle, + u32 bw, u32 lt) +{ + return 1; +} + +static inline u32 tegra_isomgr_realize(tegra_isomgr_handle handle) +{ + return 1; +} + +static inline int tegra_isomgr_set_margin(enum tegra_iso_client client, u32 bw) +{ + return 0; +} + +static inline int tegra_isomgr_get_imp_time(enum tegra_iso_client client, + u32 bw) +{ + return 0; +} + +static inline u32 tegra_isomgr_get_available_iso_bw(void) +{ + return UINT_MAX; +} + +static inline u32 tegra_isomgr_get_total_iso_bw(enum tegra_iso_client client) +{ + return UINT_MAX; +} + +static inline int isomgr_init(void) +{ + return 0; +} +#endif +#endif /* _INCLUDE_MACH_ISOMGR_H */ diff --git a/include/linux/platform/tegra/latency_allowance.h b/include/linux/platform/tegra/latency_allowance.h new file mode 100644 index 00000000..e0545799 --- /dev/null +++ b/include/linux/platform/tegra/latency_allowance.h @@ -0,0 +1,330 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2011-2022, NVIDIA CORPORATION. All rights reserved. + */ + +#ifndef _MACH_TEGRA_LATENCY_ALLOWANCE_H_ +#define _MACH_TEGRA_LATENCY_ALLOWANCE_H_ + +#define FIRST_DISP_CLIENT_ID TEGRA_LA_DISPLAY_0A +#define DISP_CLIENT_LA_ID(id) (id - FIRST_DISP_CLIENT_ID) + + +/* Note:- When adding new display realted IDs, please add them adjacent/amongst + the existing display related IDs. This is required because certain + display related macros/strcuts assume that all display related + tegra_la_ids are adjacent to each other. + + Please observe the same guidelines as display clients, when adding new + camera clients. All camera clients need to be located adjacent to each + other in tegra_la_id. This is required because certain camera related + macros/structs assume that all camera related tegra_la_ids are + adjacent to each other. 
*/ +enum tegra_la_id { + TEGRA_LA_AFIR = 0, /* T30 specific */ + TEGRA_LA_AFIW, /* T30 specific */ + TEGRA_LA_AVPC_ARM7R, + TEGRA_LA_AVPC_ARM7W, + TEGRA_LA_DISPLAY_0A, + TEGRA_LA_DISPLAY_0B, + TEGRA_LA_DISPLAY_0C, + TEGRA_LA_DISPLAY_1B, /* T30 specific */ + TEGRA_LA_DISPLAY_HC, + TEGRA_LA_DISPLAY_0AB, + TEGRA_LA_DISPLAY_0BB, + TEGRA_LA_DISPLAY_0CB, + TEGRA_LA_DISPLAY_1BB, /* T30 specific */ + TEGRA_LA_DISPLAY_HCB, + TEGRA_LA_DISPLAY_T, /* T14x specific */ + TEGRA_LA_DISPLAYD, /* T14x specific */ + TEGRA_LA_EPPUP, + TEGRA_LA_EPPU, + TEGRA_LA_EPPV, + TEGRA_LA_EPPY, + TEGRA_LA_G2PR, + TEGRA_LA_G2SR, + TEGRA_LA_G2DR, + TEGRA_LA_G2DW, + TEGRA_LA_GPUSRD, /* T12x specific */ + TEGRA_LA_GPUSWR, /* T12x specific */ + TEGRA_LA_HOST1X_DMAR, + TEGRA_LA_HOST1XR, + TEGRA_LA_HOST1XW, + TEGRA_LA_HDAR, + TEGRA_LA_HDAW, + TEGRA_LA_ISPW, + TEGRA_LA_MPCORER, + TEGRA_LA_MPCOREW, + TEGRA_LA_MPCORE_LPR, + TEGRA_LA_MPCORE_LPW, + TEGRA_LA_MPE_UNIFBR, /* T30 specific */ + TEGRA_LA_MPE_IPRED, /* T30 specific */ + TEGRA_LA_MPE_AMEMRD, /* T30 specific */ + TEGRA_LA_MPE_CSRD, /* T30 specific */ + TEGRA_LA_MPE_UNIFBW, /* T30 specific */ + TEGRA_LA_MPE_CSWR, /* T30 specific */ + TEGRA_LA_FDCDRD, + TEGRA_LA_IDXSRD, + TEGRA_LA_TEXSRD, + TEGRA_LA_TEXL2SRD = TEGRA_LA_TEXSRD, /* T11x, T14x specific */ + TEGRA_LA_FDCDWR, + TEGRA_LA_FDCDRD2, + TEGRA_LA_IDXSRD2, /* T30 specific */ + TEGRA_LA_TEXSRD2, /* T30 specific */ + TEGRA_LA_FDCDWR2, + TEGRA_LA_PPCS_AHBDMAR, + TEGRA_LA_PPCS_AHBSLVR, + TEGRA_LA_PPCS_AHBDMAW, + TEGRA_LA_PPCS_AHBSLVW, + TEGRA_LA_PTCR, + TEGRA_LA_SATAR, /* T30, T19x */ + TEGRA_LA_SATAW, /* T30, T19x */ + TEGRA_LA_VDE_BSEVR, + TEGRA_LA_VDE_MBER, + TEGRA_LA_VDE_MCER, + TEGRA_LA_VDE_TPER, + TEGRA_LA_VDE_BSEVW, + TEGRA_LA_VDE_DBGW, + TEGRA_LA_VDE_MBEW, + TEGRA_LA_VDE_TPMW, + TEGRA_LA_VI_RUV, /* T30 specific */ + TEGRA_LA_VI_WSB, + TEGRA_LA_VI_WU, + TEGRA_LA_VI_WV, + TEGRA_LA_VI_WY, + + TEGRA_LA_MSENCSRD, /* T11x, T14x specific */ + TEGRA_LA_MSENCSWR, /* T11x, T14x specific */ + 
TEGRA_LA_XUSB_HOSTR, /* T11x, T19x */ + TEGRA_LA_XUSB_HOSTW, /* T11x, T19x */ + TEGRA_LA_XUSB_DEVR, /* T11x, T19x */ + TEGRA_LA_XUSB_DEVW, /* T11x, T19x */ + TEGRA_LA_FDCDRD3, /* T11x specific */ + TEGRA_LA_FDCDRD4, /* T11x specific */ + TEGRA_LA_FDCDWR3, /* T11x specific */ + TEGRA_LA_FDCDWR4, /* T11x specific */ + TEGRA_LA_EMUCIFR, /* T11x, T14x specific */ + TEGRA_LA_EMUCIFW, /* T11x, T14x specific */ + TEGRA_LA_TSECSRD, /* T11x, T14x, T19x */ + TEGRA_LA_TSECSWR, /* T11x, T14x, T19x */ + + TEGRA_LA_VI_W, /* T14x specific */ + TEGRA_LA_ISP_RA, /* T14x specific */ + TEGRA_LA_ISP_WA, /* T14x specific */ + TEGRA_LA_ISP_WB, /* T14x specific */ + TEGRA_LA_ISP_RAB, /* T12x specific */ + TEGRA_LA_ISP_WAB, /* T12x specific */ + TEGRA_LA_ISP_WBB, /* T12x specific */ + TEGRA_LA_BBCR, /* T14x specific */ + TEGRA_LA_BBCW, /* T14x specific */ + TEGRA_LA_BBCLLR, /* T14x specific */ + TEGRA_LA_SDMMCR, /* T12x, T19x */ + TEGRA_LA_SDMMCRA, /* T12x, T19x */ + TEGRA_LA_SDMMCRAA, /* T12x specific */ + TEGRA_LA_SDMMCRAB, /* T12x, T19x */ + TEGRA_LA_SDMMCW, /* T12x, T19x */ + TEGRA_LA_SDMMCWA, /* T12x, T19x */ + TEGRA_LA_SDMMCWAA, /* T12x specific */ + TEGRA_LA_SDMMCWAB, /* T12x, T19x */ + TEGRA_LA_VICSRD, /* T12x, T19x */ + TEGRA_LA_VICSWR, /* T12x, T19x */ + + TEGRA_LA_TSECBSRD, /* T21x specific */ + TEGRA_LA_TSECBSWR, /* T21x specific */ + + TEGRA_LA_NVDECR, /* T21x specific */ + TEGRA_LA_NVDECW, /* T21x specific */ + + TEGRA_LA_AONR, /* T18x, T19x */ + TEGRA_LA_AONW, /* T18x, T19x */ + TEGRA_LA_AONDMAR, /* T18x, T19x */ + TEGRA_LA_AONDMAW, /* T18x, T19x */ + TEGRA_LA_APEDMAR, /* T18x, T19x */ + TEGRA_LA_APEDMAW, /* T18x, T19x */ + TEGRA_LA_APER, /* T18x, T19x */ + TEGRA_LA_APEW, /* T18x, T19x */ + TEGRA_LA_AXISR, /* T18x, T19x */ + TEGRA_LA_AXISW, /* T18x, T19x */ + TEGRA_LA_BPMPR, /* T18x, T19x */ + TEGRA_LA_BPMPW, /* T18x, T19x */ + TEGRA_LA_BPMPDMAR, /* T18x, T19x */ + TEGRA_LA_BPMPDMAW, /* T18x, T19x */ + TEGRA_LA_EQOSR, /* T18x, T19x */ + TEGRA_LA_EQOSW, /* T18x, T19x */ + 
TEGRA_LA_ETRR, /* T18x, T19x */ + TEGRA_LA_ETRW, /* T18x, T19x */ + TEGRA_LA_GPUSRD2, /* T18x specific */ + TEGRA_LA_GPUSWR2, /* T18x specific */ + TEGRA_LA_NVDISPLAYR, /* T18x, T19x */ + TEGRA_LA_NVENCSRD, /* T18x, T19x */ + TEGRA_LA_NVENCSWR, /* T18x, T19x */ + TEGRA_LA_NVJPGSRD, /* T18x, T19x */ + TEGRA_LA_NVJPGSWR, /* T18x, T19x */ + TEGRA_LA_SCER, /* T18x, T19x */ + TEGRA_LA_SCEW, /* T18x, T19x */ + TEGRA_LA_SCEDMAR, /* T18x, T19x */ + TEGRA_LA_SCEDMAW, /* T18x, T19x */ + TEGRA_LA_SESRD, /* T18x, T19x */ + TEGRA_LA_SESWR, /* T18x, T19x */ + TEGRA_LA_UFSHCR, /* T18x, T19x */ + TEGRA_LA_UFSHCW, /* T18x, T19x */ + + TEGRA_LA_AXIAPR, /* T19x specific */ + TEGRA_LA_AXIAPW, /* T19x specific */ + TEGRA_LA_CIFLL_WR, /* T19x specific */ + TEGRA_LA_DLA0FALRDB, /* T19x specific */ + TEGRA_LA_DLA0RDA, /* T19x specific */ + TEGRA_LA_DLA0FALWRB, /* T19x specific */ + TEGRA_LA_DLA0WRA, /* T19x specific */ + TEGRA_LA_DLA0RDA1, /* T19x specific */ + TEGRA_LA_DLA1RDA1, /* T19x specific */ + TEGRA_LA_DLA1FALRDB, /* T19x specific */ + TEGRA_LA_DLA1RDA, /* T19x specific */ + TEGRA_LA_DLA1FALWRB, /* T19x specific */ + TEGRA_LA_DLA1WRA, /* T19x specific */ + TEGRA_LA_HOST1XDMAR, /* T19x specific */ + TEGRA_LA_ISPFALR, /* T19x specific */ + TEGRA_LA_ISPRA, /* T19x specific */ + TEGRA_LA_ISPWA, /* T19x specific */ + TEGRA_LA_ISPWB, /* T19x specific */ + TEGRA_LA_ISPFALW, /* T19x specific */ + TEGRA_LA_ISPRA1, /* T19x specific */ + TEGRA_LA_MIU0R, /* T19x specific */ + TEGRA_LA_MIU0W, /* T19x specific */ + TEGRA_LA_MIU1R, /* T19x specific */ + TEGRA_LA_MIU1W, /* T19x specific */ + TEGRA_LA_MIU2R, /* T19x specific */ + TEGRA_LA_MIU2W, /* T19x specific */ + TEGRA_LA_MIU3R, /* T19x specific */ + TEGRA_LA_MIU3W, /* T19x specific */ + TEGRA_LA_MIU4R, /* T19x specific */ + TEGRA_LA_MIU4W, /* T19x specific */ + TEGRA_LA_MIU5R, /* T19x specific */ + TEGRA_LA_MIU5W, /* T19x specific */ + TEGRA_LA_MIU6R, /* T19x specific */ + TEGRA_LA_MIU6W, /* T19x specific */ + TEGRA_LA_MIU7R, /* T19x specific 
*/ + TEGRA_LA_MIU7W, /* T19x specific */ + TEGRA_LA_NVDECSRD, /* T19x specific */ + TEGRA_LA_NVDECSWR, /* T19x specific */ + TEGRA_LA_NVDEC1SRD, /* T19x specific */ + TEGRA_LA_NVDECSRD1, /* T19x specific */ + TEGRA_LA_NVDEC1SRD1, /* T19x specific */ + TEGRA_LA_NVDEC1SWR, /* T19x specific */ + TEGRA_LA_NVENC1SRD, /* T19x specific */ + TEGRA_LA_NVENC1SWR, /* T19x specific */ + TEGRA_LA_NVENC1SRD1, /* T19x specific */ + TEGRA_LA_NVENCSRD1, /* T19x specific */ + TEGRA_LA_PCIE0R, /* T19x specific */ + TEGRA_LA_PCIE0W, /* T19x specific */ + TEGRA_LA_PCIE1R, /* T19x specific */ + TEGRA_LA_PCIE1W, /* T19x specific */ + TEGRA_LA_PCIE2AR, /* T19x specific */ + TEGRA_LA_PCIE2AW, /* T19x specific */ + TEGRA_LA_PCIE3R, /* T19x specific */ + TEGRA_LA_PCIE3W, /* T19x specific */ + TEGRA_LA_PCIE4R, /* T19x specific */ + TEGRA_LA_PCIE4W, /* T19x specific */ + TEGRA_LA_PCIE5R, /* T19x specific */ + TEGRA_LA_PCIE5W, /* T19x specific */ + TEGRA_LA_PCIE0R1, /* T19x specific */ + TEGRA_LA_PCIE5R1, /* T19x specific */ + TEGRA_LA_PVA0RDA, /* T19x specific */ + TEGRA_LA_PVA0RDB, /* T19x specific */ + TEGRA_LA_PVA0RDC, /* T19x specific */ + TEGRA_LA_PVA0WRA, /* T19x specific */ + TEGRA_LA_PVA0WRB, /* T19x specific */ + TEGRA_LA_PVA0WRC, /* T19x specific */ + TEGRA_LA_PVA0RDA1, /* T19x specific */ + TEGRA_LA_PVA0RDB1, /* T19x specific */ + TEGRA_LA_PVA1RDA, /* T19x specific */ + TEGRA_LA_PVA1RDB, /* T19x specific */ + TEGRA_LA_PVA1RDC, /* T19x specific */ + TEGRA_LA_PVA1WRA, /* T19x specific */ + TEGRA_LA_PVA1WRB, /* T19x specific */ + TEGRA_LA_PVA1WRC, /* T19x specific */ + TEGRA_LA_PVA1RDA1, /* T19x specific */ + TEGRA_LA_PVA1RDB1, /* T19x specific */ + TEGRA_LA_RCEDMAR, /* T19x specific */ + TEGRA_LA_RCEDMAW, /* T19x specific */ + TEGRA_LA_RCER, /* T19x specific */ + TEGRA_LA_RCEW, /* T19x specific */ + TEGRA_LA_TSECSRDB, /* T19x specific */ + TEGRA_LA_TSECSWRB, /* T19x specific */ + TEGRA_LA_VIW, /* T19x specific */ + TEGRA_LA_VICSRD1, /* T19x specific */ + TEGRA_LA_VIFALR, /* T19x 
specific */ + TEGRA_LA_VIFALW, /* T19x specific */ + TEGRA_LA_WCAM, /* T19x specific */ + TEGRA_LA_NVLRHP, /* T19x specific */ + TEGRA_LA_DGPU, /* T19x specific */ + TEGRA_LA_IGPU, /* T19x specific */ + + TEGRA_LA_MAX_ID +}; + +enum disp_win_type { + TEGRA_LA_DISP_WIN_TYPE_FULL, + TEGRA_LA_DISP_WIN_TYPE_FULLA, + TEGRA_LA_DISP_WIN_TYPE_FULLB, + TEGRA_LA_DISP_WIN_TYPE_SIMPLE, + TEGRA_LA_DISP_WIN_TYPE_CURSOR, + TEGRA_LA_DISP_WIN_TYPE_NUM_TYPES +}; + +struct disp_client { + enum disp_win_type win_type; + unsigned int mccif_size_bytes; + unsigned int line_buf_sz_bytes; +}; + +struct dc_to_la_params { + unsigned int thresh_lwm_bytes; + unsigned int spool_up_buffering_adj_bytes; + unsigned int drain_time_usec_fp; + unsigned int total_dc0_bw; + unsigned int total_dc1_bw; +}; + +struct la_to_dc_params { + unsigned int fp_factor; + unsigned int (*la_real_to_fp)(unsigned int val); + unsigned int (*la_fp_to_real)(unsigned int val); + unsigned int static_la_minus_snap_arb_to_row_srt_emcclks_fp; + unsigned int dram_width_bits; + unsigned int disp_catchup_factor_fp; +}; + +int tegra_set_disp_latency_allowance(enum tegra_la_id id, + unsigned long emc_freq_hz, + unsigned int bandwidth_in_mbps, + struct dc_to_la_params disp_params); + +int tegra_check_disp_latency_allowance(enum tegra_la_id id, + unsigned long emc_freq_hz, + unsigned int bw_mbps, + struct dc_to_la_params disp_params); + +int tegra_set_latency_allowance(enum tegra_la_id id, + unsigned int bandwidth_in_mbps); + +int tegra_set_camera_ptsa(enum tegra_la_id id, + unsigned int bw_mbps, + int is_hiso); + +void tegra_latency_allowance_update_tick_length(unsigned int new_ns_per_tick); + +int tegra_enable_latency_scaling(enum tegra_la_id id, + unsigned int threshold_low, + unsigned int threshold_mid, + unsigned int threshold_high); + +void tegra_disable_latency_scaling(enum tegra_la_id id); + +void mc_pcie_init(void); + +struct la_to_dc_params tegra_get_la_to_dc_params(void); + +extern const struct disp_client 
*tegra_la_disp_clients_info; + +#endif /* _MACH_TEGRA_LATENCY_ALLOWANCE_H_ */ diff --git a/include/media/camera_common.h b/include/media/camera_common.h new file mode 100644 index 00000000..e187317b --- /dev/null +++ b/include/media/camera_common.h @@ -0,0 +1,403 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * camera_common.h - utilities for tegra camera driver + * + * Copyright (c) 2015-2022, NVIDIA Corporation. All rights reserved. + */ + +#ifndef __camera_common__ +#define __camera_common__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* + * Scaling factor for converting a Q10.22 fixed point value + * back to its original floating point value + */ +#define FIXED_POINT_SCALING_FACTOR (1ULL << 22) + +struct reg_8 { + u16 addr; + u8 val; +}; + +struct reg_16 { + u16 addr; + u16 val; +}; + +struct camera_common_power_rail { + struct regulator *dvdd; + struct regulator *avdd; + struct regulator *iovdd; + struct regulator *vcmvdd; + struct clk *mclk; + unsigned int pwdn_gpio; + unsigned int reset_gpio; + unsigned int af_gpio; + bool state; +}; + +struct camera_common_regulators { + const char *avdd; + const char *dvdd; + const char *iovdd; + const char *vcmvdd; +}; + +struct camera_common_pdata { + const char *mclk_name; /* NULL for default default_mclk */ + const char *parentclk_name; /* NULL for no parent clock*/ + unsigned int pwdn_gpio; + unsigned int reset_gpio; + unsigned int af_gpio; + bool ext_reg; + int (*power_on)(struct camera_common_power_rail *pw); + int (*power_off)(struct camera_common_power_rail *pw); + struct camera_common_regulators regulators; + bool use_cam_gpio; + bool has_eeprom; + bool v_flip; + bool h_mirror; + unsigned int fuse_id_addr; + unsigned int avdd_latency; +}; + +struct camera_common_eeprom_data { + struct i2c_client *i2c_client; + struct i2c_adapter *adap; + struct 
i2c_board_info brd; + struct regmap *regmap; +}; + +int +regmap_util_write_table_8(struct regmap *regmap, + const struct reg_8 table[], + const struct reg_8 override_list[], + int num_override_regs, + u16 wait_ms_addr, u16 end_addr); + +int +regmap_util_write_table_16_as_8(struct regmap *regmap, + const struct reg_16 table[], + const struct reg_16 override_list[], + int num_override_regs, + u16 wait_ms_addr, u16 end_addr); + +enum switch_state { + SWITCH_OFF, + SWITCH_ON, +}; + +static const s64 switch_ctrl_qmenu[] = { + SWITCH_OFF, SWITCH_ON +}; + +/* + * The memory buffers allocated from nvrm are aligned to + * fullfill the hardware requirements: + * - size in alignment with a multiple of 128K/64K bytes, + * see CL http://git-master/r/256468 and bug 1321091. + */ +static const s64 size_align_ctrl_qmenu[] = { + 1, (64 * 1024), (128 * 1024), +}; + +struct camera_common_frmfmt { + struct v4l2_frmsize_discrete size; + const int *framerates; + int num_framerates; + bool hdr_en; + int mode; +}; + +struct camera_common_colorfmt { + unsigned int code; + enum v4l2_colorspace colorspace; + int pix_fmt; + enum v4l2_xfer_func xfer_func; + enum v4l2_ycbcr_encoding ycbcr_enc; + enum v4l2_quantization quantization; +}; + +struct camera_common_framesync { + u32 inck; /* kHz */ + u32 xhs; /* in inck */ + u32 xvs; /* in xhs */ + u32 fps; /* frames in 1000 second */ +}; + +struct tegracam_device ; +struct camera_common_data; + +struct camera_common_sensor_ops { + u32 numfrmfmts; + const struct camera_common_frmfmt *frmfmt_table; + int (*power_on)(struct camera_common_data *s_data); + int (*power_off)(struct camera_common_data *s_data); + int (*write_reg)(struct camera_common_data *s_data, + u16 addr, u8 val); + int (*read_reg)(struct camera_common_data *s_data, + u16 addr, u8 *val); + struct camera_common_pdata *(*parse_dt)(struct tegracam_device *tc_dev); + int (*power_get)(struct tegracam_device *tc_dev); + int (*power_put)(struct tegracam_device *tc_dev); + int 
(*get_framesync)(struct camera_common_data *s_data, + struct camera_common_framesync *vshs); + int (*set_mode)(struct tegracam_device *tc_dev); + int (*start_streaming)(struct tegracam_device *tc_dev); + int (*stop_streaming)(struct tegracam_device *tc_dev); +}; + +struct tegracam_sensor_data { + struct sensor_blob mode_blob; + struct sensor_blob ctrls_blob; +}; + +struct tegracam_ctrl_ops { + u32 numctrls; + u32 string_ctrl_size[TEGRA_CAM_MAX_STRING_CONTROLS]; + u32 compound_ctrl_size[TEGRA_CAM_MAX_COMPOUND_CONTROLS]; + const u32 *ctrl_cid_list; + bool is_blob_supported; + int (*set_gain)(struct tegracam_device *tc_dev, s64 val); + int (*set_exposure)(struct tegracam_device *tc_dev, s64 val); + int (*set_exposure_short)(struct tegracam_device *tc_dev, s64 val); + int (*set_frame_rate)(struct tegracam_device *tc_dev, s64 val); + int (*set_group_hold)(struct tegracam_device *tc_dev, bool val); + int (*fill_string_ctrl)(struct tegracam_device *tc_dev, + struct v4l2_ctrl *ctrl); + int (*fill_compound_ctrl)(struct tegracam_device *tc_dev, + struct v4l2_ctrl *ctrl); + int (*set_gain_ex)(struct tegracam_device *tc_dev, + struct sensor_blob *blob, s64 val); + int (*set_exposure_ex)(struct tegracam_device *tc_dev, + struct sensor_blob *blob, s64 val); + int (*set_frame_rate_ex)(struct tegracam_device *tc_dev, + struct sensor_blob *blob, s64 val); + int (*set_group_hold_ex)(struct tegracam_device *tc_dev, + struct sensor_blob *blob, bool val); +}; + +struct tegracam_ctrl_handler { + struct v4l2_ctrl_handler ctrl_handler; + const struct tegracam_ctrl_ops *ctrl_ops; + struct tegracam_device *tc_dev; + struct tegracam_sensor_data sensor_data; + + int numctrls; + struct v4l2_ctrl *ctrls[MAX_CID_CONTROLS]; +}; + +struct camera_common_data { + struct camera_common_sensor_ops *ops; + struct v4l2_ctrl_handler *ctrl_handler; + struct device *dev; + const struct camera_common_frmfmt *frmfmt; + const struct camera_common_colorfmt *colorfmt; + struct dentry *debugdir; + struct 
camera_common_power_rail *power; + + struct v4l2_subdev subdev; + struct v4l2_ctrl **ctrls; + struct module *owner; + + struct sensor_properties sensor_props; + /* TODO: cleanup neeeded once all the sensors adapt new framework */ + struct tegracam_ctrl_handler *tegracam_ctrl_hdl; + struct regmap *regmap; + struct camera_common_pdata *pdata; + /* TODO: cleanup needed for priv once all the sensors adapt new framework */ + void *priv; + int numctrls; + int csi_port; + int numlanes; + int mode; + int mode_prop_idx; + int numfmts; + int def_mode, def_width, def_height; + int def_clk_freq; + int fmt_width, fmt_height; + int sensor_mode_id; + bool use_sensor_mode_id; + bool override_enable; + u32 version; +}; + +struct camera_common_focuser_data; + +struct camera_common_focuser_ops { + int (*power_on)(struct camera_common_focuser_data *s_data); + int (*power_off)(struct camera_common_focuser_data *s_data); + int (*load_config)(struct camera_common_focuser_data *s_data); + int (*ctrls_init)(struct camera_common_focuser_data *s_data); +}; + +struct camera_common_focuser_data { + struct camera_common_focuser_ops *ops; + struct v4l2_ctrl_handler *ctrl_handler; + struct v4l2_subdev subdev; + struct v4l2_ctrl **ctrls; + struct device *dev; + + struct nv_focuser_config config; + void *priv; + int pwr_dev; + int def_position; +}; + +static inline void msleep_range(unsigned int delay_base) +{ + usleep_range(delay_base * 1000, delay_base * 1000 + 500); +} + +static inline struct camera_common_data *to_camera_common_data( + const struct device *dev) +{ + if (sensor_common_parse_num_modes(dev)) + return container_of(dev_get_drvdata(dev), + struct camera_common_data, subdev); + return NULL; +} + +static inline struct camera_common_focuser_data *to_camera_common_focuser_data( + const struct device *dev) +{ + return container_of(dev_get_drvdata(dev), + struct camera_common_focuser_data, subdev); +} + +int camera_common_g_ctrl(struct camera_common_data *s_data, + struct v4l2_control 
*control); + +int camera_common_regulator_get(struct device *dev, + struct regulator **vreg, const char *vreg_name); +int camera_common_parse_clocks(struct device *dev, + struct camera_common_pdata *pdata); +int camera_common_parse_ports(struct device *dev, + struct camera_common_data *s_data); +int camera_common_mclk_enable(struct camera_common_data *s_data); +void camera_common_mclk_disable(struct camera_common_data *s_data); +int camera_common_parse_general_properties(struct device *dev, + struct camera_common_data *s_data); + +int camera_common_debugfs_show(struct seq_file *s, void *unused); +ssize_t camera_common_debugfs_write( + struct file *file, + char const __user *buf, + size_t count, + loff_t *offset); +int camera_common_debugfs_open(struct inode *inode, struct file *file); +void camera_common_remove_debugfs(struct camera_common_data *s_data); +void camera_common_create_debugfs(struct camera_common_data *s_data, + const char *name); + +const struct camera_common_colorfmt *camera_common_find_datafmt( + unsigned int code); +int camera_common_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_mbus_code_enum *code); +int camera_common_enum_fmt(struct v4l2_subdev *sd, unsigned int index, + unsigned int *code); +int camera_common_try_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf); +int camera_common_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf); +int camera_common_g_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf); +int camera_common_enum_framesizes(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_size_enum *fse); +int camera_common_enum_frameintervals(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_interval_enum *fie); +int camera_common_set_power(struct camera_common_data *data, int on); +int camera_common_s_power(struct v4l2_subdev *sd, int on); +void camera_common_dpd_disable(struct camera_common_data 
*s_data); +void camera_common_dpd_enable(struct camera_common_data *s_data); +int camera_common_get_mbus_config(struct v4l2_subdev *sd, + unsigned int pad, + struct v4l2_mbus_config *cfg); +int camera_common_get_framesync(struct v4l2_subdev *sd, + struct camera_common_framesync *vshs); + +/* Common initialize and cleanup for camera */ +int camera_common_initialize(struct camera_common_data *s_data, + const char *dev_name); +void camera_common_cleanup(struct camera_common_data *s_data); + +/* Focuser */ +int camera_common_focuser_init(struct camera_common_focuser_data *s_data); +int camera_common_focuser_s_power(struct v4l2_subdev *sd, int on); + +const struct camera_common_colorfmt *camera_common_find_pixelfmt( + unsigned int pix_fmt); + +/* common control layer init */ +int tegracam_ctrl_set_overrides(struct tegracam_ctrl_handler *handler); +int tegracam_ctrl_handler_init(struct tegracam_ctrl_handler *handler); +int tegracam_init_ctrl_ranges(struct tegracam_ctrl_handler *handler); +int tegracam_init_ctrl_ranges_by_mode( + struct tegracam_ctrl_handler *handler, + u32 modeidx); + +/* Regmap / RTCPU I2C driver interface */ +struct tegra_i2c_rtcpu_sensor; +struct tegra_i2c_rtcpu_config; + +struct camera_common_i2c { + struct regmap *regmap; + struct tegra_i2c_rtcpu_sensor *rt_sensor; +}; + +int camera_common_i2c_init( + struct camera_common_i2c *sensor, + struct i2c_client *client, + struct regmap_config *regmap_config, + const struct tegra_i2c_rtcpu_config *rtcpu_config); + +int camera_common_i2c_aggregate( + struct camera_common_i2c *sensor, + bool start); + +int camera_common_i2c_set_frame_id( + struct camera_common_i2c *sensor, + int frame_id); + +int camera_common_i2c_read_reg8( + struct camera_common_i2c *sensor, + unsigned int addr, + u8 *data, + unsigned int count); + +int camera_common_i2c_write_reg8( + struct camera_common_i2c *sensor, + unsigned int addr, + const u8 *data, + unsigned int count); + +int camera_common_i2c_write_table_8( + struct 
camera_common_i2c *sensor, + const struct reg_8 table[], + const struct reg_8 override_list[], + int num_override_regs, u16 wait_ms_addr, u16 end_addr); + +#endif /* __camera_common__ */ diff --git a/include/media/camera_version_utils.h b/include/media/camera_version_utils.h new file mode 100644 index 00000000..5dee4e48 --- /dev/null +++ b/include/media/camera_version_utils.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * camera_version_utils.h - utilities for different kernel versions + * camera driver supports + * + * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved. + */ +#ifndef __camera_version_utils__ +#define __camera_version_utils__ + +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +int tegra_media_entity_init(struct media_entity *entity, u16 num_pads, + struct media_pad *pad, bool is_subdev, bool is_sensor); + +int tegra_media_create_link(struct media_entity *source, u16 source_pad, + struct media_entity *sink, u16 sink_pad, u32 flags); + +bool tegra_is_v4l2_subdev(struct media_entity *entity); + +bool tegra_v4l2_match_dv_timings(struct v4l2_dv_timings *t1, + struct v4l2_dv_timings *t2, + unsigned pclock_delta, + bool match_reduced_fps); + +int tegra_vb2_dma_init(struct device *dev, void **alloc_ctx, + unsigned int size, atomic_t *refcount); + +void tegra_vb2_dma_cleanup(struct device *dev, void *alloc_ctx, + atomic_t *refcount); + +#endif diff --git a/include/media/csi.h b/include/media/csi.h new file mode 100644 index 00000000..2f0a2d5d --- /dev/null +++ b/include/media/csi.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * NVIDIA Tegra CSI Device Header + * + * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved. 
+ */ + +#ifndef __CSI_H_ +#define __CSI_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "soc/tegra/camrtc-capture.h" + +#define MAX_CSI_BLOCK_LANES 4 +#define NUM_TPG_INSTANCE 6 + +#define csi_port_is_valid(port) (port > NVCSI_PORT_H ? 0 : 1) + +enum camera_gang_mode { + CAMERA_NO_GANG_MODE = 0, + CAMERA_GANG_L_R = 1, + CAMERA_GANG_T_B, + CAMERA_GANG_R_L, + CAMERA_GANG_B_T +}; + +struct tegra_channel; + +struct tpg_frmfmt { + struct v4l2_frmsize_discrete frmsize; + int pixel_format; + int framerate; + int h_blank; + int v_blank; +}; + +struct tegra_csi_port { + void __iomem *pixel_parser; + void __iomem *cil; + void __iomem *tpg; + + u32 csi_port; + u32 stream_id; + u32 virtual_channel_id; + + /* One pair of sink/source pad has one format */ + struct v4l2_mbus_framefmt format; + const struct tegra_video_format *core_format; + unsigned int lanes; + unsigned int framerate; + unsigned int h_blank; + unsigned int v_blank; +}; + +struct tegra_csi_device { + struct device *dev; + struct platform_device *pdev; + char devname[32]; + void __iomem *iomem_base; + void __iomem *iomem[3]; + struct clk *plld_dsi; + struct clk *plld; + + struct camera_common_data s_data[6]; + struct tegra_csi_port *ports; + struct media_pad *pads; + + unsigned int clk_freq; + int num_ports; + int num_channels; + struct list_head csi_chans; + struct tegra_csi_channel *tpg_start; + const struct tegra_csi_fops *fops; + const struct tpg_frmfmt *tpg_frmfmt_table; + unsigned int tpg_frmfmt_table_size; + bool tpg_gain_ctrl; + bool tpg_emb_data_config; + int (*get_tpg_settings)(struct tegra_csi_port *port, + union nvcsi_tpg_config *const tpg_config); + atomic_t power_ref; + + struct dentry *debugdir; + struct mutex source_update; + int tpg_active; + int sensor_active; + /* num_tpg_channels is a fixed number per soc*/ + int num_tpg_channels; +}; + +/* + * subdev: channel subdev + * numports: Number of CSI ports in use for this channel + * 
numlanes: Number of CIL lanes in use + */ +struct tegra_csi_channel { + struct list_head list; + struct v4l2_subdev subdev; + struct media_pad *pads; + struct media_pipeline pipe; + struct v4l2_subdev *sensor_sd; + + struct tegra_csi_device *csi; + struct tegra_csi_port *ports; + unsigned char port[TEGRA_CSI_BLOCKS]; + struct mutex format_lock; + unsigned int numports; + unsigned int numlanes; + unsigned int pg_mode; + struct camera_common_data *s_data; + unsigned int id; + atomic_t is_streaming; + + struct device_node *of_node; +}; + +static inline struct tegra_csi_channel *to_csi_chan(struct v4l2_subdev *subdev) +{ + return container_of(subdev, struct tegra_csi_channel, subdev); +} + +static inline struct tegra_csi_device *to_csi(struct v4l2_subdev *subdev) +{ + struct tegra_csi_channel *chan = to_csi_chan(subdev); + + return chan->csi; +} + +u32 read_phy_mode_from_dt(struct tegra_csi_channel *chan); +u32 read_settle_time_from_dt(struct tegra_csi_channel *chan); +u64 read_mipi_clk_from_dt(struct tegra_csi_channel *chan); +void set_csi_portinfo(struct tegra_csi_device *csi, + unsigned int port, unsigned int numlanes); +void tegra_csi_status(struct tegra_csi_channel *chan, int port_idx); +int tegra_csi_error(struct tegra_csi_channel *chan, int port_idx); +int tegra_csi_start_streaming(struct tegra_csi_channel *chan, int port_idx); +void tegra_csi_stop_streaming(struct tegra_csi_channel *chan, int port_idx); +int tegra_csi_tpg_set_gain(struct v4l2_subdev *sd, void *arg); +void tegra_csi_error_recover(struct tegra_csi_channel *chan, int port_idx); +int tegra_csi_init(struct tegra_csi_device *csi, + struct platform_device *pdev); +int tegra_csi_mipi_calibrate(struct tegra_csi_device *csi, + bool on); +int tegra_csi_media_controller_init(struct tegra_csi_device *csi, + struct platform_device *pdev); +int tegra_csi_media_controller_remove(struct tegra_csi_device *csi); +struct tegra_csi_device *tegra_get_mc_csi(void); +int tpg_csi_media_controller_init(struct 
tegra_csi_device *csi, int pg_mode); +void tpg_csi_media_controller_cleanup(struct tegra_csi_device *csi); +int tegra_csi_power(struct tegra_csi_device *csi, + struct tegra_csi_channel *chan, int enable); +int tegra_csi_error_recovery(struct tegra_channel *chan, + struct tegra_csi_device *csi, struct tegra_csi_channel *csi_chan); + +/* helper functions to calculate clock setting times */ +unsigned int tegra_csi_clk_settling_time( + struct tegra_csi_device *csi, + const unsigned int csicil_clk_mhz); +unsigned int tegra_csi_ths_settling_time( + struct tegra_csi_device *csi, + const unsigned int csicil_clk_mhz, + const unsigned int mipi_clk_mhz); +#endif diff --git a/include/media/csi4_registers.h b/include/media/csi4_registers.h new file mode 100644 index 00000000..561173d3 --- /dev/null +++ b/include/media/csi4_registers.h @@ -0,0 +1,211 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Tegra 18x CSI register offsets + * + * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved. + */ + +#ifndef __CSI4_REGISTERS_H__ +#define __CSI4_REGISTERS_H__ + +#define CSI4_BASE_ADDRESS 0x18000 +#define CSI4_PHY_OFFSET 0x10000 +#define CSI4_STREAM_OFFSET 0x800 + +#define CSI_PORTS (6) +#define PHY_BRICKS (3) + +/* NVCSI registers. 
Starts from 0x0 */ +#define CFG_NVCSI_INCR_SYNCPT_CNTRL 0x04 + +/* NVCSI_STREAM registers */ +#define TEGRA_CSI_STREAM_0_BASE 0x010000 +#define TEGRA_CSI_STREAM_1_BASE 0x010800 +#define TEGRA_CSI_STREAM_2_BASE 0x020000 +#define TEGRA_CSI_STREAM_3_BASE 0x020800 +#define TEGRA_CSI_STREAM_4_BASE 0x030000 +#define TEGRA_CSI_STREAM_5_BASE 0x030800 + +#define PP_EN_CTRL 0x08 +#define CFG_PP_EN (0x1 << 0) + +#define PPFSM_TIMEOUT_CTRL 0x6c +#define CFG_TIMEOUT_EN (0x1 << 31) +#define CFG_TIMEOUT_PERIOD (0x7fffffff << 0) + +#define VC0_DT_OVERRIDE 0x20 +#define CFG_VC0_DT_OVERRIDE_EN (0x1 << 31) +#define CFG_VC0_DT_OVERRIDE (0x3f << 0) + +#define PH_CHK_CTRL 0x70 +#define CFG_PH_CRC_CHK_EN (0x1 << 1) +#define CFG_PH_ECC_CHK_EN (0x1 << 0) + +#define VC0_DPCM_CTRL 0x74 +#define CFG_VC0_DPCM_COMPRESSION_RATIO (0xf << 0) + +#define ERROR_STATUS2VI_MASK 0x90 + +/* T186 TPG */ +#define TPG_EN_0 0x0b8 +#define cfg_tpg_en 0x1 +/* NVCSI_STREAM Legacy T210 PG*/ +#define PG_CTRL 0x194 +#define PG_MODE_OFFSET 2 +#define PG_ENABLE 0x1 +#define PG_DISABLE 0x0 +#define PG_BLANK 0x198 +#define PG_VBLANK_MASK 0xffff +#define PG_HBLANK_MASK 0xffff +#define PG_VBLANK_OFFSET 16 +#define PG_HBLANK_OFFSET 0 +#define PG_PHASE 0x19c +#define PG_RED_FREQ 0x1a0 +#define PG_VERT_INIT_FREQ_OFFSET 16 +#define PG_HOR_INIT_FREQ_OFFSET 0 +#define PG_RED_FREQ_RATE 0x1a4 +#define PG_GREEN_FREQ 0x1a8 +#define PG_GREEN_FREQ_RATE 0x1ac +#define PG_BLUE_FREQ 0x1b0 +#define PG_BLUE_FREQ_RATE 0X1b4 +#define PG_AOHDR 0x1b8 +#define PG_IMAGE_SIZE 0x1bc +#define HEIGHT_OFFSET 16 +#define PG_IMAGE_DT 0x1c0 + +/* TODO - double check if rr_status2vi_vc0:[0] means bit or value */ +#define ERROR_STATUS2VI_VC0 0x94 +#define ERROR_STATUS2VI_VC1 0x98 +#define ERROR_STATUS2VI_VC2 0x9c +#define ERROR_STATUS2VI_VC3 0xa0 +#define ERR_STATUS2VI_VC (0xf << 0) +#define ERR_PP_FSM_TIMEOUT (0) +#define ERR_PH_ECC_SINGLE_BIT (1) +#define ERR_PACKET_PAYLOAD_CRC (2) +#define ERR_PACKET_PAYLOAD_LESS (3) + +#define INTR_STATUS 0xa4 
+#define INTR_MASK 0xa8 +#define PD_CRC_ERR_VC0 (0x1 << 2) +#define PH_ECC_SINGLE_BIT_ERR_VC0 (0x1 << 1) +#define PH_ECC_MULTI_BIT_ERR (0x1 << 16) +#define ERR_INTR_STATUS 0xac +#define ERR_INTR_MASK 0xb0 +#define MASK_PH_CRC_ERR (0x1 << 17) +#define MASK_PH_ECC_MULTI_BIT_ERR (0x1 << 16) +#define MASK_PD_WC_SHORT_ERR_VC3 (0x1 << 15) +#define MASK_PD_CRC_ERR_VC3 (0x1 << 14) +#define MASK_PH_ECC_SINGLE_BIT_ERR_VC3 (0x1 << 13) +#define MASK_PPFSM_TIMEOUT_VC3 (0x1 << 12) +#define MASK_PD_WC_SHORT_ERR_VC2 (0x1 << 11) +#define MASK_PD_CRC_ERR_VC2 (0x1 << 10) +#define MASK_PH_ECC_SINGLE_BIT_ERR_VC2 (0x1 << 9) +#define MASK_PPFSM_TIMEOUT_VC2 (0x1 << 8) +#define MASK_PD_WC_SHORT_ERR_VC1 (0x1 << 7) +#define MASK_PD_CRC_ERR_VC1 (0x1 << 6) +#define MASK_PH_ECC_SINGLE_BIT_ERR_VC1 (0x1 << 5) +#define MASK_PPFSM_TIMEOUT_VC1 (0x1 << 4) +#define MASK_PD_WC_SHORT_ERR_VC0 (0x1 << 3) +#define MASK_PD_CRC_ERR_VC0 (0x1 << 2) +#define MASK_PH_ECC_SINGLE_BIT_ERR_VC0 (0x1 << 1) +#define MASK_PPFSM_TIMEOUT_VC0 (0x1 << 0) +/* For ERR_INTR_MASK and ERR_INTR_MASK */ +#define MASK_HSM_INTR_SW_TRIGGER (0x1 << 18) + +/* NVCSI_PHY CIL registers */ +#define NVCSI_PHY_0_CILA_BASE 0x010400 +#define NVCSI_PHY_0_CILB_BASE 0x010C00 +#define NVCSI_PHY_1_CILA_BASE 0x020400 +#define NVCSI_PHY_1_CILB_BASE 0x020C00 +#define NVCSI_PHY_2_CILA_BASE 0x030400 +#define NVCSI_PHY_2_CILB_BASE 0x030C00 + +#define CILA_INTR_STATUS 0x400 +#define CILA_INTR_MASK 0x404 +#define CILA_ERR_INTR_STATUS 0x408 +#define CILA_ERR_INTR_MASK 0x40c +#define CILB_INTR_STATUS 0xc00 +#define CILB_INTR_MASK 0xc04 +#define CILB_ERR_INTR_STATUS 0xc08 +#define CILB_ERR_INTR_MASK 0xc0c + +/* NVCSI_PHY registers */ +#define NVCSI_CIL_PHY_CTRL 0x00 +#define CFG_PHY_MODE (0x1 << 0) +#define DPHY (0) +#define CPHY (1) + +#define NVCSI_CIL_CONFIG 0x04 +#define DATA_LANE_B_OFFSET 0x8 +#define DATA_LANE_A_OFFSET 0x0 +#define DATA_LANE_B (0x7 << DATA_LANE_B_OFFSET) +#define DATA_LANE_A (0x7 << DATA_LANE_A_OFFSET) + +#define NVCSI_CIL_PAD_CONFIG 
0x0c +#define LOADADJ (0xf << 12) +#define PDVCLAMP (0x1 << 9) +#define E_VCLAMP (0x1 << 8) +#define SPARE_TOP (0xff << 0) + +#define NVCSI_CIL_A_SW_RESET 0x18 +#define NVCSI_CIL_B_SW_RESET 0x7c +#define SW_RESET1_EN (0x1 << 1) +#define SW_RESET0_EN (0x1 << 0) + +#define NVCSI_CIL_A_PAD_CONFIG 0x20 +#define NVCSI_CIL_B_PAD_CONFIG 0x84 +#define E_INPUT_LP_IO1_SHIFT 22 +#define E_INPUT_LP_IO0_SHIFT 21 +#define E_INPUT_LP_CLK_SHIFT 20 +#define E_INPUT_LP_IO1 (0x1 << 22) +#define E_INPUT_LP_IO0 (0x1 << 21) +#define E_INPUT_LP_CLK (0x1 << 20) +#define BANDWD_IN (0x1 << 19) +#define PD_CLK (0x1 << 18) +#define PD_IO1 (0x1 << 17) +#define PD_IO0 (0x1 << 16) +#define PD_CLK_SHIFT 18 +#define PD_IO1_SHIFT 17 +#define PD_IO0_SHIFT 16 +#define SPARE_CLK (0x1 << 8) +#define SPARE_IO1 (0x1 << 4) +#define SPARE_IO0 (0x1 << 0) + +#define NVCSI_CIL_A_CONTROL 0x5c +#define NVCSI_CIL_B_CONTROL 0xc0 +#define T18X_BYPASS_LP_SEQ_SHIFT 7 +#define DESCKEW_COMPARE_SHIFT 20 +#define DESCKEW_SETTLE_SHIFT 16 +#define CLK_SETTLE_SHIFT 8 +#define THS_SETTLE_SHIFT 0 +#define DEFAULT_DESKEW_COMPARE (0x4 << DESCKEW_COMPARE_SHIFT) +#define DEFAULT_DESKEW_SETTLE (0x6 << DESCKEW_SETTLE_SHIFT) +#define DEFAULT_DPHY_CLK_SETTLE (0x21 << CLK_SETTLE_SHIFT) +#define DEFAULT_CPHY_CLK_SETTLE (0x1 << CLK_SETTLE_SHIFT) +#define T18X_BYPASS_LP_SEQ (0x1 << T18X_BYPASS_LP_SEQ_SHIFT) +#define RESET_DESKEW_COMPARE (0x1 << DESCKEW_COMPARE_SHIFT) +#define RESET_DESKEW_SETTLE (0x1 << DESCKEW_SETTLE_SHIFT) +#define DEFAULT_THS_SETTLE (0x14 << THS_SETTLE_SHIFT) + +#define NVCSI_CIL_A_POLARITY_SWIZZLE_CTRL 0x58 +#define NVCSI_CIL_B_POLARITY_SWIZZLE_CTRL 0xbc +#define NVCSI_CIL_LANE_SWIZZLE_CTRL 0x10 + +#define NVCSI_CIL_A_DPHY_INADJ_CTRL (0x24) +#define NVCSI_CIL_A_CPHY_INADJ_CTRL (0x28) +#define NVCSI_CIL_B_DPHY_INADJ_CTRL (0x88) +#define NVCSI_CIL_B_CPHY_INADJ_CTRL (0x8c) +#define DEFAULT_SW_SET_DPHY_INADJ_IO0 (0x1 << 6) +#define DEFAULT_SW_SET_DPHY_INADJ_IO1 (0x1 << 14) +#define DEFAULT_DPHY_INADJ_IO0 (0xc) +#define 
DEFAULT_DPHY_INADJ_IO1 (0xc << 8) +#define DEFAULT_CPHY_EDGE_DELAY_TRIO0 (0x1 << 19) +#define DEFAULT_CPHY_EDGE_DELAY_TRIO1 (0x1 << 23) + + +/* MIPICAL */ +#define NVCSI_CIL_A_BASE 0x18 +#define NVCSI_CIL_B_BASE 0x7c +#define PAD_CONFIG_0 0x8 + +#endif /* __CSI4_REGISTERS_H__ */ diff --git a/include/media/csi5_registers.h b/include/media/csi5_registers.h new file mode 100644 index 00000000..ae002c4f --- /dev/null +++ b/include/media/csi5_registers.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Tegra 19x CSI register offsets + * + * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved. + */ + + +#ifndef __CSI5_REGISTERS_H__ +#define __CSI5_REGISTERS_H__ + +#define CSI5_BASE_ADDRESS 0x011000 +#define CSI5_PHY_OFFSET 0x010000 + +#define CSI5_TEGRA_CSI_STREAM_0_BASE 0x10000 +#define CSI5_TEGRA_CSI_STREAM_2_BASE 0x20000 +#define CSI5_TEGRA_CSI_STREAM_4_BASE 0x30000 + +#define CSI5_NVCSI_CIL_A_SW_RESET 0x24 +#define CSI5_NVCSI_CIL_B_SW_RESET 0xb0 +#define CSI5_SW_RESET1_EN (0x1 << 1) +#define CSI5_SW_RESET0_EN (0x1 << 0) + +#define CSI5_E_INPUT_LP_IO1_SHIFT 22 +#define CSI5_E_INPUT_LP_IO0_SHIFT 21 +#define CSI5_E_INPUT_LP_CLK_SHIFT 20 +#define CSI5_E_INPUT_LP_IO1 (0x1 << 22) +#define CSI5_E_INPUT_LP_IO0 (0x1 << 21) +#define CSI5_E_INPUT_LP_CLK (0x1 << 20) +#define CSI5_PD_CLK (0x1 << 18) +#define CSI5_PD_IO1 (0x1 << 17) +#define CSI5_PD_IO0 (0x1 << 16) +#define CSI5_PD_CLK_SHIFT 18 +#define CSI5_PD_IO1_SHIFT 17 +#define CSI5_PD_IO0_SHIFT 16 + +/* MIPICAL */ +#define CSI5_NVCSI_CIL_A_BASE 0x24 +#define CSI5_NVCSI_CIL_B_BASE 0xb0 +#define CSI5_PAD_CONFIG_0 0x8 + +#endif /* __CSI5_REGISTERS_H__ */ diff --git a/include/media/fusa-capture/capture-common.h b/include/media/fusa-capture/capture-common.h new file mode 100644 index 00000000..bcf88748 --- /dev/null +++ b/include/media/fusa-capture/capture-common.h @@ -0,0 +1,236 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2022 NVIDIA Corporation. 
All rights reserved. + */ + +/** + * @file include/media/fusa-capture/capture-common.h + * + * @brief VI/ISP channel common operations header for the T186/T194 Camera RTCPU + * platform. + */ + +#ifndef __FUSA_CAPTURE_COMMON_H__ +#define __FUSA_CAPTURE_COMMON_H__ + +#include + +struct capture_buffer_table; +struct capture_mapping; + +/** + * @defgroup CAPTURE_PROGRESS_NOTIFIER_STATES + * + * Progress state of a capture request. + * + * @note PROGRESS_STATUS_DONE only means that the capture request has been + * completed, the descriptor status must still be read in order to + * determine whether or not it was successful or in error. + * + * @{ + */ + +/** Capture request is enqueued or in progress */ +#define PROGRESS_STATUS_BUSY (U32_C(0x1)) +/** Capture request is complete and the data can be consumed */ +#define PROGRESS_STATUS_DONE (U32_C(0x2)) + +/** @} */ + +/** + * @defgroup CAPTURE_BUFFER_OPS + * + * Capture surface buffer operations and DMA directions. + * + * @{ + */ + +/** @brief DMA @em to device data direction. */ +#define BUFFER_READ (U32_C(0x01)) + +/** @brief DMA @em from device data direction. */ +#define BUFFER_WRITE (U32_C(0x02)) + +/** @brief Add buffer to the channel's management table. */ +#define BUFFER_ADD (U32_C(0x04)) + +/** @brief DMA bidirectional data direction. */ +#define BUFFER_RDWR (BUFFER_READ | BUFFER_WRITE) + +/** @} */ + +/** @brief max pin count per request. Used to preallocate unpin list */ +#define MAX_PIN_BUFFER_PER_REQUEST (U32_C(24)) + + + +/** + * @brief Initialize the capture surface management table for SLAB allocations. 
+ * + * @param[in] dev Originating device (VI or ISP) + * + * @returns pointer to table on success, NULL on error + */ +struct capture_buffer_table *create_buffer_table( + struct device *dev); + +/** + * @brief Release all capture buffers and free the management table + * + * @param[in,out] tab Surface buffer management table + */ +void destroy_buffer_table( + struct capture_buffer_table *tab); + +/** + * @brief Perform a buffer management operation on a capture surface buffer. + * + * @param[in,out] tab Surface buffer management table + * @param[in] memfd FD or NvRm handle to buffer + * @param[in] flag Surface BUFFER_* op bitmask + * + * @returns 0 (success), neg. errno (failure) + */ +int capture_buffer_request( + struct capture_buffer_table *tab, + uint32_t memfd, + uint32_t flag); + +/** + * @brief Add a capture surface buffer to the buffer management table. + * + * @param[in,out] t Surface buffer management table + * @param[in] fd FD or NvRm handle to buffer + * + * @returns 0 (success), neg. errno (failure) + */ +int capture_buffer_add( + struct capture_buffer_table *t, + uint32_t fd); + +/** + * @brief Decrement refcount for buffer mapping, and release it if it reaches + * zero, unless it is a preserved mapping. + * + * @param[in,out] t Surface buffer management table + * @param[in,out] pin Surface buffer to unpin + */ +void put_mapping( + struct capture_buffer_table *t, + struct capture_mapping *pin); + +/** + * @brief Capture surface buffer context. + */ +struct capture_common_buf { + struct dma_buf *buf; /**< dma_buf context */ + struct dma_buf_attachment *attach; /**< dma_buf attachment context */ + struct sg_table *sgt; /**< scatterlist table */ + dma_addr_t iova; /**< dma address */ + void *va; /**< virtual address for kernel access */ +}; + +/** + * @brief List of buffers to unpin for a capture request. + */ +struct capture_common_unpins { + uint32_t num_unpins; /**< No. 
of entries in data[] */ + struct capture_mapping *data[MAX_PIN_BUFFER_PER_REQUEST]; /**< Surface buffers to unpin */ +}; + +/** + * @brief Progress status notifier handle. + */ +struct capture_common_status_notifier { + struct dma_buf *buf; /**< dma_buf handle */ + void *va; /**< buffer virtual mapping to kernel address space */ + uint32_t offset; /**< status notifier offset [byte] */ +}; + +/** + * @brief Setup the progress status notifier handle + * + * @param[in] status_notifer Progress status notifier handle + * @param[in] mem FD or NvRm handle to buffer + * @param[in] buffer_size Buffer size [byte] + * @param[in] mem_offset Status notifier offset [byte] + * + * @returns 0 (success), neg. errno (failure) + */ +int capture_common_setup_progress_status_notifier( + struct capture_common_status_notifier *status_notifier, + uint32_t mem, + uint32_t buffer_size, + uint32_t mem_offset); + +/** + * @brief Release the progress status notifier handle. + * + * @param[in,out] progress_status_notifier Progress status notifier + * handle to release + * + * @returns 0 + */ +int capture_common_release_progress_status_notifier( + struct capture_common_status_notifier *progress_status_notifier); + +/** + * @brief Update the progress status for a capture request. + * + * @param[in] progress_status_notifier Progress status notifier handle + * @param[in] buffer_slot Capture descriptor index + * @param[in] buffer_depth Capture descriptor queue size + * @param[in] new_val Progress status to set + * + * @returns 0 (success), neg. errno (failure) + */ +int capture_common_set_progress_status( + struct capture_common_status_notifier *progress_status_notifier, + uint32_t buffer_slot, + uint32_t buffer_depth, + uint8_t new_val); + +/** + * @brief Pins buffer memory, returns dma_buf handles for unpinning. 
+ * + * @param[in] dev target device (rtcpu) + * @param[in] mem FD or NvRm handle to buffer + * @param[out] unpin_data struct w/ dma_buf handles for unpinning + * + * @returns 0 (success), neg. errno (failure) + */ +int capture_common_pin_memory( + struct device *dev, + uint32_t mem, + struct capture_common_buf *unpin_data); + +/** + * @brief Unpins buffer memory, releasing dma_buf resources. + * + * @param[in,out] unpin_data data handle to be unpinned + */ +void capture_common_unpin_memory( + struct capture_common_buf *unpin_data); + +/** + * @brief Pins (maps) the physical address for provided capture surface address + * and updates the iova pointer. + * + * @param[in,out] buf_ctx Surface buffer management table + * @param[in] mem_handle Memory handle (descriptor). Can be NULL, + * in this case function will do nothing and + * and return 0. This is to simplify handling of + * capture descriptors data fields, NULL indicates + * unused memory surface. + * @param[in] mem_offset Offset inside memory buffer + * @param[out] meminfo_base_address Surface iova address, including offset + * @param[out] meminfo_size Size of iova range, excluding offset + * @param[in,out] unpins Unpin data used to unref/unmap buffer + * after capture + */ +int capture_common_pin_and_get_iova(struct capture_buffer_table *buf_ctx, + uint32_t mem_handle, uint64_t mem_offset, + uint64_t *meminfo_base_address, uint64_t *meminfo_size, + struct capture_common_unpins *unpins); + +#endif /* __FUSA_CAPTURE_COMMON_H__*/ diff --git a/include/media/fusa-capture/capture-isp-channel.h b/include/media/fusa-capture/capture-isp-channel.h new file mode 100644 index 00000000..4d3fe1d1 --- /dev/null +++ b/include/media/fusa-capture/capture-isp-channel.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2022 NVIDIA Corporation. All rights reserved. 
+ */ + +/** + * @file include/media/fusa-capture/capture-isp-channel.h + * + * @brief ISP channel character device driver header for the T186/T194 Camera + * RTCPU platform. + */ + +#ifndef __FUSA_CAPTURE_ISP_CHANNEL_H__ +#define __FUSA_CAPTURE_ISP_CHANNEL_H__ + +#include + +struct isp_channel_drv; + +/** + * @brief ISP fops for Host1x syncpt/gos allocations + * + * This fops is a HAL for chip/IP generations, see the respective VI platform + * drivers for the implementations. + */ +struct isp_channel_drv_ops { + /** + * @brief Request a syncpt allocation from Host1x. + * + * @param[in] pdev ISP platform_device + * @param[in] name syncpt name + * @param[out] syncpt_id assigned syncpt id + * + * @returns 0 (success), neg. errno (failure) + */ + int (*alloc_syncpt)( + struct platform_device *pdev, + const char *name, + uint32_t *syncpt_id); + + /** + * @brief Release a syncpt to Host1x. + * + * @param[in] pdev ISP platform_device + * @param[in] id syncpt id to free + */ + void (*release_syncpt)( + struct platform_device *pdev, + uint32_t id); + + /** + * @brief Retrieve the GoS table allocated in the ISP-THI carveout. + * + * @param[in] pdev ISP platform_device + * @param[out] table GoS table pointer + */ + uint32_t (*get_gos_table)( + struct platform_device *pdev, + const dma_addr_t **table); + + /** + * @brief Get a syncpt's GoS backing in the ISP-THI carveout. + * + * @param[in] pdev ISP platform_device + * @param[in] id syncpt id + * @param[out] gos_index GoS id + * @param[out] gos_offset Offset of syncpt within GoS [dword] + * + * @returns 0 (success), neg. errno (failure) + */ + int (*get_syncpt_gos_backing)( + struct platform_device *pdev, + uint32_t id, + dma_addr_t *syncpt_addr, + uint32_t *gos_index, + uint32_t *gos_offset); +}; + +/** + * @brief ISP channel context (character device). 
+ */ +struct tegra_isp_channel { + struct device *isp_dev; /**< ISP device */ + struct platform_device *ndev; /**< ISP platform_device */ + struct isp_channel_drv *drv; /**< ISP channel driver context */ + void *priv; /**< ISP channel private context */ + struct isp_capture *capture_data; /**< ISP channel capture context */ + const struct isp_channel_drv_ops *ops; /**< ISP syncpt/gos fops */ +}; + +/** + * @brief Create the ISP channels driver contexts, and instantiate + * MAX_ISP_CHANNELS many channel character device nodes. + * + * ISP channel nodes appear in the filesystem as: + * /dev/capture-isp-channel{0..MAX_ISP_CHANNELS-1} + * + * @param[in] pdev ISP platform_device context + * @param[in] ops isp_channel_drv_ops fops + * + * @returns 0 (success), neg. errno (failure) + */ +int isp_channel_drv_register( + struct platform_device *pdev, + const struct isp_channel_drv_ops *ops); + +/** + * @brief Destroy the ISP channels driver and all character device nodes. + * + * The ISP channels driver and associated channel contexts in memory are freed, + * rendering the ISP platform driver unusable until re-initialized. + * + * @param[in] dev ISP device context + */ +void isp_channel_drv_unregister( + struct device *dev); + +int isp_channel_drv_init(void); +void isp_channel_drv_exit(void); +#endif /* __FUSA_CAPTURE_ISP_CHANNEL_H__ */ diff --git a/include/media/fusa-capture/capture-isp.h b/include/media/fusa-capture/capture-isp.h new file mode 100644 index 00000000..6a55f1b2 --- /dev/null +++ b/include/media/fusa-capture/capture-isp.h @@ -0,0 +1,361 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2022 NVIDIA Corporation. All rights reserved. + */ + +/** + * @file include/media/fusa-capture/capture-isp.h + * + * @brief ISP channel operations header for the T186/T194 Camera RTCPU platform. 
+ */ + +#ifndef __FUSA_CAPTURE_ISP_H__ +#define __FUSA_CAPTURE_ISP_H__ + +#if defined(__KERNEL__) +#include +#include +#else +#include +#endif + +#include + +#define __ISP_CAPTURE_ALIGN __aligned(8) + +struct tegra_isp_channel; + +/** + * @brief ISP descriptor relocs config. + */ +struct capture_isp_reloc { + uint32_t num_relocs; /**< No. of buffers to pin/reloc */ + uint32_t __pad; + uint64_t reloc_relatives; + /**< Offsets buffer addresses to patch in descriptor */ +} __ISP_CAPTURE_ALIGN; + +/** + * @brief ISP channel setup config (IOCTL payload). + * + * These fields are used to set up the ISP channel and capture contexts, and + * will be copied verbatim in the IVC capture_channel_isp_config struct to + * allocate ISP resources in the RCE subsystem. + */ +struct isp_capture_setup { + uint32_t channel_flags; + /**< + * Bitmask for channel flags, see @ref CAPTURE_ISP_CHANNEL_FLAGS + */ + uint32_t __pad_flags; + + /* ISP process capture descriptor queue (ring buffer) */ + uint32_t queue_depth; + /**< No. of process capture descriptors in queue */ + uint32_t request_size; + /**< Size of a single process capture descriptor [byte] */ + uint32_t mem; /**< Process capture descriptors queue NvRm handle */ + + /* ISP process program descriptor queue (ring buffer) */ + uint32_t isp_program_queue_depth; + /**< No. of process program descriptors in queue */ + uint32_t isp_program_request_size; + /**< Size of a single process program descriptor [byte] */ + uint32_t isp_program_mem; + /**< Process program descriptors queue NvRm handle */ + uint32_t error_mask_correctable; + /**< + * Bitmask for correctable channel errors. See + * @ref CAPTURE_ISP_CHANNEL_ERRORS + */ + uint32_t error_mask_uncorrectable; + /**< + * Bitmask for uncorrectable channel errors. See + * @ref CAPTURE_ISP_CHANNEL_ERRORS + */ +} __ISP_CAPTURE_ALIGN; + +/** + * @brief ISP capture info (resp. to query). 
+ */ +struct isp_capture_info { + struct isp_capture_syncpts { + uint32_t progress_syncpt; /**< Progress syncpoint id */ + uint32_t progress_syncpt_val; /**< Progress syncpoint value */ + uint32_t stats_progress_syncpt; + /**< Stats progress syncpoint id */ + uint32_t stats_progress_syncpt_val; + /**< Stats progress syncpoint value */ + } syncpts; + uint32_t channel_id; /**< RCE-assigned ISP FW channel id */ +} __ISP_CAPTURE_ALIGN; + +/** + * @brief ISP process capture request (IOCTL payload). + */ +struct isp_capture_req { + uint32_t buffer_index; /**< Process descriptor index */ + uint32_t __pad; + struct capture_isp_reloc isp_relocs; + /**< + * Surface buffers pin/reloc config. See @ref capture_isp_reloc + */ + struct capture_isp_reloc inputfences_relocs; + /**< + * Inputfences to pin/reloc. config. See @ref capture_isp_reloc + */ + uint32_t gos_relative; /* GoS offset [byte] */ + uint32_t sp_relative; /* Syncpt offset [byte] */ + struct capture_isp_reloc prefences_relocs; + /**< + * Prefences to pin/reloc. config. See @ref capture_isp_reloc + */ +} __ISP_CAPTURE_ALIGN; + +/** + * @brief ISP process program request (IOCTL payload). + */ +struct isp_program_req { + uint32_t buffer_index; /**< Program descriptor index. */ + uint32_t __pad; + struct capture_isp_reloc isp_program_relocs; + /**< + * Push buffers to pin/reloc. config. See + * @ref capture_isp_reloc + */ +} __ISP_CAPTURE_ALIGN; + +/** + * @brief ISP joint capture+program request (IOCTL payload). + */ +struct isp_capture_req_ex { + struct isp_capture_req capture_req; /**< ISP capture process request */ + struct isp_program_req program_req; /**< ISP program process request */ + uint32_t __pad[4]; +} __ISP_CAPTURE_ALIGN; + +/** + * @brief ISP capture progress status setup config (IOCTL payload). 
+ */ +struct isp_capture_progress_status_req { + uint32_t mem; /**< NvRm handle to buffer region start */ + uint32_t mem_offset; /**< Status notifier offset [byte] */ + uint32_t process_buffer_depth; + /**< Process capture descriptor queue size [num] */ + uint32_t program_buffer_depth; + /**< Process program descriptor queue size [num] */ + uint32_t __pad[4]; +} __ISP_CAPTURE_ALIGN; + +/** + * @brief Add ISP capture buffer to management table (IOCTL payload). + */ +struct isp_buffer_req { + uint32_t mem; /**< NvRm handle to buffer */ + uint32_t flag; /**< Buffer @ref CAPTURE_BUFFER_OPS bitmask */ +} __ISP_CAPTURE_ALIGN; + +/** + * @brief Initialize an ISP channel capture context (at channel open). + * + * The ISP channel context is already partially-initialized by the calling + * function, the channel capture context is allocated and linked here. + * + * @param[in,out] chan Allocated ISP channel context, + * partially-initialized + * + * @returns 0 (success), neg. errno (failure) + */ +int isp_capture_init( + struct tegra_isp_channel *chan); + +/** + * @brief De-initialize an ISP capture channel, closing open ISP streams, and + * freeing the buffer management table and channel capture context. + * + * The ISP channel context is not freed in this function, only the capture + * context is. + * + * The ISP channel should have been RESET and RELEASE'd when this function is + * called, but they may still be active due to programming error or client UMD + * crash. In such cases, they will be called automatically by the @em Abort + * functionality. + * + * @param[in,out] chan ISP channel context + */ +void isp_capture_shutdown( + struct tegra_isp_channel *chan); + +/** + * @brief Open an ISP channel in RCE, sending channel configuration to request a + * SW channel allocation. Syncpts are allocated by the KMD in this subroutine. + * + * @param[in,out] chan ISP channel context + * @param[in] setup ISP channel setup config + * + * @returns 0 (success), neg. 
errno (failure) + */ +int isp_capture_setup( + struct tegra_isp_channel *chan, + struct isp_capture_setup *setup); + +/** + * @brief Reset an opened ISP channel, all pending process requests to RCE are + * discarded. + * + * The channel's progress syncpoint is advanced to the threshold of the latest + * capture/program request to unblock any waiting observers. + * + * A reset barrier may be enqueued in the capture IVC channel to flush stale + * capture/program descriptors, in case of abnormal channel termination. + * + * @param[in] chan ISP channel context + * @param[in] reset_flags Bitmask for ISP channel reset options + * (CAPTURE_CHANNEL_RESET_FLAG_*) + * + * @returns 0 (success), neg. errno (failure) + */ +int isp_capture_reset( + struct tegra_isp_channel *chan, + uint32_t reset_flags); + +/** + * @brief Release an opened ISP channel; the RCE channel allocation, syncpoints + * and IVC channel callbacks are released. + * + * @param[in] chan ISP channel context + * @param[in] reset_flags Bitmask for ISP channel reset options + * (CAPTURE_CHANNEL_RESET_FLAG_*) + * + * @returns 0 (success), neg. errno (failure) + */ +int isp_capture_release( + struct tegra_isp_channel *chan, + uint32_t reset_flags); + +/** + * @brief Query an ISP channel's syncpoint ids and values, and retrieve the + * RCE-assigned ISP FW channel id. + * + * @param[in] chan ISP channel context + * @param[out] info ISP channel info response + * + * @returns 0 (success), neg. errno (failure) + */ +int isp_capture_get_info( + struct tegra_isp_channel *chan, + struct isp_capture_info *info); + +/** + * @brief Send a capture (aka. process) request for a frame via the capture IVC + * channel to RCE. + * + * This is a non-blocking call. + * + * @param[in] chan ISP channel context + * @param[in] req ISP process capture request + * + * @returns 0 (success), neg. 
errno (failure) + */ +int isp_capture_request( + struct tegra_isp_channel *chan, + struct isp_capture_req *req); + +/** + * @brief Wait on receipt of the capture status of the head of the capture + * request FIFO queue to RCE. The RCE ISP driver sends a CAPTURE_ISP_STATUS_IND + * notification at frame completion. + * + * This is a blocking call, with the possibility of timeout. + * + * @todo The capture progress status notifier is expected to replace this + * functionality in the future, deprecating it. + * + * @param[in] chan ISP channel context + * @param[in] timeout_ms Time to wait for status completion [ms], set to + * 0 for indefinite + * + * @returns 0 (success), neg. errno (failure) + */ +int isp_capture_status( + struct tegra_isp_channel *chan, + int32_t timeout_ms); + +/** + * @brief Send a program request containing an ISP pushbuffer configuration via + * the capture IVC channel to RCE. + * + * This is a non-blocking call. + * + * @param[in] chan ISP channel context + * @param[in] req ISP program request + * + * @returns 0 (success), neg. errno (failure) + */ +int isp_capture_program_request( + struct tegra_isp_channel *chan, + struct isp_program_req *req); + +/** + * @brief Wait on receipt of the program status of the head of the program + * request FIFO queue to RCE. The RCE ISP driver sends a + * CAPTURE_ISP_PROGRAM_STATUS_IND notification at completion. + * + * This is a blocking call, with no possibility of timeout; as programs may be + * reused for multiple frames. + * + * @todo The capture progress status notifier is expected to replace this + * functionality in the future, deprecating it. + * + * @param[in] chan ISP channel context + * + * @returns 0 (success), neg. errno (failure) + */ +int isp_capture_program_status( + struct tegra_isp_channel *chan); + +/** + * @brief Send an extended capture (aka. process) request for a frame, + * containing the ISP pushbuffer program to execute via the capture IVC channel + * to RCE. 
+ * + * The extended call is equivalent to separately sending a capture and a program + * request for every frame; it is an optimization to reduce the number of + * system context switches from IOCTL and IVC calls. + * + * This is a non-blocking call. + * + * @param[in] chan ISP channel context + * @param[in] req ISP extended process request + * + * @returns 0 (success), neg. errno (failure) + */ +int isp_capture_request_ex( + struct tegra_isp_channel *chan, + struct isp_capture_req_ex *req); + +/** + * @brief Setup ISP channel capture status progress notifier + * + * @param[in] chan ISP channel context + * @param[in] req ISP capture progress status setup config + * + * @returns 0 (success), neg. errno (failure) + */ +int isp_capture_set_progress_status_notifier( + struct tegra_isp_channel *chan, + struct isp_capture_progress_status_req *req); + +/** + * @brief Perform a buffer management operation on an ISP capture buffer. + * + * @param[in] chan ISP channel context + * @param[in] req ISP capture buffer request + * + * @returns 0 (success), neg. errno (failure) + */ +int isp_capture_buffer_request( + struct tegra_isp_channel *chan, + struct isp_buffer_req *req); + +#endif /* __FUSA_CAPTURE_ISP_H__ */ diff --git a/include/media/fusa-capture/capture-vi-channel.h b/include/media/fusa-capture/capture-vi-channel.h new file mode 100644 index 00000000..5d91420a --- /dev/null +++ b/include/media/fusa-capture/capture-vi-channel.h @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2022, NVIDIA Corporation. All rights reserved. + */ + +/** + * @file include/media/fusa-capture/capture-vi-channel.h + * + * @brief VI channel character device driver header for the T186/T194 Camera + * RTCPU platform. 
+ */ + +#ifndef __FUSA_CAPTURE_VI_CHANNEL_H__ +#define __FUSA_CAPTURE_VI_CHANNEL_H__ + +#include + +struct vi_channel_drv; + +/** + * @brief VI fops for Host1x syncpt/gos allocations + * + * This fops is a HAL for chip/IP generations, see the respective VI platform + * drivers for the implementations. + */ +struct vi_channel_drv_ops { + /** + * Request a syncpt allocation from Host1x. + * + * @param[in] pdev VI platform_device + * @param[in] name syncpt name + * @param[out] syncpt_id assigned syncpt id + * + * @returns 0 (success), neg. errno (failure) + */ + int (*alloc_syncpt)( + struct platform_device *pdev, + const char *name, + uint32_t *syncpt_id); + + /** + * Release a syncpt to Host1x. + * + * @param[in] pdev VI platform_device + * @param[in] id syncpt id to free + */ + void (*release_syncpt)( + struct platform_device *pdev, + uint32_t id); + + /** + * Retrieve the GoS table allocated in the VI-THI carveout. + * + * @param[in] pdev VI platform_device + * @param[out] count No. of carveout devices + * @param[out] table GoS table pointer + */ + void (*get_gos_table)( + struct platform_device *pdev, + int *count, + const dma_addr_t **table); + + /** + * Get a syncpt's GoS backing in the VI-THI carveout. + * + * @param[in] pdev VI platform_device + * @param[in] id syncpt id + * @param[out] gos_index GoS id + * @param[out] gos_offset Offset of syncpt within GoS [dword] + * + * @returns 0 (success), neg. errno (failure) + */ + int (*get_syncpt_gos_backing)( + struct platform_device *pdev, + uint32_t id, + dma_addr_t *syncpt_addr, + uint32_t *gos_index, + uint32_t *gos_offset); +}; + +/** + * @brief VI channel character device driver context. 
+ */ +struct vi_channel_drv { + struct platform_device *vi_capture_pdev; + /**< Capture VI driver platform device */ + bool use_legacy_path; + /**< Flag to maintain backward-compatibility for T186 */ + struct device *dev; /**< VI kernel @em device */ + struct platform_device *ndev; /**< VI kernel @em platform_device */ + struct mutex lock; /**< VI channel driver context lock */ + u8 num_channels; /**< No. of VI channel character devices */ + const struct vi_channel_drv_ops *ops; + /**< VI fops for Host1x syncpt/gos allocations */ + struct tegra_vi_channel __rcu *channels[]; + /**< Allocated VI channel contexts */ +}; + +/** + * @brief VI channel context (character device) + */ +struct tegra_vi_channel { + struct device *dev; /**< VI device */ + struct platform_device *ndev; /**< VI nvhost platform_device */ + struct platform_device *vi_capture_pdev; + /**< Capture VI driver platform device */ + struct vi_channel_drv *drv; /**< VI channel driver context */ + struct rcu_head rcu; /**< VI channel rcu */ + struct vi_capture *capture_data; /**< VI channel capture context */ + const struct vi_channel_drv_ops *ops; /**< VI syncpt/gos fops */ + struct device *rtcpu_dev; /**< rtcpu device */ + bool is_stream_opened; /**< Whether the NVCSI stream is opened */ +}; + +/** + * @brief Create the VI channels driver contexts, and instantiate + * as many channel character device nodes as specified in the device tree. + * + * VI channel nodes appear in the filesystem as: + * /dev/capture-vi-channel{0..max_vi_channels-1} + * + * @param[in] ndev VI platform_device context + * @param[in] max_vi_channels Maximum number of VI channels + * @returns 0 (success), neg. errno (failure) + */ +int vi_channel_drv_register( + struct platform_device *ndev, unsigned int max_vi_channels); + +/** + * @brief Destroy the VI channels driver and all character device nodes. 
+ * + * The VI channels driver and associated channel contexts in memory are freed, + * rendering the VI platform driver unusable until re-initialized. + * + * @param[in] dev VI device context + */ +void vi_channel_drv_unregister( + struct device *dev); + +/** + * @brief Register the chip specific syncpt/gos related function table + * + * @param[in] ops vi_channel_drv_ops fops + * @returns 0 (success), neg. errno (failure) + */ +int vi_channel_drv_fops_register( + const struct vi_channel_drv_ops *ops); + +/** + * @brief Unpin and free the list of pinned capture_mapping's associated with a + * VI capture request. + * + * @param[in] chan VI channel context + * @param[in] buffer_index Capture descriptor queue index + */ +void vi_capture_request_unpin( + struct tegra_vi_channel *chan, + uint32_t buffer_index); + +/* + * Internal APIs for V4L2 driver (aka. VI mode) + */ + +/** + * @brief Open a VI channel character device node, power on the camera subsystem + * and initialize the channel driver context. + * + * The act of opening a VI channel character device node does not entail the + * reservation of a VI channel, VI_CAPTURE_SETUP must be called afterwards to + * request an allocation by RCE. + * + * @param[in] channel VI channel enumerated node iminor no. + * @param[in] is_mem_pinned Whether capture request memory will be pinned + * + * @returns tegra_vi_channel pointer (success), ERR_PTR (failure) + */ +struct tegra_vi_channel *vi_channel_open_ex( + unsigned int channel, + bool is_mem_pinned); + +/** + * @brief Release a VI channel character device node, power off the camera + * subsystem and free the VI channel driver context. + * + * Under normal operation, the NVCSI stream and TPG source should be closed, and + * VI_CAPTURE_RESET followed by VI_CAPTURE_RELEASE should be called before + * releasing the file handle on the device node. 
+ * + * If the user-mode client crashes, the operating system will call this + * @em release handler to perform all of those actions as part of the @em Abort + * functionality. + * + * @param[in] channel VI channel enumerated node iminor no. + * @param[in] chan VI channel context + * + * @returns 0 + */ +int vi_channel_close_ex( + unsigned int channel, + struct tegra_vi_channel *chan); + +int vi_channel_drv_init(void); +void vi_channel_drv_exit(void); + +#endif /* __FUSA_CAPTURE_VI_CHANNEL_H__ */ diff --git a/include/media/fusa-capture/capture-vi.h b/include/media/fusa-capture/capture-vi.h new file mode 100644 index 00000000..62dd8de9 --- /dev/null +++ b/include/media/fusa-capture/capture-vi.h @@ -0,0 +1,454 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2022 NVIDIA Corporation. All rights reserved. + */ + +/** + * @file include/media/fusa-capture/capture-vi.h + * + * @brief VI channel operations header for the T186/T194 Camera RTCPU platform. + */ + +#ifndef __FUSA_CAPTURE_VI_H__ +#define __FUSA_CAPTURE_VI_H__ + +#if defined(__KERNEL__) +#include +#include +#else +#include +#endif +#include +#include +#include "soc/tegra/camrtc-capture.h" +#include "soc/tegra/camrtc-capture-messages.h" + +#define __VI_CAPTURE_ALIGN __aligned(8) + +struct tegra_vi_channel; +struct capture_buffer_table; + +/** + * @brief VI channel capture context. 
+ */ +struct vi_capture { + uint16_t channel_id; /**< RCE-assigned VI FW channel id */ + struct device *rtcpu_dev; /**< rtcpu device */ + struct tegra_vi_channel *vi_channel; /**< VI channel context */ + struct capture_buffer_table *buf_ctx; + /**< Surface buffer management table */ + struct capture_common_buf requests; /**< Capture descriptors queue */ + struct capture_descriptor_memoryinfo *requests_memoryinfo; + /**< memory info ringbuffer handle */ + uint64_t requests_memoryinfo_iova; + /**< memory info ringbuffer rtcpu iova */ + size_t request_buf_size; + /**< Size of capture descriptor queue [byte] */ + uint32_t queue_depth; /**< No. of capture descriptors in queue */ + uint32_t request_size; /**< Size of single capture descriptor [byte] */ + bool is_mem_pinned; /**< Whether capture request memory is pinned */ + + struct capture_common_status_notifier progress_status_notifier; + /**< Capture progress status notifier context */ + uint32_t progress_status_buffer_depth; + /**< No. of capture descriptors */ + bool is_progress_status_notifier_set; + /**< Whether progress_status_notifier has been initialized */ + + uint32_t stream_id; /**< NVCSI PixelParser index [0-5] */ + uint32_t csi_port; /**< NVCSI ports A-H [0-7] */ + uint32_t virtual_channel_id; /**< CSI virtual channel id [0-15] */ + + uint32_t num_gos_tables; /**< No. 
of cv devices in gos_tables */ + const dma_addr_t *gos_tables; /**< IOVA addresses of all GoS devices */ + + struct syncpoint_info progress_sp; /**< Syncpoint for frame progress */ + struct syncpoint_info embdata_sp; + /**< Syncpoint for embedded metadata */ + struct syncpoint_info linetimer_sp; + /**< Syncpoint for frame line timer */ + + struct completion control_resp; + /**< Completion for capture-control IVC response */ + struct completion capture_resp; + /**< + * Completion for capture requests (frame), if progress status + * notifier is not in use + */ + struct mutex control_msg_lock; + /**< Lock for capture-control IVC control_resp_msg */ + struct CAPTURE_CONTROL_MSG control_resp_msg; + /**< capture-control IVC resp msg written to by callback */ + + struct mutex reset_lock; + /**< Channel lock for reset/abort support (via RCE) */ + struct mutex unpins_list_lock; /**< Lock for unpins_list */ + struct capture_common_unpins *unpins_list; + /**< List of capture request buffer unpins */ + + uint64_t vi_channel_mask; + /**< Bitmask of RCE-assigned VI FW channel(s). */ + uint64_t vi2_channel_mask; + /**< Bitmask of RCE-assigned VI FW channel(s) for 2nd VI. */ +}; + +/** + * @brief VI channel setup config (IOCTL payload). + * + * These fields are used to set up the VI channel and capture contexts, and will + * be copied verbatim in the IVC capture_channel_config struct to allocate VI + * resources in the RCE subsystem. + */ +struct vi_capture_setup { + uint32_t channel_flags; + /**< + * Bitmask for channel flags, see @ref CAPTURE_CHANNEL_FLAGS + */ + uint32_t error_mask_correctable; + /**< + * Bitmask for correctable channel errors. See + * @ref CAPTURE_CHANNEL_ERRORS + */ + uint64_t vi_channel_mask; + /**< Bitmask of VI channels to consider for allocation by RCE */ + uint64_t vi2_channel_mask; + /**< Bitmask of 2nd VI channels */ + uint32_t queue_depth; /**< No. of capture descriptors in queue. 
*/ + uint32_t request_size; + /**< Size of a single capture descriptor [byte] */ + union { + uint32_t mem; /**< Capture descriptors queue NvRm handle */ + uint64_t iova; + /**< + * Capture descriptors queue base address (written back + * after pinning by KMD) + */ + }; + uint8_t slvsec_stream_main; + /**< SLVS-EC main stream (hardcode to 0x00) */ + uint8_t slvsec_stream_sub; + /**< SLVS-EC sub stream (hardcode to 0xFF - disabled) */ + uint16_t __pad_slvsec1; + + uint32_t csi_stream_id; /**< NVCSI PixelParser index [0-5] */ + uint32_t virtual_channel_id; /**< Virtual Channel index [0-15] */ + uint32_t csi_port; /**< NVCSI Port [0-7], not valid for TPG */ + uint32_t __pad_csi; /**< Reserved */ + + uint32_t error_mask_uncorrectable; + /**< + * Bitmask for correctable channel errors. See + * @ref CAPTURE_CHANNEL_ERRORS + */ + uint64_t stop_on_error_notify_bits; + /**< + * Bitmask for NOTIFY errors that force channel stop upon + * receipt + */ + uint64_t reserved[2]; +} __VI_CAPTURE_ALIGN; + +/** + * @brief VI capture info (resp. to query). + */ +struct vi_capture_info { + struct vi_capture_syncpts { + uint32_t progress_syncpt; /**< Progress syncpoint id */ + uint32_t progress_syncpt_val; /**< Progress syncpoint value. */ + uint32_t emb_data_syncpt; /**< Embedded metadata syncpoint id */ + uint32_t emb_data_syncpt_val; + /**< Embedded metadata syncpt value. */ + uint32_t line_timer_syncpt; /**< Line timer syncpoint id */ + uint32_t line_timer_syncpt_val; + /**< Line timer syncpoint value */ + } syncpts; + uint32_t hw_channel_id; /**< RCE-assigned VI FW channel id */ + uint32_t __pad; + uint64_t vi_channel_mask; + /**< Bitmask of RCE-assigned VI FW channel(s) */ + uint64_t vi2_channel_mask; + /**< Bitmask of RCE-assigned VI FW channel(s) for 2nd VI */ +} __VI_CAPTURE_ALIGN; + +/** + * @brief Container for CAPTURE_CONTROL_MSG req./resp. from FuSa UMD (IOCTL + * payload). 
+ * + * The response and request pointers may be to the same memory allocation; in + * which case the request message will be overwritten by the response. + */ +struct vi_capture_control_msg { + uint64_t ptr; /**< Pointer to capture-control message req. */ + uint32_t size; /**< Size of req./resp. msg [byte] */ + uint32_t __pad; + uint64_t response; /**< Pointer to capture-control message resp. */ +} __VI_CAPTURE_ALIGN; + +/** + * @brief VI capture request (IOCTL payload). + */ +struct vi_capture_req { + uint32_t buffer_index; /**< Capture descriptor index. */ + uint32_t num_relocs; /**< No. of surface buffers to pin/reloc. */ + uint64_t reloc_relatives; + /**< + * Offsets to surface buffer addresses to patch in capture + * descriptor [byte]. + */ +} __VI_CAPTURE_ALIGN; + +/** + * @brief VI capture progress status setup config (IOCTL payload) + */ +struct vi_capture_progress_status_req { + uint32_t mem; /**< NvRm handle to buffer region start. */ + uint32_t mem_offset; /**< Status notifier offset [byte]. */ + uint32_t buffer_depth; /**< Capture descriptor queue size [num]. */ + uint32_t __pad[3]; +} __VI_CAPTURE_ALIGN; + +/** + * @brief Add VI capture surface buffer to management table (IOCTL payload) + */ +struct vi_buffer_req { + uint32_t mem; /**< NvRm handle to buffer. */ + uint32_t flag; /**< Buffer @ref CAPTURE_BUFFER_OPS bitmask. */ +} __VI_CAPTURE_ALIGN; + +/** + * @brief The compand configuration describes a piece-wise linear transformation + * function used by the VI companding module. + */ +#define VI_CAPTURE_NUM_COMPAND_KNEEPTS 10 + +/** + * @brief VI compand setup config (IOCTL payload). + */ +struct vi_capture_compand { + uint32_t base[VI_CAPTURE_NUM_COMPAND_KNEEPTS]; + /**< kneept base param. */ + uint32_t scale[VI_CAPTURE_NUM_COMPAND_KNEEPTS]; + /**< kneept scale param. */ + uint32_t offset[VI_CAPTURE_NUM_COMPAND_KNEEPTS]; + /**< kneept offset param. */ +} __VI_CAPTURE_ALIGN; + +/** + * @brief Initialize a VI channel capture context (at channel open). 
+ * + * The VI channel context is already partially-initialized by the calling + * function, the channel capture context is allocated and linked here. + * + * @param[in,out] chan Allocated VI channel context, + * partially-initialized + * @param[in] is_mem_pinned Whether capture request memory is pinned + * + * @returns 0 (success), neg. errno (failure) + */ +int vi_capture_init( + struct tegra_vi_channel *chan, + bool is_mem_pinned); + +/** + * @brief De-initialize a VI capture channel, closing open VI/NVCSI streams, and + * freeing the buffer management table and channel capture context. + * + * The VI channel context is not freed in this function, only the capture + * context is. + * + * @param[in,out] chan VI channel context + */ +void vi_capture_shutdown( + struct tegra_vi_channel *chan); + +/** + * @brief Select the NvHost VI client instance platform driver to be + * associated with the channel. + * Only used in the case where VI standalone driver is used + * to enumerate the VI channel character drivers + * + * @param[in,out] chan VI channel context + * @param[in] setup VI channel setup config + * + */ +void vi_get_nvhost_device( + struct tegra_vi_channel *chan, + struct vi_capture_setup *setup); + +/** + * @brief The function returns the corresponding NvHost VI client device + * pointer associated with the NVCSI stream Id. A NULL value is returned + * if invalid input parameters are passed. + * + * @param[in] pdev VI capture platform device pointer + * @param[in] csi_stream_id NVCSI stream Id + * + * @returns reference to VI device (success), null (failure) + */ +struct device *vi_csi_stream_to_nvhost_device( + struct platform_device *pdev, + uint32_t csi_stream_id); + +/** + * @brief Open a VI channel in RCE, sending channel configuration to request a + * HW channel allocation. Syncpoints are allocated by the KMD in this + * subroutine. 
+ * + * @param[in,out] chan VI channel context + * @param[in] setup VI channel setup config + * + * @returns 0 (success), neg. errno (failure) + */ +int vi_capture_setup( + struct tegra_vi_channel *chan, + struct vi_capture_setup *setup); + +/** + * @brief Get the pointer to tegra_vi_channel struct associated with the + * stream id and virtual id passed as function input params. + * + * If no valid tegra_vi_channel pointer is found associated with the given + * stream id/ VC id combo then NULL is returned. + * + * @param[in] stream_id CSI stream ID + * @param[in] virtual_channel_id CSI virtual channel ID + * + * @returns pointer to tegra_vi_channel(success), NULL(failure) + */ +struct tegra_vi_channel *get_tegra_vi_channel( + unsigned int stream_id, + unsigned int virtual_channel_id); +/** + * @brief Reset an opened VI channel, all pending capture requests to RCE are + * discarded. + * + * The channel's progress syncpoint is advanced to the threshold of the latest + * capture request to unblock any waiting observers. + * + * A reset barrier may be enqueued in the capture IVC channel to flush stale + * capture descriptors, in case of abnormal channel termination. + * + * @param[in] chan VI channel context + * @param[in] reset_flags Bitmask for VI channel reset options + * (CAPTURE_CHANNEL_RESET_FLAG_*) + * + * @returns 0 (success), neg. errno (failure) + */ +int vi_capture_reset( + struct tegra_vi_channel *chan, + uint32_t reset_flags); + +/** + * @brief Release an opened VI channel; the RCE channel allocation, syncpts and + * IVC channel callbacks are released. + * + * @param[in] chan VI channel context + * @param[in] reset_flags Bitmask for VI channel reset options + * (CAPTURE_CHANNEL_RESET_FLAG_*) + * + * @returns 0 (success), neg. errno (failure) + */ +int vi_capture_release( + struct tegra_vi_channel *chan, + uint32_t reset_flags); + +/** + * @brief Release the TPG and/or NVCSI stream on a VI channel, if they are + * active. 
+ * + * This function normally does not execute except in the event of abnormal UMD + * termination, as it is the client's responsibility to open and close NVCSI and + * TPG sources. + * + * @param[in] chan VI channel context + * + * @returns 0 (success), neg. errno (failure) + */ +int csi_stream_release( + struct tegra_vi_channel *chan); + +/** + * @brief Send a capture-control IVC message to RCE and wait for a response. + * + * This is a blocking call, with the possibility of timeout. + * + * @param[in] chan VI channel context + * @param[in,out] msg capture-control IVC container w/ req./resp. pair + * + * @returns 0 (success), neg. errno (failure) + */ +int vi_capture_control_message( + struct tegra_vi_channel *chan, + struct vi_capture_control_msg *msg); + +/** + * @brief Send a capture-control IVC message which is received from + * userspace to RCE and wait for a response. + * + * This is a blocking call, with the possibility of timeout. + * + * @param[in] chan VI channel context + * @param[in,out] msg capture-control IVC container w/ req./resp. pair + * + * @returns 0 (success), neg. errno (failure) + */ +int vi_capture_control_message_from_user( + struct tegra_vi_channel *chan, + struct vi_capture_control_msg *msg); + +/** + * @brief Query a VI channel's syncpt ids and values, and retrieve the + * RCE-assigned VI FW channel id and mask. + * + * @param[in] chan VI channel context + * @param[out] info VI channel info response + * @returns 0 (success), neg. errno (failure) + */ +int vi_capture_get_info( + struct tegra_vi_channel *chan, + struct vi_capture_info *info); + +/** + * @brief Send a capture request for a frame via the capture IVC channel to RCE. + * + * This is a non-blocking call. + * + * @param[in] chan VI channel context + * @param[in] req VI capture request + * + * @returns 0 (success), neg. 
errno (failure) + */ +int vi_capture_request( + struct tegra_vi_channel *chan, + struct vi_capture_req *req); + +/** + * @brief Wait on receipt of the capture status of the head of the capture + * request FIFO queue to RCE. The RCE VI driver sends a + * CAPTURE_STATUS_IND notification at frame completion. + * + * This is a blocking call, with the possibility of timeout. + * + * @param[in] chan VI channel context + * @param[in] timeout_ms Time to wait for status completion [ms], set to + * 0 for indefinite + * + * @returns 0 (success), neg. errno (failure) + */ +int vi_capture_status( + struct tegra_vi_channel *chan, + int32_t timeout_ms); + +/** + * @brief Setup VI channel capture status progress notifier. + * + * @param[in] chan VI channel context + * @param[in] req VI capture progress status setup config + * + * @returns 0 (success), neg. errno (failure) + */ +int vi_capture_set_progress_status_notifier( + struct tegra_vi_channel *chan, + struct vi_capture_progress_status_req *req); + +#endif /* __FUSA_CAPTURE_VI_H__ */ diff --git a/include/media/mc_common.h b/include/media/mc_common.h new file mode 100644 index 00000000..c419adf5 --- /dev/null +++ b/include/media/mc_common.h @@ -0,0 +1,444 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Tegra Media controller common APIs + * + * Copyright (c) 2012-2022, NVIDIA CORPORATION. All rights reserved. 
+ */ + +#ifndef __CAMERA_MC_COMMON_H__ +#define __CAMERA_MC_COMMON_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_FORMAT_NUM 64 +#define MAX_SUBDEVICES 4 +#define QUEUED_BUFFERS 4 +#define ENABLE 1 +#define DISABLE 0 +#define MAX_SYNCPT_PER_CHANNEL 3 + +#define CAPTURE_MIN_BUFFERS 1U +#define CAPTURE_MAX_BUFFERS 240U + +#define TEGRA_MEM_FORMAT 0 +#define TEGRA_ISP_FORMAT 1 + +enum channel_capture_state { + CAPTURE_IDLE = 0, + CAPTURE_GOOD, + CAPTURE_TIMEOUT, + CAPTURE_ERROR, +}; + +enum tegra_vi_pg_mode { + TEGRA_VI_PG_DISABLED = 0, + TEGRA_VI_PG_DIRECT, + TEGRA_VI_PG_PATCH, +}; + +enum interlaced_type { + Top_Bottom = 0, + Interleaved, +}; + +/** + * struct tegra_channel_buffer - video channel buffer + * @buf: vb2 buffer base object + * @queue: buffer list entry in the channel queued buffers list + * @chan: channel that uses the buffer + * @vb2_state: V4L2 buffer state (active, done, error) + * @capture_descr_index: Index into the VI capture descriptor queue + * @addr: Tegra IOVA buffer address for VI output + */ +struct tegra_channel_buffer { + struct vb2_v4l2_buffer buf; + struct list_head queue; + struct tegra_channel *chan; + + unsigned int vb2_state; + unsigned int capture_descr_index[TEGRA_CSI_BLOCKS]; + + dma_addr_t addr; + + u32 thresh[TEGRA_CSI_BLOCKS]; + int version; + int state; +}; + +#define to_tegra_channel_buffer(vb) \ + container_of(vb, struct tegra_channel_buffer, buf) + +/** + * struct tegra_vi_graph_entity - Entity in the video graph + * @list: list entry in a graph entities list + * @node: the entity's DT node + * @entity: media entity, from the corresponding V4L2 subdev + * @asd: subdev asynchronous registration information + * @subdev: V4L2 subdev + */ +struct tegra_vi_graph_entity { + struct list_head list; + struct device_node *node; + struct media_entity *entity; + + struct v4l2_async_subdev asd; + struct v4l2_subdev *subdev; 
+}; + +/** + * struct tegra_channel - Tegra video channel + * @list: list entry in a composite device dmas list + * @video: V4L2 video device associated with the video channel + * @video_lock: + * @pad: media pad for the video device entity + * @pipe: pipeline belonging to the channel + * + * @vi: composite device DT node port number for the channel + * + * @kthread_capture: kernel thread task structure of this video channel + * @wait: wait queue structure for kernel thread + * + * @format: active V4L2 pixel format + * @fmtinfo: format information corresponding to the active @format + * + * @queue: vb2 buffers queue + * @alloc_ctx: allocation context for the vb2 @queue + * @sequence: V4L2 buffers sequence number + * + * @capture: list of queued buffers for capture + * @queued_lock: protects the buf_queued list + * + * @csi: CSI register bases + * @stride_align: channel buffer stride alignment, default is 1 + * @width_align: image width alignment, default is 1 + * @height_align: channel buffer height alignment, default is 1 + * @size_align: channel buffer size alignment, default is 1 + * @port: CSI port of this video channel + * @io_id: Tegra IO rail ID of this video channel + * + * @fmts_bitmap: a bitmap for formats supported + * @bypass: bypass flag for VI bypass mode + * @restart_version: incremented every time either capture or release threads + * wants to reset VI. it is appended to each buffer processed + * by the capture thread, and inspected by each buffer + * processed by the receive thread. + * @capture_version: thread-local copy of @restart_version created when the + * capture thread resets the VI. 
+ */ +struct tegra_channel { + unsigned int id; + struct list_head list; + struct video_device *video; + struct media_pad pad; + struct media_pipeline pipe; + struct mutex video_lock; + + struct tegra_mc_vi *vi; + struct v4l2_subdev *subdev[MAX_SUBDEVICES]; + struct v4l2_subdev *subdev_on_csi; + + struct v4l2_ctrl_handler ctrl_handler; + struct v4l2_pix_format format; + const struct tegra_video_format *fmtinfo; + const struct tegra_video_format *video_formats[MAX_FORMAT_NUM]; + unsigned int num_video_formats; + struct mutex stop_kthread_lock; + + unsigned char port[TEGRA_CSI_BLOCKS]; + unsigned int virtual_channel; + unsigned int syncpt[TEGRA_CSI_BLOCKS][MAX_SYNCPT_PER_CHANNEL]; + unsigned int syncpoint_fifo[TEGRA_CSI_BLOCKS][MAX_SYNCPT_PER_CHANNEL]; + unsigned int buffer_offset[TEGRA_CSI_BLOCKS]; + unsigned int *buffer_state; + struct vb2_v4l2_buffer **buffers; + unsigned long timeout; + atomic_t restart_version; + int capture_version; + unsigned int save_index; + unsigned int free_index; + unsigned int num_buffers; + spinlock_t buffer_lock; + unsigned int released_bufs; + + unsigned int capture_queue_depth; + unsigned int capture_descr_index; + unsigned int capture_descr_sequence; + unsigned int capture_reqs_enqueued; + struct task_struct *kthread_capture_start; + struct task_struct *kthread_release; + wait_queue_head_t start_wait; + wait_queue_head_t release_wait; + struct task_struct *kthread_capture_dequeue; + wait_queue_head_t dequeue_wait; + struct vb2_queue queue; + void *alloc_ctx; + bool init_done; + struct list_head capture; + struct list_head release; + struct list_head dequeue; + spinlock_t start_lock; + spinlock_t release_lock; + spinlock_t dequeue_lock; + struct work_struct status_work; + struct work_struct error_work; + + void __iomem *csibase[TEGRA_CSI_BLOCKS]; + unsigned int stride_align; + unsigned int preferred_stride; + unsigned int width_align; + unsigned int height_align; + unsigned int size_align; + unsigned int valid_ports; + unsigned int 
total_ports; + unsigned int numlanes; + unsigned int io_id; + unsigned int num_subdevs; + unsigned int sequence; + unsigned int saved_ctx_bypass; + unsigned int saved_ctx_pgmode; + unsigned int gang_mode; + unsigned int gang_width; + unsigned int gang_height; + unsigned int gang_bytesperline; + unsigned int gang_sizeimage; + unsigned int embedded_data_width; + unsigned int embedded_data_height; + + DECLARE_BITMAP(fmts_bitmap, MAX_FORMAT_NUM); + atomic_t power_on_refcnt; + struct v4l2_fh *fh; + bool bypass; + bool write_ispformat; + bool low_latency; + enum tegra_vi_pg_mode pg_mode; + bool bfirst_fstart; + enum channel_capture_state capture_state; + bool queue_error; + spinlock_t capture_state_lock; + atomic_t is_streaming; + int requested_kbyteps; + unsigned long requested_hz; + + struct vi_notify_channel *vnc[TEGRA_CSI_BLOCKS]; + int vnc_id[TEGRA_CSI_BLOCKS]; + int grp_id; + + struct v4l2_async_notifier notifier; + struct list_head entities; + struct device_node *endpoint_node; /* endpoint of_node in vi */ + unsigned int subdevs_bound; + unsigned int link_status; + struct nvcsi_deskew_context *deskew_ctx; + struct tegra_vi_channel *tegra_vi_channel[TEGRA_CSI_BLOCKS]; + struct capture_descriptor *request[TEGRA_CSI_BLOCKS]; + bool is_slvsec; + int is_interlaced; + enum interlaced_type interlace_type; + int interlace_bplfactor; + + atomic_t syncpt_depth; + struct rw_semaphore reset_lock; + + dma_addr_t emb_buf; + void *emb_buf_addr; + unsigned int emb_buf_size; +}; + +#define to_tegra_channel(vdev) \ + container_of(vdev, struct tegra_channel, video) + +/** + * struct tegra_mc_vi - NVIDIA Tegra Media controller structure + * @v4l2_dev: V4L2 device + * @media_dev: media device + * @dev: device struct + * @tegra_camera: tegra camera structure + * @nvhost_device_data: NvHost VI device information + * + * @notifier: V4L2 asynchronous subdevs notifier + * @entities: entities in the graph as a list of tegra_vi_graph_entity + * @num_subdevs: number of subdevs in the pipeline 
+ * + * @channels: list of channels at the pipeline output and input + * + * @ctrl_handler: V4L2 control handler + * @pattern: test pattern generator V4L2 control + * @pg_mode: test pattern generator mode (disabled/direct/patch) + * + * @has_sensors: a flag to indicate whether is a real sensor connecting + */ +struct tegra_mc_vi { + struct vi *vi; + struct platform_device *ndev; + struct v4l2_device v4l2_dev; + struct media_device media_dev; + struct device *dev; + struct nvhost_device_data *ndata; + + struct regulator *reg; + struct clk *clk; + struct clk *parent_clk; + + unsigned int num_channels; + unsigned int num_subdevs; + + struct tegra_csi_device *csi; + struct list_head vi_chans; + struct tegra_channel *tpg_start; + void __iomem *iomem; + + struct v4l2_ctrl_handler ctrl_handler; + struct v4l2_ctrl *pattern; + enum tegra_vi_pg_mode pg_mode; + + bool has_sensors; + atomic_t power_on_refcnt; + atomic_t vb2_dma_alloc_refcnt; + struct mutex bw_update_lock; + unsigned long aggregated_kbyteps; + unsigned long max_requested_hz; + struct mutex mipical_lock; + + bool bypass; + + const struct tegra_vi_fops *fops; +}; + +int tegra_vi_get_port_info(struct tegra_channel *chan, + struct device_node *node, unsigned int index); +void tegra_vi_v4l2_cleanup(struct tegra_mc_vi *vi); +int tegra_vi_v4l2_init(struct tegra_mc_vi *vi); +int tegra_vi_tpg_graph_init(struct tegra_mc_vi *vi); +int tegra_vi_graph_init(struct tegra_mc_vi *vi); +void tegra_vi_graph_cleanup(struct tegra_mc_vi *vi); +int tegra_channel_init(struct tegra_channel *chan); +void tegra_vi_channels_unregister(struct tegra_mc_vi *vi); +int tegra_vi_channels_init(struct tegra_mc_vi *vi); +int tegra_channel_cleanup(struct tegra_channel *chan); +int tegra_vi_channels_cleanup(struct tegra_mc_vi *vi); +int tegra_channel_init_subdevices(struct tegra_channel *chan); +void tegra_channel_remove_subdevices(struct tegra_channel *chan); +struct v4l2_subdev *tegra_channel_find_linked_csi_subdev( + struct tegra_channel *chan); 
+int tegra_vi2_power_on(struct tegra_mc_vi *vi); +void tegra_vi2_power_off(struct tegra_mc_vi *vi); +int tegra_vi4_power_on(struct tegra_mc_vi *vi); +void tegra_vi4_power_off(struct tegra_mc_vi *vi); +int tegra_vi5_enable(struct tegra_mc_vi *vi); +void tegra_vi5_disable(struct tegra_mc_vi *vi); +int tegra_clean_unlinked_channels(struct tegra_mc_vi *vi); +int tegra_channel_s_ctrl(struct v4l2_ctrl *ctrl); +int tegra_vi_media_controller_init(struct tegra_mc_vi *mc_vi, + struct platform_device *pdev); +int tegra_capture_vi_media_controller_init(struct tegra_mc_vi *mc_vi, + struct platform_device *pdev); +void tegra_vi_media_controller_cleanup(struct tegra_mc_vi *mc_vi); +void tegra_channel_ec_close(struct tegra_mc_vi *mc_vi); +void tegra_channel_query_hdmiin_unplug(struct tegra_channel *chan, + struct v4l2_event *event); +int tegra_vi_mfi_work(struct tegra_mc_vi *vi, int csiport); +int tpg_vi_media_controller_init(struct tegra_mc_vi *mc_vi, int pg_mode); +void tpg_vi_media_controller_cleanup(struct tegra_mc_vi *mc_vi); +struct tegra_mc_vi *tegra_get_mc_vi(void); + +u32 tegra_core_get_fourcc_by_idx(struct tegra_channel *chan, + unsigned int index); +int tegra_core_get_idx_by_code(struct tegra_channel *chan, + unsigned int code, unsigned offset); +int tegra_core_get_code_by_fourcc(struct tegra_channel *chan, + unsigned int fourcc, unsigned offset); +const struct tegra_video_format *tegra_core_get_format_by_code( + struct tegra_channel *chan, + unsigned int code, unsigned offset); +const struct tegra_video_format *tegra_core_get_format_by_fourcc( + struct tegra_channel *chan, u32 fourcc); +void tegra_channel_queued_buf_done(struct tegra_channel *chan, + enum vb2_buffer_state state, bool multi_queue); +int tegra_channel_set_stream(struct tegra_channel *chan, bool on); +int tegra_channel_write_blobs(struct tegra_channel *chan); +void tegra_channel_ring_buffer(struct tegra_channel *chan, + struct vb2_v4l2_buffer *vb, + struct timespec64 *ts, int state); +struct 
tegra_channel_buffer *dequeue_buffer(struct tegra_channel *chan, + bool requeue); +struct tegra_channel_buffer *dequeue_dequeue_buffer(struct tegra_channel *chan); +int tegra_channel_error_recover(struct tegra_channel *chan, bool queue_error); +int tegra_channel_alloc_buffer_queue(struct tegra_channel *chan, + unsigned int num_buffers); +void tegra_channel_dealloc_buffer_queue(struct tegra_channel *chan); +void tegra_channel_init_ring_buffer(struct tegra_channel *chan); +void free_ring_buffers(struct tegra_channel *chan, int frames); +void release_buffer(struct tegra_channel *chan, + struct tegra_channel_buffer *buf); +void set_timestamp(struct tegra_channel_buffer *buf, + const struct timespec64 *ts); +void enqueue_inflight(struct tegra_channel *chan, + struct tegra_channel_buffer *buf); +struct tegra_channel_buffer *dequeue_inflight(struct tegra_channel *chan); +int tegra_channel_set_power(struct tegra_channel *chan, bool on); + +int tegra_channel_init_video(struct tegra_channel *chan); +int tegra_channel_cleanup_video(struct tegra_channel *chan); + +struct tegra_vi_fops { + int (*vi_power_on)(struct tegra_channel *chan); + void (*vi_power_off)(struct tegra_channel *chan); + int (*vi_start_streaming)(struct vb2_queue *vq, u32 count); + int (*vi_stop_streaming)(struct vb2_queue *vq); + int (*vi_setup_queue)(struct tegra_channel *chan, + unsigned int *nbuffers); + int (*vi_error_recover)(struct tegra_channel *chan, bool queue_error); + int (*vi_add_ctrls)(struct tegra_channel *chan); + void (*vi_init_video_formats)(struct tegra_channel *chan); + long (*vi_default_ioctl)(struct file *file, void *fh, + bool use_prio, unsigned int cmd, void *arg); + int (*vi_mfi_work)(struct tegra_mc_vi *vi, int port); + void (*vi_stride_align)(unsigned int *bpl); + void (*vi_unit_get_device_handle)(struct platform_device *pdev, + uint32_t csi_steam_id, struct device **dev); +}; + +struct tegra_csi_fops { + int (*csi_power_on)(struct tegra_csi_device *csi); + int 
(*csi_power_off)(struct tegra_csi_device *csi); + int (*csi_start_streaming)(struct tegra_csi_channel *chan, + int port_idx); + void (*csi_stop_streaming)(struct tegra_csi_channel *chan, + int port_idx); + void (*csi_override_format)(struct tegra_csi_channel *chan, + int port_idx); + int (*csi_error_recover)(struct tegra_csi_channel *chan, int port_idx); + int (*mipical)(struct tegra_csi_channel *chan); + int (*hw_init)(struct tegra_csi_device *csi); + int (*tpg_set_gain)(struct tegra_csi_channel *chan, int gain_ratio_tpg); +}; + +struct tegra_t210_vi_data { + struct nvhost_device_data *info; + const struct tegra_vi_fops *vi_fops; + const struct tegra_csi_fops *csi_fops; +}; + +struct tegra_vi_data { + struct nvhost_device_data *info; + const struct tegra_vi_fops *vi_fops; +}; + +struct tegra_csi_data { + struct nvhost_device_data *info; + const struct tegra_csi_fops *csi_fops; +}; +#endif diff --git a/include/media/nvc_focus.h b/include/media/nvc_focus.h new file mode 100644 index 00000000..29fbc474 --- /dev/null +++ b/include/media/nvc_focus.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2022 NVIDIA Corporation. All rights reserved. + */ + +#ifndef __NVC_FOCUS_H__ +#define __NVC_FOCUS_H__ + +/* NVC_FOCUS_CAP_VER0: invalid */ +/* NVC_FOCUS_CAP_VER1: + * __u32 version + * __u32 actuator_range + * __u32 settle_time + */ +#define NVC_FOCUS_CAP_VER1 1 +/* NVC_FOCUS_CAP_VER2 adds: + * __u32 focus_macro; + * __u32 focus_hyper; + * __u32 focus_infinity; + */ +#define NVC_FOCUS_CAP_VER2 2 +#define NVC_FOCUS_CAP_VER 2 /* latest version */ + +#define AF_POS_INVALID_VALUE INT_MAX + +/* These are the slew rate values coming down from the configuration */ +/* Disabled is the same as fastest. 
Default is the default */ +/* slew rate configuration in the focuser */ +#define SLEW_RATE_DISABLED 0 +#define SLEW_RATE_DEFAULT 1 +#define SLEW_RATE_SLOWEST 9 + + +enum nvc_focus_sts { + NVC_FOCUS_STS_UNKNOWN = 1, + NVC_FOCUS_STS_NO_DEVICE, + NVC_FOCUS_STS_INITIALIZING, + NVC_FOCUS_STS_INIT_ERR, + NVC_FOCUS_STS_WAIT_FOR_MOVE_END, + NVC_FOCUS_STS_WAIT_FOR_SETTLE, + NVC_FOCUS_STS_LENS_SETTLED, + NVC_FOCUS_STS_FORCE32 = 0x7FFFFFFF +}; + +struct nvc_focus_nvc { + __u32 focal_length; + __u32 fnumber; + __u32 max_aperture; +} __packed; + +struct nvc_focus_cap { + __u32 version; + __s32 actuator_range; + __u32 settle_time; + __s32 focus_macro; + __s32 focus_hyper; + __s32 focus_infinity; + __u32 slew_rate; + __u32 position_translate; +} __packed; + + +#define NV_FOCUSER_SET_MAX 10 +#define NV_FOCUSER_SET_DISTANCE_PAIR 16 + +struct nv_focuser_set_dist_pairs { + __s32 fdn; + __s32 distance; +} __packed; + +struct nv_focuser_set { + __s32 posture; + __s32 macro; + __s32 hyper; + __s32 inf; + __s32 hysteresis; + __u32 settle_time; + __s32 macro_offset; + __s32 inf_offset; + __u32 num_dist_pairs; + struct nv_focuser_set_dist_pairs + dist_pair[NV_FOCUSER_SET_DISTANCE_PAIR]; +} __packed; + +struct nv_focuser_config { + __u32 focal_length; + __u32 fnumber; + __u32 max_aperture; + __u32 range_ends_reversed; + __s32 pos_working_low; + __s32 pos_working_high; + __s32 pos_actual_low; + __s32 pos_actual_high; + __u32 slew_rate; + __u32 circle_of_confusion; + __u32 num_focuser_sets; + struct nv_focuser_set focuser_set[NV_FOCUSER_SET_MAX]; +} __packed; + + +#endif /* __NVC_FOCUS_H__ */ + diff --git a/include/media/sensor_common.h b/include/media/sensor_common.h new file mode 100644 index 00000000..4b8f84ac --- /dev/null +++ b/include/media/sensor_common.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * sensor_common.h - utilities for tegra camera driver + * + * Copyright (c) 2017-2022, NVIDIA Corporation. All rights reserved. 
+ */ + +#ifndef __sensor_common__ +#define __sensor_common__ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +struct sensor_properties { + struct sensor_cfg cfg; + /* sensor_modes points to an array of mode properties */ + struct sensor_mode_properties *sensor_modes; + u32 num_modes; +}; + +int sensor_common_parse_num_modes(const struct device *dev); +int sensor_common_init_sensor_properties( + struct device *dev, struct device_node *node, + struct sensor_properties *sensor); + +#endif /* __sensor_common__ */ diff --git a/include/media/tegra-v4l2-camera.h b/include/media/tegra-v4l2-camera.h new file mode 100644 index 00000000..64d28d33 --- /dev/null +++ b/include/media/tegra-v4l2-camera.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * tegra-v4l2-camera.h - utilities for tegra camera driver + * + * Copyright (c) 2017-2022, NVIDIA Corporation. All rights reserved.
+ */ + +#ifndef __TEGRA_V4L2_CAMERA__ +#define __TEGRA_V4L2_CAMERA__ + +#include + +#define TEGRA_CAMERA_CID_BASE (V4L2_CTRL_CLASS_CAMERA | 0x2000) + +#define TEGRA_CAMERA_CID_FRAME_LENGTH (TEGRA_CAMERA_CID_BASE+0) +#define TEGRA_CAMERA_CID_COARSE_TIME (TEGRA_CAMERA_CID_BASE+1) +#define TEGRA_CAMERA_CID_COARSE_TIME_SHORT (TEGRA_CAMERA_CID_BASE+2) +#define TEGRA_CAMERA_CID_GROUP_HOLD (TEGRA_CAMERA_CID_BASE+3) +#define TEGRA_CAMERA_CID_HDR_EN (TEGRA_CAMERA_CID_BASE+4) +#define TEGRA_CAMERA_CID_EEPROM_DATA (TEGRA_CAMERA_CID_BASE+5) +#define TEGRA_CAMERA_CID_OTP_DATA (TEGRA_CAMERA_CID_BASE+6) +#define TEGRA_CAMERA_CID_FUSE_ID (TEGRA_CAMERA_CID_BASE+7) +#define TEGRA_CAMERA_CID_SENSOR_MODE_ID (TEGRA_CAMERA_CID_BASE+8) + +#define TEGRA_CAMERA_CID_GAIN (TEGRA_CAMERA_CID_BASE+9) +#define TEGRA_CAMERA_CID_EXPOSURE (TEGRA_CAMERA_CID_BASE+10) +#define TEGRA_CAMERA_CID_FRAME_RATE (TEGRA_CAMERA_CID_BASE+11) +#define TEGRA_CAMERA_CID_EXPOSURE_SHORT (TEGRA_CAMERA_CID_BASE+12) +#define TEGRA_CAMERA_CID_STEREO_EEPROM (TEGRA_CAMERA_CID_BASE+13) + +#define TEGRA_CAMERA_CID_SENSOR_CONFIG (TEGRA_CAMERA_CID_BASE+50) +#define TEGRA_CAMERA_CID_SENSOR_MODE_BLOB (TEGRA_CAMERA_CID_BASE+51) +#define TEGRA_CAMERA_CID_SENSOR_CONTROL_BLOB (TEGRA_CAMERA_CID_BASE+52) + +#define TEGRA_CAMERA_CID_GAIN_TPG (TEGRA_CAMERA_CID_BASE+70) +#define TEGRA_CAMERA_CID_GAIN_TPG_EMB_DATA_CFG (TEGRA_CAMERA_CID_BASE+71) + +#define TEGRA_CAMERA_CID_VI_BYPASS_MODE (TEGRA_CAMERA_CID_BASE+100) +#define TEGRA_CAMERA_CID_OVERRIDE_ENABLE (TEGRA_CAMERA_CID_BASE+101) +#define TEGRA_CAMERA_CID_VI_HEIGHT_ALIGN (TEGRA_CAMERA_CID_BASE+102) +#define TEGRA_CAMERA_CID_VI_SIZE_ALIGN (TEGRA_CAMERA_CID_BASE+103) +#define TEGRA_CAMERA_CID_WRITE_ISPFORMAT (TEGRA_CAMERA_CID_BASE+104) + +#define TEGRA_CAMERA_CID_SENSOR_SIGNAL_PROPERTIES (TEGRA_CAMERA_CID_BASE+105) +#define TEGRA_CAMERA_CID_SENSOR_IMAGE_PROPERTIES (TEGRA_CAMERA_CID_BASE+106) +#define TEGRA_CAMERA_CID_SENSOR_CONTROL_PROPERTIES (TEGRA_CAMERA_CID_BASE+107) +#define 
TEGRA_CAMERA_CID_SENSOR_DV_TIMINGS (TEGRA_CAMERA_CID_BASE+108) +#define TEGRA_CAMERA_CID_LOW_LATENCY (TEGRA_CAMERA_CID_BASE+109) +#define TEGRA_CAMERA_CID_VI_PREFERRED_STRIDE (TEGRA_CAMERA_CID_BASE+110) + +/** + * This is temporary with the current v4l2 infrastructure + * currently discussing with upstream maintainers our proposals and + * better approaches to resolve this + */ +#define TEGRA_CAMERA_CID_SENSOR_MODES (TEGRA_CAMERA_CID_BASE + 130) + +#define MAX_BUFFER_SIZE 32 +#define MAX_CID_CONTROLS 32 +#define MAX_NUM_SENSOR_MODES 30 +#define OF_MAX_STR_LEN 256 +#define OF_SENSORMODE_PREFIX ("mode") + +/* + * Scaling factor for converting a Q10.22 fixed point value + * back to its original floating point value + */ +#define FIXED_POINT_SCALING_FACTOR (1ULL << 22) + +#define TEGRA_CAM_MAX_STRING_CONTROLS 8 +#define TEGRA_CAM_STRING_CTRL_EEPROM_INDEX 0 +#define TEGRA_CAM_STRING_CTRL_FUSEID_INDEX 1 +#define TEGRA_CAM_STRING_CTRL_OTP_INDEX 2 + +#define TEGRA_CAM_MAX_COMPOUND_CONTROLS 4 +#define TEGRA_CAM_COMPOUND_CTRL_EEPROM_INDEX 0 + +#define CSI_PHY_MODE_DPHY 0 +#define CSI_PHY_MODE_CPHY 1 +#define SLVS_EC 2 + +struct unpackedU64 { + __u32 high; + __u32 low; +}; + +union __u64val { + struct unpackedU64 unpacked; + __u64 val; +}; + +struct sensor_signal_properties { + __u32 readout_orientation; + __u32 num_lanes; + __u32 mclk_freq; + union __u64val pixel_clock; + __u32 cil_settletime; + __u32 discontinuous_clk; + __u32 dpcm_enable; + __u32 tegra_sinterface; + __u32 phy_mode; + __u32 deskew_initial_enable; + __u32 deskew_periodic_enable; + union __u64val serdes_pixel_clock; + union __u64val mipi_clock; +}; + +struct sensor_image_properties { + __u32 width; + __u32 height; + __u32 line_length; + __u32 pixel_format; + __u32 embedded_metadata_height; + __u32 reserved[11]; +}; + +struct sensor_dv_timings { + __u32 hfrontporch; + __u32 hsync; + __u32 hbackporch; + __u32 vfrontporch; + __u32 vsync; + __u32 vbackporch; + __u32 reserved[10]; +}; + +struct 
sensor_control_properties { + __u32 gain_factor; + __u32 framerate_factor; + __u32 inherent_gain; + __u32 min_gain_val; + __u32 max_gain_val; + __u32 min_hdr_ratio; + __u32 max_hdr_ratio; + __u32 min_framerate; + __u32 max_framerate; + union __u64val min_exp_time; + union __u64val max_exp_time; + __u32 step_gain_val; + __u32 step_framerate; + __u32 exposure_factor; + union __u64val step_exp_time; + __u32 default_gain; + __u32 default_framerate; + union __u64val default_exp_time; + __u32 is_interlaced; + __u32 interlace_type; + __u32 reserved[10]; +}; + +struct sensor_mode_properties { + struct sensor_signal_properties signal_properties; + struct sensor_image_properties image_properties; + struct sensor_control_properties control_properties; + struct sensor_dv_timings dv_timings; +}; + +#define SENSOR_SIGNAL_PROPERTIES_CID_SIZE \ + (sizeof(struct sensor_signal_properties) / sizeof(__u32)) +#define SENSOR_IMAGE_PROPERTIES_CID_SIZE \ + (sizeof(struct sensor_image_properties) / sizeof(__u32)) +#define SENSOR_CONTROL_PROPERTIES_CID_SIZE \ + (sizeof(struct sensor_control_properties) / sizeof(__u32)) +#define SENSOR_DV_TIMINGS_CID_SIZE \ + (sizeof(struct sensor_dv_timings) / sizeof(__u32)) +#define SENSOR_MODE_PROPERTIES_CID_SIZE \ + (sizeof(struct sensor_mode_properties) / sizeof(__u32)) +#define SENSOR_CONFIG_SIZE \ + (sizeof(struct sensor_cfg) / sizeof(__u32)) +#define SENSOR_MODE_BLOB_SIZE \ + (sizeof(struct sensor_blob) / sizeof(__u32)) +#define SENSOR_CTRL_BLOB_SIZE \ + (sizeof(struct sensor_blob) / sizeof(__u32)) +#endif /* __TEGRA_V4L2_CAMERA__ */ diff --git a/include/media/tegra_camera_core.h b/include/media/tegra_camera_core.h new file mode 100644 index 00000000..e6d9a2b1 --- /dev/null +++ b/include/media/tegra_camera_core.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * NVIDIA Tegra Video Input Device Driver Core Helpers + * + * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved. 
+ */ + +#ifndef __TEGRA_CORE_H__ +#define __TEGRA_CORE_H__ + +#include + +/* Minimum and maximum width and height common to Tegra video input device. */ +#define TEGRA_MIN_WIDTH 32U +#define TEGRA_MAX_WIDTH 32768U +#define TEGRA_MIN_HEIGHT 32U +#define TEGRA_MAX_HEIGHT 32768U +/* Width alignment */ +#define TEGRA_WIDTH_ALIGNMENT 1 +/* Stride alignment */ +#define TEGRA_STRIDE_ALIGNMENT 1 +/* Height alignment */ +#define TEGRA_HEIGHT_ALIGNMENT 1 +/* Size alignment */ +#define TEGRA_SIZE_ALIGNMENT 0 + +/* 1080p resolution as default resolution for test pattern generator */ +#define TEGRA_DEF_WIDTH 1920 +#define TEGRA_DEF_HEIGHT 1080 + +#define TEGRA_VF_DEF MEDIA_BUS_FMT_SRGGB10_1X10 +#define TEGRA_IMAGE_FORMAT_DEF 32 + +enum tegra_image_dt { + TEGRA_IMAGE_DT_YUV420_8 = 24, + TEGRA_IMAGE_DT_YUV420_10, + + TEGRA_IMAGE_DT_YUV420CSPS_8 = 28, + TEGRA_IMAGE_DT_YUV420CSPS_10, + TEGRA_IMAGE_DT_YUV422_8, + TEGRA_IMAGE_DT_YUV422_10, + TEGRA_IMAGE_DT_RGB444, + TEGRA_IMAGE_DT_RGB555, + TEGRA_IMAGE_DT_RGB565, + TEGRA_IMAGE_DT_RGB666, + TEGRA_IMAGE_DT_RGB888, + + TEGRA_IMAGE_DT_RAW6 = 40, + TEGRA_IMAGE_DT_RAW7, + TEGRA_IMAGE_DT_RAW8, + TEGRA_IMAGE_DT_RAW10, + TEGRA_IMAGE_DT_RAW12, + TEGRA_IMAGE_DT_RAW14, +}; + +/* Supported CSI to VI Data Formats */ +enum tegra_vf_code { + TEGRA_VF_RAW6 = 0, + TEGRA_VF_RAW7, + TEGRA_VF_RAW8, + TEGRA_VF_RAW10, + TEGRA_VF_RAW12, + TEGRA_VF_RAW14, + TEGRA_VF_EMBEDDED8, + TEGRA_VF_RGB565, + TEGRA_VF_RGB555, + TEGRA_VF_RGB888, + TEGRA_VF_RGB444, + TEGRA_VF_RGB666, + TEGRA_VF_YUV422, + TEGRA_VF_YUV420, + TEGRA_VF_YUV420_CSPS, +}; + +/** + * struct tegra_frac + * @numerator: numerator of the fraction + * @denominator: denominator of the fraction + */ +struct tegra_frac { + unsigned int numerator; + unsigned int denominator; +}; + +/** + * struct tegra_video_format - Tegra video format description + * @vf_code: video format code + * @width: format width in bits per component + * @code: media bus format code + * @bpp: bytes per pixel fraction (when stored 
in memory) + * @img_fmt: image format + * @img_dt: image data type + * @fourcc: V4L2 pixel format FCC identifier + * @description: format description, suitable for userspace + */ +struct tegra_video_format { + enum tegra_vf_code vf_code; + unsigned int width; + unsigned int code; + struct tegra_frac bpp; + u32 img_fmt; + enum tegra_image_dt img_dt; + u32 fourcc; + __u8 description[32]; +}; + +#define TEGRA_VIDEO_FORMAT(VF_CODE, BPP, MBUS_CODE, FRAC_BPP_NUM, \ + FRAC_BPP_DEN, FORMAT, DATA_TYPE, FOURCC, DESCRIPTION) \ +{ \ + TEGRA_VF_##VF_CODE, \ + BPP, \ + MEDIA_BUS_FMT_##MBUS_CODE, \ + {FRAC_BPP_NUM, FRAC_BPP_DEN}, \ + TEGRA_IMAGE_FORMAT_##FORMAT, \ + TEGRA_IMAGE_DT_##DATA_TYPE, \ + V4L2_PIX_FMT_##FOURCC, \ + DESCRIPTION, \ +} + +u32 tegra_core_get_word_count(unsigned int frame_width, + const struct tegra_video_format *fmt); +u32 tegra_core_bytes_per_line(unsigned int width, unsigned int align, + const struct tegra_video_format *fmt); +const struct tegra_video_format *tegra_core_get_default_format(void); + +#endif diff --git a/include/media/tegra_camera_dev_mfi.h b/include/media/tegra_camera_dev_mfi.h new file mode 100644 index 00000000..da13512a --- /dev/null +++ b/include/media/tegra_camera_dev_mfi.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved. 
+ */ + +#ifndef __CAMERA_DEV_MFI_H__ +#define __CAMERA_DEV_MFI_H__ + +#include +#include +#include + +#define CAMERA_MAX_NAME_LENGTH 32 +#define CAMERA_REGCACHE_MAX (128) + +struct cam_reg { + u32 addr; + u32 val; +}; + +struct cam_i2c_msg { + struct i2c_msg msg; + u8 buf[8]; +}; + +struct camera_mfi_dev { + char name[CAMERA_MAX_NAME_LENGTH]; + struct regmap *regmap; + struct cam_reg reg[CAMERA_REGCACHE_MAX]; + struct cam_reg prev_reg[CAMERA_REGCACHE_MAX]; + struct i2c_client *i2c_client; + struct cam_i2c_msg msg[CAMERA_REGCACHE_MAX]; + u32 num_used; + u32 prev_num_used; + struct list_head list; +}; + +struct mfi_cb_arg { + u8 vi_chan; +}; + +void tegra_camera_dev_mfi_cb(void *stub); +int tegra_camera_dev_mfi_clear(struct camera_mfi_dev *cmfidev); +int tegra_camera_dev_mfi_wr_add( + struct camera_mfi_dev *cmfidev, u32 offset, u32 val); +int tegra_camera_dev_mfi_wr_add_i2c( + struct camera_mfi_dev *cmfidev, struct i2c_msg *msg, int num); +int tegra_camera_dev_mfi_add_regmap( + struct camera_mfi_dev **cmfidev, u8 *name, struct regmap *regmap); +int tegra_camera_dev_mfi_add_i2cclient( + struct camera_mfi_dev **cmfidev, u8 *name, + struct i2c_client *i2c_client); + +#endif +/* __CAMERA_DEV_MFI_H__ */ diff --git a/include/media/tegra_camera_platform.h b/include/media/tegra_camera_platform.h new file mode 100644 index 00000000..e7f8ed0d --- /dev/null +++ b/include/media/tegra_camera_platform.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved. 
+ */ + +#ifndef _TEGRA_CAMERA_PLATFORM_H_ +#define _TEGRA_CAMERA_PLATFORM_H_ + +#include + +/* avoid overflows */ +#define DEFAULT_PG_CLK_RATE (UINT_MAX - 1) + +/** + * enum tegra_camera_hw_type - camera hw engines + */ +enum tegra_camera_hw_type { + HWTYPE_NONE = 0, + HWTYPE_CSI, + HWTYPE_SLVSEC, + HWTYPE_VI, + HWTYPE_ISPA, + HWTYPE_ISPB, + HWTYPE_MAX, +}; + +/** + * enum tegra_camera_sensor_type - camera sensor types + */ +enum tegra_camera_sensor_type { + SENSORTYPE_NONE = 0, + SENSORTYPE_DPHY, + SENSORTYPE_CPHY, + SENSORTYPE_SLVSEC, + SENSORTYPE_VIRTUAL, + /* HDMI-IN or other inputs */ + SENSORTYPE_OTHER, + SENSORTYPE_MAX, +}; + +/** + * struct tegra_camera_dev_info - camera devices information + * @priv: a unique identifier assigned during registration + * @hw_type: type of HW engine as defined by the enum above + * @bus_width: csi bus width for clock calculation + * @overhead: hw/ sw overhead considered while calculations + * @ppc: HW capability, pixels per clock + * @clk_rate: calculated clk rate for this node + * @actual_clk_rate: clk rate set by nvhost + * @bw: calculated bw for this node + * @use_max: populated by hw engine to decide it's clocking policy + * @memory_latency: latency allowed for memory freq scaling + * @pdev: pointer to platform_data + * @sensor_type: type of sensor as defined by the enum above + * @pixel_rate: pixel rate coming out of the sensor + * @pixel_bit_depth: bits per pixel + * @bpp: bytes per pixel + * @stream_on: stream enabled on the channel + * @device_node: list node + */ +struct tegra_camera_dev_info { + void *priv; + u32 hw_type; + u32 bus_width; + u32 overhead; + u64 lane_speed; + u32 lane_num; + u32 ppc; + u64 clk_rate; + u64 pg_clk_rate; + unsigned long actual_clk_rate; + u64 bw; + bool use_max; + u32 memory_latency; + struct platform_device *pdev; + u32 sensor_type; + u64 pixel_rate; + u32 pixel_bit_depth; + u32 bpp; + bool stream_on; + struct list_head device_node; +}; + +int tegra_camera_update_isobw(void); +int 
tegra_camera_emc_clk_enable(void); +int tegra_camera_emc_clk_disable(void); +int tegra_camera_device_register(struct tegra_camera_dev_info *cdev_info, + void *priv); +int tegra_camera_device_unregister(void *priv); +int tegra_camera_get_device_list_entry(const u32 hw_type, const void *priv, + struct tegra_camera_dev_info *cdev_info); +int tegra_camera_get_device_list_stats(u32 *n_sensors, u32 *n_hwtypes); +int tegra_camera_update_clknbw(void *priv, bool stream_on); + +#endif diff --git a/include/media/tegra_v4l2_camera.h b/include/media/tegra_v4l2_camera.h new file mode 100644 index 00000000..c8563729 --- /dev/null +++ b/include/media/tegra_v4l2_camera.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2022, NVIDIA CORPORATION. All rights reserved. + */ + +#ifndef _TEGRA_CAMERA_H_ +#define _TEGRA_CAMERA_H_ + +#include +#include + +enum tegra_camera_port { + TEGRA_CAMERA_PORT_CSI_A = 0, + TEGRA_CAMERA_PORT_CSI_B, + TEGRA_CAMERA_PORT_CSI_C, + TEGRA_CAMERA_PORT_CSI_D, + TEGRA_CAMERA_PORT_CSI_E, + TEGRA_CAMERA_PORT_CSI_F, + TEGRA_CAMERA_PORT_VIP, +}; + +struct tegra_camera_platform_data { + int (*enable_camera)(struct platform_device *pdev); + void (*disable_camera)(struct platform_device *pdev); + bool flip_h; + bool flip_v; + enum tegra_camera_port port; + int lanes; /* For CSI port only */ + bool continuous_clk; /* For CSI port only */ +}; + +struct i2c_camera_ctrl { + int (*new_devices)(struct platform_device *pdev); + void (*remove_devices)(struct platform_device *pdev); +}; +#endif /* _TEGRA_CAMERA_H_ */ diff --git a/include/media/tegracam_core.h b/include/media/tegracam_core.h new file mode 100644 index 00000000..3c0e3023 --- /dev/null +++ b/include/media/tegracam_core.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * tegracam_core.h - tegra camera framework core utilities + * + * Copyright (c) 2017-2022, NVIDIA Corporation. All rights reserved. 
+ */ + +#ifndef __TEGRACAM_CORE_H__ +#define __TEGRACAM_CORE_H__ + +#include + +struct tegracam_device { + struct camera_common_data *s_data; + struct media_pad pad; + u32 version; + bool is_streaming; + /* variables to be filled by the driver to register */ + char name[32]; + struct i2c_client *client; + struct device *dev; + u32 numctrls; + const u32 *ctrl_cid_list; + const struct regmap_config *dev_regmap_config; + struct camera_common_sensor_ops *sensor_ops; + const struct v4l2_subdev_ops *v4l2sd_ops; + const struct v4l2_subdev_internal_ops *v4l2sd_internal_ops; + const struct media_entity_operations *media_ops; + const struct tegracam_ctrl_ops *tcctrl_ops; + void *priv; +}; + +u32 tegracam_version(u8 major, u8 minor, u8 patch); +u32 tegracam_query_version(const char *of_dev_name); +struct tegracam_device *to_tegracam_device(struct camera_common_data *data); + +void tegracam_set_privdata(struct tegracam_device *tc_dev, void *priv); +void *tegracam_get_privdata(struct tegracam_device *tc_dev); + +int tegracam_v4l2subdev_register(struct tegracam_device *tc_dev, + bool is_sensor); +void tegracam_v4l2subdev_unregister(struct tegracam_device *tc_dev); +int tegracam_device_register(struct tegracam_device *tc_dev); +void tegracam_device_unregister(struct tegracam_device *tc_dev); +#endif diff --git a/include/media/tegracam_utils.h b/include/media/tegracam_utils.h new file mode 100644 index 00000000..3625c2bf --- /dev/null +++ b/include/media/tegracam_utils.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/** + * tegracam_utils.h - tegra camera framework core utilities + * + * Copyright (c) 2018-2022, NVIDIA Corporation. All rights reserved. 
+ */ + +#ifndef __TEGRACAM_UTILS_H__ +#define __TEGRACAM_UTILS_H__ + +#include + +enum sensor_opcode { + SENSOR_OPCODE_DONE = 0, + SENSOR_OPCODE_READ = 1, + SENSOR_OPCODE_WRITE = 2, + SENSOR_OPCODE_SLEEP = 3, +}; + +int convert_table_to_blob(struct sensor_blob *pkt, + const struct reg_8 table[], + u16 wait_ms_addr, u16 end_addr); +int write_sensor_blob(struct regmap *regmap, struct sensor_blob *blob); +int tegracam_write_blobs(struct tegracam_ctrl_handler *hdl); + +bool is_tvcf_supported(u32 version); +int format_tvcf_version(u32 version, char *buff, size_t size); + +void conv_u32_u8arr(u32 val, u8 *buf); +void conv_u16_u8arr(u16 val, u8 *buf); + +int prepare_write_cmd(struct sensor_blob *pkt, + u32 size, u32 addr, u8 *buf); +int prepare_read_cmd(struct sensor_blob *pkt, + u32 size, u32 addr); +int prepare_sleep_cmd(struct sensor_blob *pkt, u32 time_in_us); +int prepare_done_cmd(struct sensor_blob *pkt); + +#endif diff --git a/include/media/vi.h b/include/media/vi.h new file mode 100644 index 00000000..9b0728ea --- /dev/null +++ b/include/media/vi.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Tegra Graphics Host VI + * + * Copyright (c) 2012-2022, NVIDIA CORPORATION. All rights reserved. 
+ */ + +#ifndef __NVHOST_VI_H__ +#define __NVHOST_VI_H__ + +#include + +#include + +#define VI_CFG_INTERRUPT_MASK_0 0x8c +#define VI_CFG_INTERRUPT_STATUS_0 0x98 + +#define CSI_CSI_PIXEL_PARSER_A_INTERRUPT_MASK_0 0x850 +#define CSI_CSI_PIXEL_PARSER_A_STATUS_0 0x854 +#define PPA_FIFO_OVRF (1 << 5) + +#define CSI_CSI_PIXEL_PARSER_B_INTERRUPT_MASK_0 0x884 +#define CSI_CSI_PIXEL_PARSER_B_STATUS_0 0x888 +#define PPB_FIFO_OVRF (1 << 5) + +#define VI_CSI_0_ERROR_STATUS 0x184 +#define VI_CSI_1_ERROR_STATUS 0x284 +#define VI_CSI_0_WD_CTRL 0x18c +#define VI_CSI_1_WD_CTRL 0x28c +#define VI_CSI_0_ERROR_INT_MASK_0 0x188 +#define VI_CSI_1_ERROR_INT_MASK_0 0x288 + +#ifdef TEGRA_21X_OR_HIGHER_CONFIG +#define VI_CSI_2_ERROR_STATUS 0x384 +#define VI_CSI_3_ERROR_STATUS 0x484 +#define VI_CSI_4_ERROR_STATUS 0x584 +#define VI_CSI_5_ERROR_STATUS 0x684 + +#define VI_CSI_2_WD_CTRL 0x38c +#define VI_CSI_3_WD_CTRL 0x48c +#define VI_CSI_4_WD_CTRL 0x58c +#define VI_CSI_5_WD_CTRL 0x68c + +#define VI_CSI_2_ERROR_INT_MASK_0 0x388 +#define VI_CSI_3_ERROR_INT_MASK_0 0x488 +#define VI_CSI_4_ERROR_INT_MASK_0 0x588 +#define VI_CSI_5_ERROR_INT_MASK_0 0x688 + +#define CSI1_CSI_PIXEL_PARSER_A_INTERRUPT_MASK_0 0x1050 +#define CSI1_CSI_PIXEL_PARSER_A_STATUS_0 0x1054 +#define CSI1_CSI_PIXEL_PARSER_B_INTERRUPT_MASK_0 0x1084 +#define CSI1_CSI_PIXEL_PARSER_B_STATUS_0 0x1088 +#define CSI2_CSI_PIXEL_PARSER_A_INTERRUPT_MASK_0 0x1850 +#define CSI2_CSI_PIXEL_PARSER_A_STATUS_0 0x1854 +#define CSI2_CSI_PIXEL_PARSER_B_INTERRUPT_MASK_0 0x1884 +#define CSI2_CSI_PIXEL_PARSER_B_STATUS_0 0x1888 + +#define NUM_VI_WATCHDOG 6 +#else +#define NUM_VI_WATCHDOG 2 +#endif + +typedef void (*callback)(void *); + +struct tegra_vi_stats { + atomic_t overflow; +}; + +struct tegra_vi_mfi_ctx; + +struct vi { + struct tegra_camera *camera; + struct platform_device *ndev; + struct device *dev; + struct tegra_vi_data *data; + struct tegra_mc_vi mc_vi; + struct tegra_csi_device csi; + + struct regulator *reg; + struct dentry *debugdir; + 
struct tegra_vi_stats vi_out; + struct work_struct stats_work; + struct tegra_vi_mfi_ctx *mfi_ctx; + int vi_irq; + uint vi_bypass_bw; + uint max_bw; + bool master_deinitialized; + bool tpg_opened; + bool sensor_opened; + bool bypass; +}; + +extern const struct file_operations tegra_vi_ctrl_ops; +int nvhost_vi_prepare_poweroff(struct platform_device *); +int nvhost_vi_finalize_poweron(struct platform_device *); + +void nvhost_vi_reset_all(struct platform_device *); +struct vi *tegra_vi_get(void); +int vi_v4l2_set_la(struct vi *tegra_vi, u32 vi_bypass_bw, bool is_ioctl); + +int tegra_vi_register_mfi_cb(callback cb, void *cb_arg); +int tegra_vi_unregister_mfi_cb(void); + +bool tegra_vi_has_mfi_callback(void); +int tegra_vi_mfi_event_notify(struct tegra_vi_mfi_ctx *mfi_ctx, u8 channel); +int tegra_vi_init_mfi(struct tegra_vi_mfi_ctx **mfi_ctx, u8 num_channels); +void tegra_vi_deinit_mfi(struct tegra_vi_mfi_ctx **mfi_ctx); +#endif diff --git a/include/media/vi2_registers.h b/include/media/vi2_registers.h new file mode 100644 index 00000000..4e15ea64 --- /dev/null +++ b/include/media/vi2_registers.h @@ -0,0 +1,216 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Tegra VI/CSI register offsets + * + * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved. 
+ */ + +#ifndef __REGISTERS_H__ +#define __REGISTERS_H__ + +/* VI registers */ +#define TEGRA_VI_SYNCPT_WAIT_TIMEOUT 200 +#define TEGRA_VI_CFG_VI_INCR_SYNCPT 0x000 +#define VI_CFG_VI_INCR_SYNCPT_COND(x) (x << 8) +#define VI_CSI_PP_LINE_START(port) (4 + (port) * 4) +#define VI_CSI_PP_FRAME_START(port) (5 + (port) * 4) +#define VI_CSI_MW_REQ_DONE(port) (6 + (port) * 4) +#define VI_CSI_MW_ACK_DONE(port) (7 + (port) * 4) + +#define TEGRA_VI_CFG_VI_INCR_SYNCPT_CNTRL 0x004 +#define TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR 0x008 +#define TEGRA_VI_CFG_CTXSW 0x020 +#define TEGRA_VI_CFG_INTSTATUS 0x024 +#define TEGRA_VI_CFG_PWM_CONTROL 0x038 +#define TEGRA_VI_CFG_PWM_HIGH_PULSE 0x03c +#define TEGRA_VI_CFG_PWM_LOW_PULSE 0x040 +#define TEGRA_VI_CFG_PWM_SELECT_PULSE_A 0x044 +#define TEGRA_VI_CFG_PWM_SELECT_PULSE_B 0x048 +#define TEGRA_VI_CFG_PWM_SELECT_PULSE_C 0x04c +#define TEGRA_VI_CFG_PWM_SELECT_PULSE_D 0x050 +#define TEGRA_VI_CFG_VGP1 0x064 +#define TEGRA_VI_CFG_VGP2 0x068 +#define TEGRA_VI_CFG_VGP3 0x06c +#define TEGRA_VI_CFG_VGP4 0x070 +#define TEGRA_VI_CFG_VGP5 0x074 +#define TEGRA_VI_CFG_VGP6 0x078 +#define TEGRA_VI_CFG_INTERRUPT_MASK 0x08c +#define TEGRA_VI_CFG_INTERRUPT_TYPE_SELECT 0x090 +#define TEGRA_VI_CFG_INTERRUPT_POLARITY_SELECT 0x094 +#define TEGRA_VI_CFG_INTERRUPT_STATUS 0x098 +#define TEGRA_VI_CFG_VGP_SYNCPT_CONFIG 0x0ac +#define TEGRA_VI_CFG_VI_SW_RESET 0x0b4 +#define TEGRA_VI_CFG_CG_CTRL 0x0b8 +#define VI_CG_2ND_LEVEL_EN 0x1 +#define TEGRA_VI_CFG_VI_MCCIF_FIFOCTRL 0x0e4 +#define TEGRA_VI_CFG_TIMEOUT_WCOAL_VI 0x0e8 +#define TEGRA_VI_CFG_DVFS 0x0f0 +#define TEGRA_VI_CFG_RESERVE 0x0f4 +#define TEGRA_VI_CFG_RESERVE_1 0x0f8 + +/* CSI registers */ +#define TEGRA_VI_CSI_BASE(x) (0x100 + (x) * 0x100) + +#define TEGRA_VI_CSI_SW_RESET 0x000 +#define TEGRA_VI_CSI_SINGLE_SHOT 0x004 +#define SINGLE_SHOT_CAPTURE 0x1 +#define CAPTURE_GOOD_FRAME 0x1 +#define TEGRA_VI_CSI_SINGLE_SHOT_STATE_UPDATE 0x008 +#define TEGRA_VI_CSI_IMAGE_DEF 0x00c +#define BYPASS_PXL_TRANSFORM_OFFSET 24 
+#define IMAGE_DEF_FORMAT_OFFSET 16 +#define IMAGE_DEF_DEST_MEM 0x1 +#define TEGRA_VI_CSI_RGB2Y_CTRL 0x010 +#define TEGRA_VI_CSI_MEM_TILING 0x014 +#define TEGRA_VI_CSI_IMAGE_SIZE 0x018 +#define IMAGE_SIZE_HEIGHT_OFFSET 16 +#define TEGRA_VI_CSI_IMAGE_SIZE_WC 0x01c +#define TEGRA_VI_CSI_IMAGE_DT 0x020 +#define TEGRA_VI_CSI_SURFACE0_OFFSET_MSB 0x024 +#define TEGRA_VI_CSI_SURFACE0_OFFSET_LSB 0x028 +#define TEGRA_VI_CSI_SURFACE1_OFFSET_MSB 0x02c +#define TEGRA_VI_CSI_SURFACE1_OFFSET_LSB 0x030 +#define TEGRA_VI_CSI_SURFACE2_OFFSET_MSB 0x034 +#define TEGRA_VI_CSI_SURFACE2_OFFSET_LSB 0x038 +#define TEGRA_VI_CSI_SURFACE0_BF_OFFSET_MSB 0x03c +#define TEGRA_VI_CSI_SURFACE0_BF_OFFSET_LSB 0x040 +#define TEGRA_VI_CSI_SURFACE1_BF_OFFSET_MSB 0x044 +#define TEGRA_VI_CSI_SURFACE1_BF_OFFSET_LSB 0x048 +#define TEGRA_VI_CSI_SURFACE2_BF_OFFSET_MSB 0x04c +#define TEGRA_VI_CSI_SURFACE2_BF_OFFSET_LSB 0x050 +#define TEGRA_VI_CSI_SURFACE0_STRIDE 0x054 +#define TEGRA_VI_CSI_SURFACE1_STRIDE 0x058 +#define TEGRA_VI_CSI_SURFACE2_STRIDE 0x05c +#define TEGRA_VI_CSI_SURFACE_HEIGHT0 0x060 +#define TEGRA_VI_CSI_ISPINTF_CONFIG 0x064 +#define TEGRA_VI_CSI_ERROR_STATUS 0x084 +#define TEGRA_VI_CSI_ERROR_INT_MASK 0x088 +#define TEGRA_VI_CSI_WD_CTRL 0x08c +#define TEGRA_VI_CSI_WD_PERIOD 0x090 + +/* CSI Pixel Parser registers: Starts from 0x838, offset 0x0 */ +#define TEGRA_CSI_INPUT_STREAM_CONTROL 0x000 +#define CSI_SKIP_PACKET_THRESHOLD_OFFSET 16 + +#define TEGRA_CSI_PIXEL_STREAM_CONTROL0 0x004 +#define CSI_PP_PACKET_HEADER_SENT (0x1 << 4) +#define CSI_PP_DATA_IDENTIFIER_ENABLE (0x1 << 5) +#define CSI_PP_WORD_COUNT_SELECT_HEADER (0x1 << 6) +#define CSI_PP_CRC_CHECK_ENABLE (0x1 << 7) +#define CSI_PP_WC_CHECK (0x1 << 8) +#define CSI_PP_OUTPUT_FORMAT_STORE (0x3 << 16) +#define CSI_PPA_PAD_LINE_NOPAD (0x2 << 24) +#define CSI_PP_HEADER_EC_DISABLE (0x1 << 27) +#define CSI_PPA_PAD_FRAME_NOPAD (0x2 << 28) + +#define TEGRA_CSI_PIXEL_STREAM_CONTROL1 0x008 +#define CSI_PP_TOP_FIELD_FRAME_OFFSET 0 +#define 
CSI_PP_TOP_FIELD_FRAME_MASK_OFFSET 4 + +#define TEGRA_CSI_PIXEL_STREAM_GAP 0x00c +#define PP_FRAME_MIN_GAP_OFFSET 16 + +#define TEGRA_CSI_PIXEL_STREAM_PP_COMMAND 0x010 +#define CSI_PP_ENABLE 0x1 +#define CSI_PP_DISABLE 0x2 +#define CSI_PP_RST 0x3 +#define CSI_PP_SINGLE_SHOT_ENABLE (0x1 << 2) +#define CSI_PP_START_MARKER_FRAME_MAX_OFFSET 12 + +#define TEGRA_CSI_PIXEL_STREAM_EXPECTED_FRAME 0x014 +#define TEGRA_CSI_PIXEL_PARSER_INTERRUPT_MASK 0x018 +#define TEGRA_CSI_PIXEL_PARSER_STATUS 0x01c +#define TEGRA_CSI_CSI_SW_SENSOR_RESET 0x020 + +/* CSI PHY registers */ +/* CSI_PHY_CIL_COMMAND_0 offset 0x0d0 from TEGRA_CSI_PIXEL_PARSER_0_BASE */ +#define TEGRA_CSI_PHY_CIL_COMMAND 0x0d0 +#define CSI_A_PHY_CIL_NOP 0x0 +#define CSI_A_PHY_CIL_ENABLE 0x1 +#define CSI_A_PHY_CIL_DISABLE 0x2 +#define CSI_A_PHY_CIL_ENABLE_MASK 0x3 +#define CSI_B_PHY_CIL_NOP (0x0 << 8) +#define CSI_B_PHY_CIL_ENABLE (0x1 << 8) +#define CSI_B_PHY_CIL_DISABLE (0x2 << 8) +#define CSI_B_PHY_CIL_ENABLE_MASK (0x3 << 8) + +/* CSI CIL registers: Starts from 0x92c, offset 0xF4 */ +#define TEGRA_CSI_CIL_OFFSET 0x0f4 + +#define TEGRA_CSI_CIL_PAD_CONFIG0 0x000 +#define BRICK_CLOCK_A_4X (0x1 << 16) +#define BRICK_CLOCK_B_4X (0x2 << 16) +#define TEGRA_CSI_CIL_PAD_CONFIG1 0x004 +#define TEGRA_CSI_CIL_PHY_CONTROL 0x008 +#define BYPASS_LP_SEQ (0x1 << 6) +#define TEGRA_CSI_CIL_INTERRUPT_MASK 0x00c +#define TEGRA_CSI_CIL_STATUS 0x010 +#define TEGRA_CSI_CILX_STATUS 0x014 +#define TEGRA_CSI_CIL_ESCAPE_MODE_COMMAND 0x018 +#define TEGRA_CSI_CIL_ESCAPE_MODE_DATA 0x01c +#define TEGRA_CSI_CIL_SW_SENSOR_RESET 0x020 + +/* CSI Pattern Generator registers: Starts from 0x9c4, offset 0x18c */ +#define TEGRA_CSI_TPG_OFFSET 0x18c + +#define TEGRA_CSI_PATTERN_GENERATOR_CTRL 0x000 +#define PG_MODE_OFFSET 2 +#define PG_ENABLE 0x1 +#define PG_DISABLE 0x0 + +#define PG_VBLANK_OFFSET 16 +#define TEGRA_CSI_PG_BLANK 0x004 +#define TEGRA_CSI_PG_PHASE 0x008 +#define TEGRA_CSI_PG_RED_FREQ 0x00c +#define PG_RED_VERT_INIT_FREQ_OFFSET 16 +#define 
PG_RED_HOR_INIT_FREQ_OFFSET 0 + +#define TEGRA_CSI_PG_RED_FREQ_RATE 0x010 +#define TEGRA_CSI_PG_GREEN_FREQ 0x014 +#define PG_GREEN_VERT_INIT_FREQ_OFFSET 16 +#define PG_GREEN_HOR_INIT_FREQ_OFFSET 0 + +#define TEGRA_CSI_PG_GREEN_FREQ_RATE 0x018 +#define TEGRA_CSI_PG_BLUE_FREQ 0x01c +#define PG_BLUE_VERT_INIT_FREQ_OFFSET 16 +#define PG_BLUE_HOR_INIT_FREQ_OFFSET 0 + +#define TEGRA_CSI_PG_BLUE_FREQ_RATE 0x020 +#define TEGRA_CSI_PG_AOHDR 0x024 + +#define TEGRA_CSI_DPCM_CTRL_A 0xa2c +#define TEGRA_CSI_DPCM_CTRL_B 0xa30 + +/* Other CSI registers: Starts from 0xa44, offset 0x20c */ +#define TEGRA_CSI_STALL_COUNTER 0x20c +#define TEGRA_CSI_CSI_READONLY_STATUS 0x210 +#define TEGRA_CSI_CSI_SW_STATUS_RESET 0x214 +#define TEGRA_CSI_CLKEN_OVERRIDE 0x218 +#define TEGRA_CSI_DEBUG_CONTROL 0x21c +#define TEGRA_CSI_DEBUG_COUNTER_0 0x220 +#define TEGRA_CSI_DEBUG_COUNTER_1 0x224 +#define TEGRA_CSI_DEBUG_COUNTER_2 0x228 + + +/* CSI Pixel Parser registers */ +#define TEGRA_CSI_PIXEL_PARSER_0_BASE 0x0838 +#define TEGRA_CSI_PIXEL_PARSER_1_BASE 0x086c +#define TEGRA_CSI_PIXEL_PARSER_2_BASE 0x1038 +#define TEGRA_CSI_PIXEL_PARSER_3_BASE 0x106c +#define TEGRA_CSI_PIXEL_PARSER_4_BASE 0x1838 +#define TEGRA_CSI_PIXEL_PARSER_5_BASE 0x186c + +/* CSIA to CSIB register offset */ +#define TEGRA_CSI_PORT_OFFSET 0x34 + +#define INVALID_CSI_PORT 0xFF +#define TEGRA_CSI_BLOCKS 3 +#define SYNCPT_FIFO_DEPTH 2 +#define PREVIOUS_BUFFER_DEC_INDEX 2 + +#define TEGRA_CLOCK_VI_MAX 793600000 +#define TEGRA_CLOCK_TPG 927000000 +#define TEGRA_CLOCK_CSI_PORT_MAX 102000000 + +#define TEGRA_SURFACE_ALIGNMENT 64 +#endif diff --git a/include/media/vi4_registers.h b/include/media/vi4_registers.h new file mode 100644 index 00000000..dcd891cf --- /dev/null +++ b/include/media/vi4_registers.h @@ -0,0 +1,259 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Tegra 18x VI register offsets + * + * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved. 
+ */ + +#ifndef __VI4_REGISTERS_H__ +#define __VI4_REGISTERS_H__ + +/* VI registers. Start from 0x0 */ +#define VI_STREAMS (6) +#define VIRTUAL_CHANNELS (4) +#define VI4_CHANNEL_OFFSET 0x10000 + +#define CFG_INTERRUPT_STATUS 0x44 +#define CFG_INTERRUPT_MASK 0x48 +#define VGP6_INT_MASK (0x1 << 29) +#define VGP5_INT_MASK (0x1 << 28) +#define VGP4_INT_MASK (0x1 << 27) +#define VGP3_INT_MASK (0x1 << 26) +#define VGP2_INT_MASK (0x1 << 25) +#define VGP1_INT_MASK (0x1 << 24) +#define HOST_PKTINJECT_STALL_ERR_MASK (0x1 << 7) +#define CSIMUX_FIFO_OVFL_ERR_MASK (0x1 << 6) +#define ATOMP_PACKER_OVFL_ERR_MASK (0x1 << 5) +#define FMLITE_BUF_OVFL_ERR_MASK (0x1 << 4) +#define NOTIFY_FIFO_OVFL_ERR_MASK (0x1 << 3) +#define ISPBUFA_ERR_MASK (0x1 << 0) + +#define CFG_PWM_HIGH_PULSE 0x50 +#define PWM_HIGH_PULSE (0xffffffff << 0) + +#define CSIMUX_CONFIG_STREAM_0 0x424 +#define CSIMUX_CONFIG_STREAM_1 0x428 +#define CSIMUX_CONFIG_STREAM_2 0x42C +#define CSIMUX_CONFIG_STREAM_3 0x430 +#define CSIMUX_CONFIG_STREAM_4 0x434 +#define CSIMUX_CONFIG_STREAM_5 0x438 +#define FRAMEIDGEN (0xf << 26) +#define STICKYFAULT (0x1 << 25) +#define VPR (0x1 << 24) +#define SRESET (0x1 << 23) +#define QBLOCK (0x1 << 22) +#define FEINJECT (0x1 << 21) +#define FESHORTTIMER (0x1 << 20) +#define FEMAXTIME (0xffff << 4) +#define WT (0xf << 0) + +#define NOTIFY_FIFO_TAG_0 0x4000 +#define NOTIFY_FRAME_ID (0xffff << 16) +#define NOTIFY_CHANNEL (0xff << 8) +#define NOTIFY_CHANNEL_SHIFT (8) +#define NOTIFY_TAG (0x1f << 1) +#define NOTIFY_TAG_SHIFT (1) +#define NOTIFY_VALID (0x1 << 0) + +#define TAG_FS 0 +#define TAG_FE 1 +#define TAG_CSIMUX_FRAME 2 +#define TAG_CSIMUX_STREAM 3 +#define TAG_CHANSEL_PXL_SOF 4 +#define TAG_CHANSEL_PXL_EOF 5 +#define TAG_CHANSEL_EMBED_SOF 6 +#define TAG_CHANSEL_EMBED_EOF 7 +#define TAG_CHANSEL_NLINES 8 +#define TAG_CHANSEL_FAULT 9 +#define TAG_CHANSEL_FAULT_FE 10 +#define TAG_CHANSEL_NOMATCH 11 +#define TAG_CHANSEL_COLLISION 12 +#define TAG_CHANSEL_SHORT_FRAME 13 +#define 
TAG_CHANSEL_LOAD_FRAMED 14 +#define TAG_ATOMP_PACKER_OVERFLOW 15 +#define TAG_ATOMP_FS 16 +#define TAG_ATOMP_FE 17 +#define TAG_ATOMP_FRAME_DONE 18 +#define TAG_ATOMP_EMB_DATA_DONE 19 +#define TAG_ATOMP_FRAME_NLINES_DONE 20 +#define TAG_ATOMP_FRAME_TRUNCATED 21 +#define TAG_ATOMP_FRAME_TOSSED 22 +#define TAG_ATOMP_PDAF_DATA_DONE 23 +#define TAG_ISPBUF_FIFO_OVERFLOW 26 +#define TAG_ISPBUF_FS 27 +#define TAG_ISPBUF_FE 28 +#define TAG_VGP0_DONE 29 +#define TAG_VGP1_DONE 30 +#define TAG_FMLITE_DONE 31 + +#define NOTIFY_FIFO_TIMESTAMP_0 0x4004 +#define NOTIFY_TIMESTAMP (0xffffffff << 0) + +#define NOTIFY_FIFO_DATA_0 0x4008 +#define NOTIFY_DATA (0xffffffff << 0) + +#define NOTIFY_TAG_CLASSIFY_0 0x6000 +#define NOTIFY_TAG_CLASSIFY_1 0x6004 +#define NOTIFY_TAG_CLASSIFY_2 0x6008 +#define NOTIFY_TAG_CLASSIFY_3 0x600c +#define NOTIFY_TAG_CLASSIFY_4 0x6010 +#define STREAM5_FEINJECT_VC (0xf << 20) +#define STREAM4_FEINJECT_VC (0xf << 16) +#define STREAM3_FEINJECT_VC (0xf << 12) +#define STREAM2_FEINJECT_VC (0xf << 8) +#define STREAM1_FEINJECT_VC (0xf << 4) +#define STREAM0_FEINJECT_VC (0xf << 0) + +#define NOTIFY_FIFO_OCCUPANCY_0 0x6014 +#define NOTIFY_MAX (0x3ff << 20) +#define NOTIFY_CURRENT (0x3ff << 10) +#define NOTIFY_CURRENT_SHIFT 10 +#define NOTIFY_SIZE (0x3ff << 0) + +/* VI_CH registers. 
Start from 0x10000, offset 0x10000 */ +#define CHANNEL_COMMAND 0x004 +#define WR_ACT_SEL (0x1 << 5) +#define RD_MUX_SEL (0x1 << 4) +#define AUTOLOAD (0x1 << 1) +#define LOAD (0x1 << 0) + +#define CONTROL 0x01c +#define SPARE (0xffff << 16) +#define POST_RUNAWAY_EMBED (0x1 << 4) +#define POST_RUNAWAY_PIXEL (0x1 << 3) +#define EARLY_ABORT (0x1 << 2) +#define SINGLESHOT (0x1 << 1) +#define MATCH_STATE_EN (0x1 << 0) + +#define MATCH 0x020 +#define STREAM (0x3f << 14) +#define STREAM_SHIFT (14) +#define STREAM_MASK (0x3f << 8) +#define VIRTUAL_CHANNEL (0xf << 4) +#define VIRTUAL_CHANNEL_SHIFT (4) +#define VIRTUAL_CHANNEL_MASK (0xf << 0) + +#define MATCH_DATATYPE 0x024 +#define DATATYPE (0x3f << 6) +#define DATATYPE_SHIFT (6) +#define DATATYPE_MASK (0x3f << 0) +#define DATATYPE_MASK_SHIFT (0) + +#define MATCH_FRAMEID 0x028 +#define FRAMEID (0xffff << 16) +#define FRAMEID_SHIFT (16) +#define FRAMEID_MASK (0xffff << 0) + +#define DT_OVERRIDE 0x02c +#define OVRD_DT (0x3f << 1) +#define DT_OVRD_EN (0x1 << 0) + +#define FRAME_X 0x030 +#define CROP_X 0x04c +#define OUT_X 0x058 +#define WIDTH (0xffff < 0) + +#define FRAME_Y 0x034 +#define CROP_Y 0x054 +#define OUT_Y 0x05c +#define HEIGHT (0xffff < 0) + +#define EMBED_X 0x038 +#define MAX_BYTES (0x3ffff < 0) + +#define EMBED_Y 0x03c +#define SKIP_Y 0x050 +#define LINES (0xffff < 0) +/* for EMBED_Y only */ +#define EXPECT (0x1 << 24) + +#define LINE_TIMER 0x044 +#define LINE_TIMER_EN (0x1 << 25) +#define PERIODIC (0x1 << 24) +#define TRIPLINE (0xffff << 0) + +#define SKIP_X 0x048 +#define PACKETS (0x1fff << 0) + +#define NOTIFY_MASK 0x060 +#define MASK_DTYPE_MISMATCH (0x1 << 31) +#define MASK_EMBED_INFRINGE (0x1 << 22) +#define MASK_EMBED_LONG_LINE (0x1 << 21) +#define MASK_EMBED_SPURIOUS (0x1 << 20) +#define MASK_EMBED_RUNAWAY (0x1 << 19) +#define MASK_EMBED_MISSING_LE (0x1 << 18) +#define MASK_EMBED_EOF (0x1 << 17) +#define MASK_EMBED_SOF (0x1 << 16) +#define MASK_PIXEL_LINE_TIMER (0x1 << 7) +#define MASK_PIXEL_SHORT_LINE (0x1 
<< 6) +#define MASK_PIXEL_LONG_LINE (0x1 << 5) +#define MASK_PIXEL_SPURIOUS (0x1 << 4) +#define MASK_PIXEL_RUNAWAY (0x1 << 3) +#define MASK_PIXEL_MISSING_LE (0x1 << 2) +#define MASK_PIXEL_EOF (0x1 << 1) +#define MASK_PIXEL_SOF (0x1 << 0) + +#define NOTIFY_MASK_XCPT 0x064 +#define MASK_NOMATCH (0x1 << 9) +#define MASK_EMBED_OPEN_LINE (0x1 << 8) +#define MASK_PIXEL_OPEN_LINE (0x1 << 7) +#define MASK_FORCE_FE (0x1 << 6) +#define MASK_STALE_FRAME (0x1 << 5) +#define MASK_COLLISION (0x1 << 4) +#define MASK_EMPTY_FRAME (0x1 << 3) +#define MASK_EMBED_SHORT_FRAME (0x1 << 2) +#define MASK_PIXEL_SHORT_FRAME (0x1 << 1) +#define MASK_LOAD_FRAMED (0x1 << 0) + +#define FRAME_COUNT 0x06c + +#define PIXFMT_ENABLE 0x080 +#define PDAF_EN (0x1 << 2) +#define COMPAND_EN (0x1 << 1) +#define PIXFMT_EN (0x1 << 0) + +#define PIXFMT_FORMAT 0x084 +#define FORMAT (0xff << 0) +/* refer to enum tegra_image_format in core.h */ + +#define PIXFMT_WIDE 0x088 +#define ENDIAN_BIG (0x0 << 1) +#define ENDIAN_LITTLE (0x1 << 1) +#define PIXFMT_WIDE_EN (0x1 << 0) + +#define DPCM_STRIP 0x0b8 +#define OVERFETCH (0x1fff < 16) +#define STRIP_WIDTH (0x1fff < 0) + +#define ATOMP_DPCM_CHUNK 0x0ec +#define CHUNK_OFFSET (0x3ffff << 0) + +#define ATOMP_SURFACE_OFFSET0 0x0e0 +#define ATOMP_SURFACE_OFFSET1 0x0f0 +#define ATOMP_SURFACE_OFFSET2 0x0fc +#define ATOMP_EMB_SURFACE_OFFSET0 0x108 +#define SURFACE_OFFSET (0xffffffff << 0) + +#define ATOMP_SURFACE_OFFSET0_H 0x0e4 +#define ATOMP_SURFACE_OFFSET1_H 0x0f4 +#define ATOMP_SURFACE_OFFSET2_H 0x100 +#define ATOMP_EMB_SURFACE_OFFSET0_H 0x10c +#define SURFACE_OFFSET_HI (0xff << 0) + +#define ATOMP_SURFACE_STRIDE0 0x0e8 +#define ATOMP_SURFACE_STRIDE1 0x0f8 +#define ATOMP_SURFACE_STRIDE2 0x104 +#define ATOMP_EMB_SURFACE_STRIDE0 0x110 +#define SURFACE_STRIDE (0x3ffff << 0) +#define ATOMP_RESERVE 0x120 + +#define ISPBUFA 0x134 +#define ISPBUFA_EN (0x1 << 0) + +#define ISPBUFA_ERROR 0x1000 +#define FIFO_OVERFLOW (0x1 << 0) + +#define FMLITE_ERROR 0x313c +#define NOTIFY_ERROR 
0x6020 + +#endif /* __VI4_REGISTERS_H__ */ diff --git a/include/soc/tegra/camrtc-capture-messages.h b/include/soc/tegra/camrtc-capture-messages.h new file mode 100644 index 00000000..c8891c7e --- /dev/null +++ b/include/soc/tegra/camrtc-capture-messages.h @@ -0,0 +1,1114 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2022, NVIDIA Corporation. All rights reserved. + */ + +/** + * @file camrtc-capture-messages.h + * + * @brief Capture control and Capture IVC messages + */ + +#ifndef INCLUDE_CAMRTC_CAPTURE_MESSAGES_H +#define INCLUDE_CAMRTC_CAPTURE_MESSAGES_H + +#include "camrtc-capture.h" + +#pragma GCC diagnostic error "-Wpadded" + +/** + * @brief Standard message header for all capture and capture-control IVC messages. + * + * Control Requests not associated with a specific channel + * will use an opaque transaction ID rather than channel_id. + * The transaction ID in the response message is copied from + * the request message. + */ +struct CAPTURE_MSG_HEADER { + /** Message identifier. */ + uint32_t msg_id; + /** @anon_union */ + union { + /** Channel number. @anon_union_member */ + uint32_t channel_id; + /** Transaction id. @anon_union_member */ + uint32_t transaction; + }; +} CAPTURE_IVC_ALIGN; + +/** + * @defgroup ViCapCtrlMsgType Message types for capture-control IVC channel messages. 
+ * @{ + */ +#define CAPTURE_CONTROL_RESERVED_10 MK_U32(0x10) +#define CAPTURE_CHANNEL_SETUP_REQ MK_U32(0x1E) +#define CAPTURE_CHANNEL_SETUP_RESP MK_U32(0x11) +#define CAPTURE_CHANNEL_RESET_REQ MK_U32(0x12) +#define CAPTURE_CHANNEL_RESET_RESP MK_U32(0x13) +#define CAPTURE_CHANNEL_RELEASE_REQ MK_U32(0x14) +#define CAPTURE_CHANNEL_RELEASE_RESP MK_U32(0x15) +#define CAPTURE_COMPAND_CONFIG_REQ MK_U32(0x16) +#define CAPTURE_COMPAND_CONFIG_RESP MK_U32(0x17) +#define CAPTURE_PDAF_CONFIG_REQ MK_U32(0x18) +#define CAPTURE_PDAF_CONFIG_RESP MK_U32(0x19) +#define CAPTURE_SYNCGEN_ENABLE_REQ MK_U32(0x1A) +#define CAPTURE_SYNCGEN_ENABLE_RESP MK_U32(0x1B) +#define CAPTURE_SYNCGEN_DISABLE_REQ MK_U32(0x1C) +#define CAPTURE_SYNCGEN_DISABLE_RESP MK_U32(0x1D) +/** @} */ + +/** + * @defgroup IspCapCtrlMsgType Message types for ISP capture-control IVC channel messages. + * @{ + */ +#define CAPTURE_CHANNEL_ISP_SETUP_REQ MK_U32(0x20) +#define CAPTURE_CHANNEL_ISP_SETUP_RESP MK_U32(0x21) +#define CAPTURE_CHANNEL_ISP_RESET_REQ MK_U32(0x22) +#define CAPTURE_CHANNEL_ISP_RESET_RESP MK_U32(0x23) +#define CAPTURE_CHANNEL_ISP_RELEASE_REQ MK_U32(0x24) +#define CAPTURE_CHANNEL_ISP_RELEASE_RESP MK_U32(0x25) +/** @} */ + +/** + * @defgroup ViCapMsgType Message types for capture channel IVC messages. + * @{ + */ +#define CAPTURE_REQUEST_REQ MK_U32(0x01) +#define CAPTURE_STATUS_IND MK_U32(0x02) +#define CAPTURE_RESET_BARRIER_IND MK_U32(0x03) +/** @} */ + +/** + * @defgroup IspCapMsgType Message types for ISP capture channel IVC messages. + * @{ + */ +#define CAPTURE_ISP_REQUEST_REQ MK_U32(0x04) +#define CAPTURE_ISP_STATUS_IND MK_U32(0x05) +#define CAPTURE_ISP_PROGRAM_REQUEST_REQ MK_U32(0x06) +#define CAPTURE_ISP_PROGRAM_STATUS_IND MK_U32(0x07) +#define CAPTURE_ISP_RESET_BARRIER_IND MK_U32(0x08) +#define CAPTURE_ISP_EX_STATUS_IND MK_U32(0x09) +/** @} */ + +/** + * @brief Invalid message type. This can be used to respond to an invalid request. 
+ */ +#define CAPTURE_MSG_ID_INVALID MK_U32(0xFFFFFFFF) + +/** + * @brief Invalid channel id. Used when channel is not specified. + */ +#define CAPTURE_CHANNEL_ID_INVALID MK_U32(0xFFFFFFFF) + +typedef uint32_t capture_result; + +/** + * @defgroup CapErrorCodes Unsigned 32-bit return values for the capture-control IVC messages. + * @{ + */ +#define CAPTURE_OK MK_U32(0) +#define CAPTURE_ERROR_INVALID_PARAMETER MK_U32(1) +#define CAPTURE_ERROR_NO_MEMORY MK_U32(2) +#define CAPTURE_ERROR_BUSY MK_U32(3) +#define CAPTURE_ERROR_NOT_SUPPORTED MK_U32(4) +#define CAPTURE_ERROR_NOT_INITIALIZED MK_U32(5) +#define CAPTURE_ERROR_OVERFLOW MK_U32(6) +#define CAPTURE_ERROR_NO_RESOURCES MK_U32(7) +#define CAPTURE_ERROR_TIMEOUT MK_U32(8) +#define CAPTURE_ERROR_INVALID_STATE MK_U32(9) +/** @} */ + +/** + * @brief VI capture channel setup request message. + * + * Setup the VI Falcon channel context and initialize the + * RCE capture channel context. The GOS tables are also configured. + * The client shall use the transaction id field + * in the standard message header to associate request and response. + */ +struct CAPTURE_CHANNEL_SETUP_REQ_MSG { + /** Capture channel configuration. */ + struct capture_channel_config channel_config; +} CAPTURE_IVC_ALIGN; + +/** + * @brief VI Capture channel setup response message. + * + * The transaction id field in the standard message header + * will be copied from the associated request. + * + * The setup response message returns a channel_id, which + * identifies this set of resources and is used to refer to the + * allocated capture channel in subsequent messages. + */ +struct CAPTURE_CHANNEL_SETUP_RESP_MSG { + /** Capture result return value. See @ref CapErrorCodes "Return values" */ + capture_result result; + /** Capture channel identifier for the new channel. */ + uint32_t channel_id; + /** Bitmask of allocated VI channel(s). LSB is VI channel 0. 
*/ + uint64_t vi_channel_mask; +} CAPTURE_IVC_ALIGN; + +/** + * @defgroup CapResetFlags VI Capture channel reset flags + * @{ + */ +/** Reset the channel without waiting for FE first. */ +#define CAPTURE_CHANNEL_RESET_FLAG_IMMEDIATE MK_U32(0x01) +/** @} */ + +/** + * @brief Reset a VI capture channel. + * + * Halt the associated VI channel. Flush the request queue for the + * channel and increment syncpoints in the request queue to their target + * values. + */ +struct CAPTURE_CHANNEL_RESET_REQ_MSG { + /** See @ref CapResetFlags "Reset flags" */ + uint32_t reset_flags; + /** Reserved */ + uint32_t pad__; +} CAPTURE_IVC_ALIGN; + +/** + * @brief VI capture channel reset response message. + * + * The response is sent after the RCE side channel cleanup is + * complete. If the reset barrier is not received within the timeout + * interval a CAPTURE_ERROR_TIMEOUT error is reported as the return value. + * If the reset succeeds then the return value is CAPTURE_OK. + */ +struct CAPTURE_CHANNEL_RESET_RESP_MSG { + /** Reset status return value. See @ref CapErrorCodes "Return values" */ + capture_result result; + uint32_t pad__; +} CAPTURE_IVC_ALIGN; + +/** + * @brief Release a VI capture channel and all the associated resources. + * + * Halt the associated VI channel and release the channel context. + */ +struct CAPTURE_CHANNEL_RELEASE_REQ_MSG { + /** Reset flags. Currently not used in release request. */ + uint32_t reset_flags; + uint32_t pad__; +} CAPTURE_IVC_ALIGN; + +/** + * @brief Capture channel release response message. + * + * The release is acknowledged after the channel cleanup is complete + * and all resources have been freed on RCE. + */ +struct CAPTURE_CHANNEL_RELEASE_RESP_MSG { + /** Release status return value. See @ref CapErrorCodes "Return values" */ + capture_result result; + uint32_t pad__; +} CAPTURE_IVC_ALIGN; + +/** + * @brief Configure the piece-wise linear function used by the VI companding module. 
+ *
+ * The companding table is shared by all capture channels and must be
+ * configured before enabling companding for a specific capture. Each channel
+ * can explicitly enable processing by the companding unit i.e. the channels can
+ * opt-out of the global companding config. See @ref CapErrorCodes "Capture request return codes"
+ * for more details on the return values.
+ */
+struct CAPTURE_COMPAND_CONFIG_REQ_MSG {
+ /** VI companding configuration */
+ struct vi_compand_config compand_config;
+} CAPTURE_IVC_ALIGN;
+
+/**
+ * @brief VI Companding unit configuration response message.
+ *
+ * Informs the client the status of VI companding unit configuration request.
+ * A return value of CAPTURE_OK in the result field indicates the request
+ * message succeeded. Any other value indicates an error.
+ */
+struct CAPTURE_COMPAND_CONFIG_RESP_MSG {
+ /** Companding config setup result. See @ref CapErrorCodes "Return values". */
+ capture_result result;
+ uint32_t pad__;
+} CAPTURE_IVC_ALIGN;
+
+/**
+ * @brief Configure the Phase Detection Auto Focus (PDAF) pattern.
+ */
+struct CAPTURE_PDAF_CONFIG_REQ_MSG {
+ /** PDAF configuration data */
+ struct vi_pdaf_config pdaf_config;
+} CAPTURE_IVC_ALIGN;
+
+/**
+ * @brief Configure PDAF unit response message
+ *
+ * Returns the status of the PDAF unit configuration request.
+ * A return value of CAPTURE_OK in the result field indicates the request
+ * message succeeded. Any other value indicates an error. See
+ * @ref CapErrorCodes "Capture request return codes" for more details on
+ * the return values.
+ */
+struct CAPTURE_PDAF_CONFIG_RESP_MSG {
+ /** PDAF config setup result. See @ref CapErrorCodes "Return values". */
+ capture_result result;
+ /** Reserved */
+ uint32_t pad__;
+} CAPTURE_IVC_ALIGN;
+
+/**
+ * @brief Enable SLVS-EC synchronization
+ *
+ * Enable the generation of XVS and XHS synchronization signals for a
+ * SLVS-EC sensor.
+ */ +struct CAPTURE_SYNCGEN_ENABLE_REQ_MSG { + /** Syncgen unit */ + uint32_t unit; + /** Reserved */ + uint32_t pad__; + /** VI SYNCGEN unit configuration */ + struct vi_syncgen_config syncgen_config; +} CAPTURE_IVC_ALIGN; + +/** + * @brief Enable SLVS-EC synchronization response message. + * + * Returns the status of enable SLVS-EC synchronization request. + * A return value of CAPTURE_OK in the result field indicates the request + * message succeeded. Any other value indicates an error. See + * @ref CapErrorCodes "Capture request return codes" for more details on + * the return values. + */ +struct CAPTURE_SYNCGEN_ENABLE_RESP_MSG { + /** Syncgen unit */ + uint32_t unit; + /** Syncgen enable request result. See @ref CapErrorCodes "Return values". */ + capture_result result; +} CAPTURE_IVC_ALIGN; + +/** + * @brief Disable SLVS-EC synchronization + * + * Disable the generation of XVS and XHS synchronization signals for a + * SLVS-EC sensor. + */ +struct CAPTURE_SYNCGEN_DISABLE_REQ_MSG { + /** Syncgen unit */ + uint32_t unit; + /** See SyncgenDisableFlags "Syncgen disable flags" */ + uint32_t syncgen_disable_flags; + +/** + * @defgroup SyncgenDisableFlags Syncgen disable flags + * @{ + */ +/** Disable SYNCGEN without waiting for frame end */ +#define CAPTURE_SYNCGEN_DISABLE_FLAG_IMMEDIATE MK_U32(0x01) +/** @} */ + +} CAPTURE_IVC_ALIGN; + +/** + * @brief Disable SLVS-EC synchronization response message. + * + * Returns the status of the SLVS-EC synchronization request message. + * A return value of CAPTURE_OK in the result field indicates the request + * message succeeded. Any other value indicates an error. See + * @ref CapErrorCodes "Capture request return codes" for more details on + * the return values. + */ +struct CAPTURE_SYNCGEN_DISABLE_RESP_MSG { + /** Syncgen unit */ + uint32_t unit; + /** Syncgen disable request result .See @ref CapErrorCodes "Return values". 
*/ + capture_result result; +} CAPTURE_IVC_ALIGN; + + +/** + * @brief Open an NVCSI stream request message + * @deprecated + */ +struct CAPTURE_PHY_STREAM_OPEN_REQ_MSG { + /** See NvCsiStream "NVCSI stream id" */ + uint32_t stream_id; + /** See NvCsiPort "NvCSI Port" */ + uint32_t csi_port; + /** See @ref NvPhyType "NvCSI Physical stream type" */ + uint32_t phy_type; + /** Reserved */ + uint32_t pad32__; +} CAPTURE_IVC_ALIGN; + +/** + * @brief NVCSI stream open response message + * @deprecated + */ +struct CAPTURE_PHY_STREAM_OPEN_RESP_MSG { + /** Stream open request status. See @ref CapErrorCodes "Return values". */ + uint32_t result; + /** Reserved */ + uint32_t pad32__; +} CAPTURE_IVC_ALIGN; + +/** + * @brief NVCSI stream close request message + * @deprecated + */ +struct CAPTURE_PHY_STREAM_CLOSE_REQ_MSG { + /** NVCSI stream Id */ + uint32_t stream_id; + /** NVCSI port */ + uint32_t csi_port; + /** See @ref NvPhyType "NvCSI Physical stream type" */ + uint32_t phy_type; + /** Reserved */ + uint32_t pad32__; +} CAPTURE_IVC_ALIGN; + +/** + * @brief NVCSI stream close response message + * @deprecated + */ +struct CAPTURE_PHY_STREAM_CLOSE_RESP_MSG { + /** Stream close request status. See @ref CapErrorCodes "Return values". */ + uint32_t result; + /** Reserved */ + uint32_t pad32__; +} CAPTURE_IVC_ALIGN; + +/** + * @brief Physical stream dump registers request message. (Debug only) + */ +struct CAPTURE_PHY_STREAM_DUMPREGS_REQ_MSG { + /** NVCSI stream Id */ + uint32_t stream_id; + /** NVCSI port */ + uint32_t csi_port; +} CAPTURE_IVC_ALIGN; + +/** + * @brief Physical stream dump registers response message. (Debug only) + */ +struct CAPTURE_PHY_STREAM_DUMPREGS_RESP_MSG { + /** Stream dump registers request status. See @ref CapErrorCodes "Return values". */ + uint32_t result; + uint32_t pad32__; +} CAPTURE_IVC_ALIGN; + +/** + * @brief Set NVCSI stream configuration request message. 
+ */ +struct CAPTURE_CSI_STREAM_SET_CONFIG_REQ_MSG { + /** NVCSI stream Id */ + uint32_t stream_id; + /** NVCSI port */ + uint32_t csi_port; + /** @ref See NvCsiConfigFlags "NVCSI Configuration Flags" */ + uint32_t config_flags; + /** Reserved */ + uint32_t pad32__; + /** NVCSI super control and interface logic (SCIL aka brick) configuration */ + struct nvcsi_brick_config brick_config; + /** NVCSI control and interface logic (CIL) partition configuration */ + struct nvcsi_cil_config cil_config; + /** User-defined error configuration */ + struct nvcsi_error_config error_config; +} CAPTURE_IVC_ALIGN; + +/** + * @brief Set NVCSI stream configuration response message. + */ +struct CAPTURE_CSI_STREAM_SET_CONFIG_RESP_MSG { + /** NVCSI stream config request status. See @ref CapErrorCodes "Return values". */ + uint32_t result; + /** Reserved */ + uint32_t pad32__; +} CAPTURE_IVC_ALIGN; + +/** + * @brief Set NVCSI stream parameter request message. + */ +struct CAPTURE_CSI_STREAM_SET_PARAM_REQ_MSG { + /** NVCSI stream Id */ + uint32_t stream_id; + /** NVCSI stream virtual channel id */ + uint32_t virtual_channel_id; + /** The parameter to set. See @ref NvCsiParamType "NVCSI Parameter Type" */ + uint32_t param_type; + /** Reserved */ + uint32_t pad32__; + /** @anon_union */ + union { + /** Set DPCM config for an NVCSI stream @anon_union_member */ + struct nvcsi_dpcm_config dpcm_config; + /** NVCSI watchdog timer config @anon_union_member */ + struct nvcsi_watchdog_config watchdog_config; + }; +} CAPTURE_IVC_ALIGN; + +/** + * @brief Set NVCSI stream parameter response message. + */ +struct CAPTURE_CSI_STREAM_SET_PARAM_RESP_MSG { + /** NVCSI set stream parameter request status. See @ref CapErrorCodes "Return values". */ + uint32_t result; + /** Reserved */ + uint32_t pad32__; +} CAPTURE_IVC_ALIGN; + +/** + * @brief NVCSI test pattern generator (TPG) stream config request message. 
+ */ +struct CAPTURE_CSI_STREAM_TPG_SET_CONFIG_REQ_MSG { + /** TPG configuration */ + union nvcsi_tpg_config tpg_config; +} CAPTURE_IVC_ALIGN; + +/** + * @brief NVCSI TPG stream config response message. + */ +struct CAPTURE_CSI_STREAM_TPG_SET_CONFIG_RESP_MSG { + /** Set TPG config request status. See @ref CapErrorCodes "Return values". */ + uint32_t result; + /** Reserved */ + uint32_t pad32__; +} CAPTURE_IVC_ALIGN; + +/** + * @brief Start NVCSI TPG streaming request message. + */ +struct CAPTURE_CSI_STREAM_TPG_START_REQ_MSG { + /** NVCSI stream Id */ + uint32_t stream_id; + /** NVCSI stream virtual channel id */ + uint32_t virtual_channel_id; + /** TPG rate configuration */ + struct nvcsi_tpg_rate_config tpg_rate_config; +} CAPTURE_IVC_ALIGN; + +/** + * @brief Start NVCSI TPG streaming response message. + */ +struct CAPTURE_CSI_STREAM_TPG_START_RESP_MSG { + /** TPG start request status. See @ref CapErrorCodes "Return values". */ + uint32_t result; + /** Reserved */ + uint32_t pad32__; +} CAPTURE_IVC_ALIGN; + + +/** + * @brief Start NVCSI TPG streaming at specified frame rate request message. + * + * This message is similar to CAPTURE_CSI_STREAM_TPG_START_REQ_MSG. Here the frame rate + * and clock is specified using which the TPG rate config will be calculated. + */ +struct CAPTURE_CSI_STREAM_TPG_START_RATE_REQ_MSG { + /** NVCSI stream Id */ + uint32_t stream_id; + /** NVCSI stream virtual channel id */ + uint32_t virtual_channel_id; + /** TPG frame rate in Hz */ + uint32_t frame_rate; + /** Reserved */ + uint32_t pad32__; +} CAPTURE_IVC_ALIGN; + +/** + * @brief NVCSI TPG stream start at a specified frame rate response message. + */ +struct CAPTURE_CSI_STREAM_TPG_START_RATE_RESP_MSG { + /** TPG start rate request status. See @ref CapErrorCodes "Return values". */ + uint32_t result; + /** Reserved */ + uint32_t pad32__; +} CAPTURE_IVC_ALIGN; + +/** + * @defgroup gain ratio settings that can be set to frame generated by NVCSI TPG. 
+ * @{
+ */
+#define CAPTURE_CSI_STREAM_TPG_GAIN_RATIO_EIGHT_TO_ONE MK_U8(0) /* 8:1 gain */
+#define CAPTURE_CSI_STREAM_TPG_GAIN_RATIO_FOUR_TO_ONE MK_U8(1) /* 4:1 gain */
+#define CAPTURE_CSI_STREAM_TPG_GAIN_RATIO_TWO_TO_ONE MK_U8(2) /* 2:1 gain */
+#define CAPTURE_CSI_STREAM_TPG_GAIN_RATIO_NONE MK_U8(3) /* 1:1 gain */
+#define CAPTURE_CSI_STREAM_TPG_GAIN_RATIO_HALF MK_U8(4) /* 0.5:1 gain */
+#define CAPTURE_CSI_STREAM_TPG_GAIN_RATIO_ONE_FOURTH MK_U8(5) /* 0.25:1 gain */
+#define CAPTURE_CSI_STREAM_TPG_GAIN_RATIO_ONE_EIGHTH MK_U8(6) /* 0.125:1 gain */
+
+/**
+ * @brief Apply gain ratio on specified VC of the desired CSI stream.
+ *
+ * This message is a request to apply gain on the specified VC; the gain
+ * will be applied on the next frame.
+ */
+struct CAPTURE_CSI_STREAM_TPG_APPLY_GAIN_REQ_MSG {
+ /** NVCSI stream Id */
+ uint32_t stream_id;
+ /** NVCSI stream virtual channel id */
+ uint32_t virtual_channel_id;
+ /** Gain ratio */
+ uint32_t gain_ratio;
+ /** Reserved */
+ uint32_t pad32__;
+} CAPTURE_IVC_ALIGN;
+
+/**
+ * @brief NVCSI TPG apply gain response message.
+ */
+struct CAPTURE_CSI_STREAM_TPG_APPLY_GAIN_RESP_MSG {
+ /** TPG apply gain request status. See @ref CapErrorCodes "Return values". */
+ uint32_t result;
+ /** Reserved */
+ uint32_t pad32__;
+} CAPTURE_IVC_ALIGN;
+
+/**
+ * @brief Stop NVCSI TPG streaming request message.
+ */
+struct CAPTURE_CSI_STREAM_TPG_STOP_REQ_MSG {
+ /** NVCSI stream Id */
+ uint32_t stream_id;
+ /** NVCSI stream virtual channel id */
+ uint32_t virtual_channel_id;
+} CAPTURE_IVC_ALIGN;
+
+/**
+ * @brief Stop NVCSI TPG streaming response message.
+ */
+struct CAPTURE_CSI_STREAM_TPG_STOP_RESP_MSG {
+ /** Stop TPG streaming request status. See @ref CapErrorCodes "Return values". */
+ uint32_t result;
+ uint32_t pad32__;
+} CAPTURE_IVC_ALIGN;
+
+/**
+ * @brief Max number of events
+ */
+#define VI_NUM_INJECT_EVENTS 10U
+
+/**
+ * @brief Event injection configuration.
+ * + * A capture request must be sent before this message + */ +struct CAPTURE_CHANNEL_EI_REQ_MSG { + /** Event data used for event injection */ + struct event_inject_msg events[VI_NUM_INJECT_EVENTS]; + /** Number of error events */ + uint8_t num_events; + /** Reserved */ + uint8_t pad__[7]; +} CAPTURE_IVC_ALIGN; + +/** + * @brief Acknowledge Event Injection request + */ +struct CAPTURE_CHANNEL_EI_RESP_MSG { + /** Stop TPG steaming request status. See @ref CapErrorCodes "Return values". */ + capture_result result; + /** Reserved */ + uint32_t pad__; +} CAPTURE_IVC_ALIGN; + +/** + * @brief Event injection channel reset request. + */ +struct CAPTURE_CHANNEL_EI_RESET_REQ_MSG { + /** Reserved */ + uint8_t pad__[8]; +} CAPTURE_IVC_ALIGN; + +/** + * @brief Acknowledge Event injection channel reset request. + */ +struct CAPTURE_CHANNEL_EI_RESET_RESP_MSG { + /** Event injection channel reset request result. See @ref CapErrorCodes "Return values". */ + capture_result result; + /** Reserved */ + uint32_t pad__; +} CAPTURE_IVC_ALIGN; + +/** + * @defgroup PhyStreamMsgType Message types for NvPhy + * @{ + */ +#define CAPTURE_PHY_STREAM_OPEN_REQ MK_U32(0x36) +#define CAPTURE_PHY_STREAM_OPEN_RESP MK_U32(0x37) +#define CAPTURE_PHY_STREAM_CLOSE_REQ MK_U32(0x38) +#define CAPTURE_PHY_STREAM_CLOSE_RESP MK_U32(0x39) +#define CAPTURE_PHY_STREAM_DUMPREGS_REQ MK_U32(0x3C) +#define CAPTURE_PHY_STREAM_DUMPREGS_RESP MK_U32(0x3D) +/** @} */ + +/** + * @defgroup NvCsiMsgType Message types for NVCSI + * @{ + */ +#define CAPTURE_CSI_STREAM_SET_CONFIG_REQ MK_U32(0x40) +#define CAPTURE_CSI_STREAM_SET_CONFIG_RESP MK_U32(0x41) +#define CAPTURE_CSI_STREAM_SET_PARAM_REQ MK_U32(0x42) +#define CAPTURE_CSI_STREAM_SET_PARAM_RESP MK_U32(0x43) +#define CAPTURE_CSI_STREAM_TPG_SET_CONFIG_REQ MK_U32(0x44) +#define CAPTURE_CSI_STREAM_TPG_SET_CONFIG_RESP MK_U32(0x45) +#define CAPTURE_CSI_STREAM_TPG_START_REQ MK_U32(0x46) +#define CAPTURE_CSI_STREAM_TPG_START_RESP MK_U32(0x47) +#define 
CAPTURE_CSI_STREAM_TPG_STOP_REQ MK_U32(0x48)
+#define CAPTURE_CSI_STREAM_TPG_STOP_RESP MK_U32(0x49)
+#define CAPTURE_CSI_STREAM_TPG_START_RATE_REQ MK_U32(0x4A)
+#define CAPTURE_CSI_STREAM_TPG_START_RATE_RESP MK_U32(0x4B)
+#define CAPTURE_CSI_STREAM_TPG_APPLY_GAIN_REQ MK_U32(0x4C)
+#define CAPTURE_CSI_STREAM_TPG_APPLY_GAIN_RESP MK_U32(0x4D)
+/** @} */
+
+/**
+ * @addtogroup NvCsiMsgType Message types for NVCSI
+ * @{
+ */
+#define CAPTURE_CHANNEL_EI_REQ MK_U32(0x50)
+#define CAPTURE_CHANNEL_EI_RESP MK_U32(0x51)
+#define CAPTURE_CHANNEL_EI_RESET_REQ MK_U32(0x52)
+#define CAPTURE_CHANNEL_EI_RESET_RESP MK_U32(0x53)
+/** @} */
+
+/**
+ * @addtogroup ViCapCtrlMsgType
+ * @{
+ */
+#define CAPTURE_HSM_CHANSEL_ERROR_MASK_REQ MK_U32(0x54)
+#define CAPTURE_HSM_CHANSEL_ERROR_MASK_RESP MK_U32(0x55)
+/** @} */
+
+/**
+ * @addtogroup ViCapCtrlMsgs
+ * @{
+ */
+/**
+ * @brief Set CHANSEL error mask from HSM reporting message
+ */
+struct CAPTURE_HSM_CHANSEL_ERROR_MASK_REQ_MSG {
+ /** VI EC/HSM global CHANSEL error mask configuration */
+ struct vi_hsm_chansel_error_mask_config hsm_chansel_error_config;
+} CAPTURE_IVC_ALIGN;
+
+/**
+ * @brief Acknowledge CHANSEL error mask request
+ */
+struct CAPTURE_HSM_CHANSEL_ERROR_MASK_RESP_MSG {
+ /** HSM CHANSEL error mask request result. See @ref CapErrorCodes "Return values". */
+ capture_result result;
+ /** Reserved */
+ uint32_t pad__;
+} CAPTURE_IVC_ALIGN;
+/** @} */
+
+/**
+ * @brief Set up RCE side resources for ISP capture pipe-line.
+ *
+ * The client shall use the transaction id field in the
+ * standard message header to associate request and response.
+ */
+struct CAPTURE_CHANNEL_ISP_SETUP_REQ_MSG {
+ /** ISP process channel configuration. */
+ struct capture_channel_isp_config channel_config;
+} CAPTURE_IVC_ALIGN;
+
+/**
+ * @brief Acknowledge isp capture channel setup request.
+ *
+ * The transaction id field in the standard message header
+ * will be copied from the associated request.
+ * + * The setup response message returns a channel_id, which + * identifies this set of resources and is used to refer to the + * allocated capture channel in subsequent messages. + */ +struct CAPTURE_CHANNEL_ISP_SETUP_RESP_MSG { + /** ISP process channel setup request status. See @ref CapErrorCodes "Return values". */ + capture_result result; + /** ISP process channel identifier for the new channel. */ + uint32_t channel_id; +} CAPTURE_IVC_ALIGN; + +typedef struct CAPTURE_CHANNEL_RESET_REQ_MSG + CAPTURE_CHANNEL_ISP_RESET_REQ_MSG; +typedef struct CAPTURE_CHANNEL_RESET_RESP_MSG + CAPTURE_CHANNEL_ISP_RESET_RESP_MSG; +typedef struct CAPTURE_CHANNEL_RELEASE_REQ_MSG + CAPTURE_CHANNEL_ISP_RELEASE_REQ_MSG; +typedef struct CAPTURE_CHANNEL_RELEASE_RESP_MSG + CAPTURE_CHANNEL_ISP_RELEASE_RESP_MSG; + +/** + * @brief Message frame for capture-control IVC channel. + */ +struct CAPTURE_CONTROL_MSG { + struct CAPTURE_MSG_HEADER header; + /** @anon_union */ + union { + /** @anon_union_member */ + struct CAPTURE_CHANNEL_SETUP_REQ_MSG channel_setup_req; + /** @anon_union_member */ + struct CAPTURE_CHANNEL_SETUP_RESP_MSG channel_setup_resp; + /** @anon_union_member */ + struct CAPTURE_CHANNEL_RESET_REQ_MSG channel_reset_req; + /** @anon_union_member */ + struct CAPTURE_CHANNEL_RESET_RESP_MSG channel_reset_resp; + /** @anon_union_member */ + struct CAPTURE_CHANNEL_RELEASE_REQ_MSG channel_release_req; + /** @anon_union_member */ + struct CAPTURE_CHANNEL_RELEASE_RESP_MSG channel_release_resp; + /** @anon_union_member */ + struct CAPTURE_COMPAND_CONFIG_REQ_MSG compand_config_req; + /** @anon_union_member */ + struct CAPTURE_COMPAND_CONFIG_RESP_MSG compand_config_resp; + /** @anon_union_member */ + struct CAPTURE_PDAF_CONFIG_REQ_MSG pdaf_config_req; + /** @anon_union_member */ + struct CAPTURE_PDAF_CONFIG_RESP_MSG pdaf_config_resp; + /** @anon_union_member */ + struct CAPTURE_SYNCGEN_ENABLE_REQ_MSG syncgen_enable_req; + /** @anon_union_member */ + struct CAPTURE_SYNCGEN_ENABLE_RESP_MSG 
syncgen_enable_resp; + /** @anon_union_member */ + struct CAPTURE_SYNCGEN_DISABLE_REQ_MSG syncgen_disable_req; + /** @anon_union_member */ + struct CAPTURE_SYNCGEN_DISABLE_RESP_MSG syncgen_disable_resp; + + /** @anon_union_member */ + struct CAPTURE_PHY_STREAM_OPEN_REQ_MSG phy_stream_open_req; + /** @anon_union_member */ + struct CAPTURE_PHY_STREAM_OPEN_RESP_MSG phy_stream_open_resp; + /** @anon_union_member */ + struct CAPTURE_PHY_STREAM_CLOSE_REQ_MSG phy_stream_close_req; + /** @anon_union_member */ + struct CAPTURE_PHY_STREAM_CLOSE_RESP_MSG phy_stream_close_resp; + /** @anon_union_member */ + struct CAPTURE_PHY_STREAM_DUMPREGS_REQ_MSG + phy_stream_dumpregs_req; + /** @anon_union_member */ + struct CAPTURE_PHY_STREAM_DUMPREGS_RESP_MSG + phy_stream_dumpregs_resp; + + /** @anon_union_member */ + struct CAPTURE_CSI_STREAM_SET_CONFIG_REQ_MSG + csi_stream_set_config_req; + /** @anon_union_member */ + struct CAPTURE_CSI_STREAM_SET_CONFIG_RESP_MSG + csi_stream_set_config_resp; + /** @anon_union_member */ + struct CAPTURE_CSI_STREAM_SET_PARAM_REQ_MSG + csi_stream_set_param_req; + /** @anon_union_member */ + struct CAPTURE_CSI_STREAM_SET_PARAM_RESP_MSG + csi_stream_set_param_resp; + /** @anon_union_member */ + struct CAPTURE_CSI_STREAM_TPG_SET_CONFIG_REQ_MSG + csi_stream_tpg_set_config_req; + /** @anon_union_member */ + struct CAPTURE_CSI_STREAM_TPG_SET_CONFIG_RESP_MSG + csi_stream_tpg_set_config_resp; + /** @anon_union_member */ + struct CAPTURE_CSI_STREAM_TPG_START_REQ_MSG + csi_stream_tpg_start_req; + /** @anon_union_member */ + struct CAPTURE_CSI_STREAM_TPG_START_RESP_MSG + csi_stream_tpg_start_resp; + /** @anon_union_member */ + struct CAPTURE_CSI_STREAM_TPG_STOP_REQ_MSG + csi_stream_tpg_stop_req; + /** @anon_union_member */ + struct CAPTURE_CSI_STREAM_TPG_STOP_RESP_MSG + csi_stream_tpg_stop_resp; + /** @anon_union_member */ + struct CAPTURE_CSI_STREAM_TPG_START_RATE_REQ_MSG + csi_stream_tpg_start_rate_req; + /** @anon_union_member */ + struct 
CAPTURE_CSI_STREAM_TPG_START_RATE_RESP_MSG + csi_stream_tpg_start_rate_resp; + /** @anon_union_member */ + struct CAPTURE_CSI_STREAM_TPG_APPLY_GAIN_REQ_MSG + csi_stream_tpg_apply_gain_req; + /** @anon_union_member */ + struct CAPTURE_CSI_STREAM_TPG_APPLY_GAIN_RESP_MSG + csi_stream_tpg_apply_gain_resp; + + /** @anon_union_member */ + struct CAPTURE_CHANNEL_EI_REQ_MSG ei_req; + /** @anon_union_member */ + struct CAPTURE_CHANNEL_EI_RESP_MSG ei_resp; + /** @anon_union_member */ + struct CAPTURE_CHANNEL_EI_RESET_REQ_MSG ei_reset_req; + /** @anon_union_member */ + struct CAPTURE_CHANNEL_EI_RESET_RESP_MSG ei_reset_resp; + + /** @anon_union_member */ + struct CAPTURE_CHANNEL_ISP_SETUP_REQ_MSG channel_isp_setup_req; + /** @anon_union_member */ + struct CAPTURE_CHANNEL_ISP_SETUP_RESP_MSG channel_isp_setup_resp; + /** @anon_union_member */ + CAPTURE_CHANNEL_ISP_RESET_REQ_MSG channel_isp_reset_req; + /** @anon_union_member */ + CAPTURE_CHANNEL_ISP_RESET_RESP_MSG channel_isp_reset_resp; + /** @anon_union_member */ + CAPTURE_CHANNEL_ISP_RELEASE_REQ_MSG channel_isp_release_req; + /** @anon_union_member */ + CAPTURE_CHANNEL_ISP_RELEASE_RESP_MSG channel_isp_release_resp; + + /** @anon_union_member */ + struct CAPTURE_HSM_CHANSEL_ERROR_MASK_REQ_MSG hsm_chansel_mask_req; + /** @anon_union_member */ + struct CAPTURE_HSM_CHANSEL_ERROR_MASK_RESP_MSG hsm_chansel_mask_resp; + }; +} CAPTURE_IVC_ALIGN; + +/** + * @brief Enqueue a new capture request on a capture channel. + * + * The request contains channel identifier and the capture sequence + * number, which are required to schedule the capture request. The + * actual capture programming is stored in the capture descriptor, + * stored in a DRAM ring buffer set up with CAPTURE_CHANNEL_SETUP_REQ. + * + * The capture request descriptor with buffer_index=N can be located + * within the ring buffer as follows: + * + * struct capture_descriptor *desc = requests + buffer_index * request_size; + * + * The capture request message is asynchronous. 
Capture completion is + * indicated by incrementing the progress syncpoint a pre-calculated + * number of times = 1 + . The first increment + * occurs at start-of-frame and the last increment occurs at + * end-of-frame. The progress-syncpoint is used to synchronize with + * down-stream engines. This model assumes that the capture client + * knows the number of subframes used in the capture and has + * programmed the VI accordingly. + * + * If the flag CAPTURE_FLAG_STATUS_REPORT_ENABLE is set in the capture + * descriptor, RCE will store the capture status into status field + * of the descriptor. RCE will also send a CAPTURE_STATUS_IND + * message to indicate that capture has completed. The capture status + * record contains information about the capture, such as CSI frame + * number, start-of-frame and end-of-frame timestamps, as well as + * error status. + * + * If the flag CAPTURE_FLAG_ERROR_REPORT_ENABLE is set, RCE will send a + * CAPTURE_STATUS_IND upon an error, even if + * CAPTURE_FLAG_STATUS_REPORT_ENABLE is not set. + */ +struct CAPTURE_REQUEST_REQ_MSG { + /** Buffer index identifying capture descriptor. */ + uint32_t buffer_index; + /** Reserved */ + uint32_t pad__; +} CAPTURE_IVC_ALIGN; + +/** + * @brief Capture status indication. + * + * The message is sent after the capture status record has been + * written into the capture request descriptor. + */ +struct CAPTURE_STATUS_IND_MSG { + /** Buffer index identifying capture descriptor. */ + uint32_t buffer_index; + /** Reserved */ + uint32_t pad__; +} CAPTURE_IVC_ALIGN; + + +/** + * @brief Send new isp_capture request on a capture channel. + * + * The request contains channel identifier and the capture sequence + * number (ring-buffer index), which are required to schedule the + * isp capture request. 
+ * The actual capture programming is stored in isp_capture_descriptor, + * stored in DRAM ring buffer, which includes the sequence, ISP + * surfaces' details, surface related configs, ISP PB2 iova, input prefences, + * and isp_capture status written by RCE. + * + * NvCapture UMD allocates the pool of isp_capture descriptors in setup call, + * where each isp_capture_desc is followed by corresponding PB2 memory + * (ATOM aligned). + * RCE would generate the PB2 using surface details found in isp_capture + * descriptor. + * The ring-buffer (pool) would look like below: + * + * [isp_capture_desc][PB2][isp_capture_desc][PB2][isp_capture_desc]... + * + * The isp_capture_descriptor with buffer_index=N can be located within + * the ring buffer as follows: + * + * isp_capture_descriptor *desc = requests + buffer_index * request_size; + * + * Note, here request_size = sizeof (isp_capture_descriptor) + sizeof (PB2). + * + * UMD fills isp_capture_desc and submits the request to KMD which pins the + * surfaces and PB and then does the in-place replacement with iovas' within + * isp_capture_descriptor. + * KMD then sends the isp_capture request to RCE over capture ivc channel. + * + * The isp capture request message is asynchronous. Capture completion is + * indicated by incrementing the progress syncpoint a pre-calculated + * number of times = . The progress-syncpoint is + * used to synchronize with down-stream engines. This model assumes that + * the capture client knows the number of subframes used in the capture and has + * programmed the ISP accordingly. + * All stats completion are indicated by incrementing stats progress syncpoint + * a number of times = . + * + * If the flag CAPTURE_FLAG_ISP_STATUS_REPORT_ENABLE is set in the isp + * capture descriptor, RCE will store the capture status into status field + * of the descriptor. RCE will also send a CAPTURE_ISP_STATUS_IND + * message to indicate that capture has completed. 
+ * + * If the flag CAPTURE_FLAG_ISP_ERROR_REPORT_ENABLE is set, RCE will send a + * CAPTURE_ISP_STATUS_IND upon an error, even if + * CAPTURE_FLAG_ISP_STATUS_REPORT_ENABLE is not set. + * + * Typedef-ed CAPTURE_REQUEST_REQ_MSG. + * + * The buffer_index field is isp_capture_descriptor index in ring buffer. + */ +typedef struct CAPTURE_REQUEST_REQ_MSG CAPTURE_ISP_REQUEST_REQ_MSG; + +/** + * @brief ISP Capture status indication. + * + * The message is sent after the capture status record has been + * written into the capture request descriptor. + * + * The buffer_index in this case is identifying the ISP capture descriptor. + */ +typedef struct CAPTURE_STATUS_IND_MSG CAPTURE_ISP_STATUS_IND_MSG; + +/** + * @brief Extended ISP capture status indication. + * + * The message is sent after the capture status record has been + * written into the capture request descriptor. + */ +struct CAPTURE_ISP_EX_STATUS_IND_MSG { + /** Buffer index identifying ISP process descriptor. */ + uint32_t process_buffer_index; + /** Buffer index identifying ISP program descriptor. */ + uint32_t program_buffer_index; +} CAPTURE_IVC_ALIGN; + +/** + * @brief Send new isp_program request on a capture ivc channel. + * + * The request contains channel identifier and the program sequence + * number (ring-buffer index). + * The actual programming details is stored in isp_program + * descriptor, which includes the offset to isp_program + * buffer (which has PB1 containing ISP HW settings), sequence, + * settings-id, activation-flags, isp_program buffer size, iova's + * of ISP PB1 and isp_program status written by RCE. + * + * NvCapture UMD allocates the pool of isp_program descriptors in setup call, + * where each isp_pgram_descriptor is followed by corresponding isp_program + * buffer (ATOM aligned). + * The ring-buffer (pool) would look like below: + * + * [isp_prog_desc][isp_program][isp_prog_desc][isp_program][isp_prog_desc]... 
+ * + * The isp_program_descriptor with buffer_index=N can be located within + * the ring buffer as follows: + * + * isp_program_descriptor *desc = programs + buffer_index * program_size; + * + * Note, program_size = sizeof (isp_program_descriptor) + sizeof (isp_program). + * + * NvISP fills these and submits the isp_program request to KMD which pins the + * PB and then does the in-place replacement with iova within + * isp_program_descriptor. + * KMD then sends the isp_program request to RCE over capture ivc channel. + * + * The sequence is the frame_id which tells RCE, that the given isp_program + * must be used from that frame_id onwards until UMD provides new one. + * So RCE will use the sequence field to select the correct isp_program from + * the isp_program descriptors' ring buffer for given frame request and will + * keep on using it for further frames until the new isp_program (desc) is + * provided to be used. + * RCE populates both matched isp_program (reads from isp program desc) and + * isp capture descriptor and forms single task descriptor for given frame + * request and feeds it to falcon, which further programs it to ISP. + * + * settings_id is unique id for isp_program, NvCapture and RCE will use + * the ring buffer array index as settings_id. + * It can also be used to select the correct isp_program for the given + * frame, in that case, UMD writes this unique settings_id to sensor's + * scratch register, and sensor will send back it as part of embedded data, + * when the given settings/gains are applied on that particular frame + * coming from sensor. + * + * RCE reads this settings_id back from embedded data and uses it to select + * the corresponding isp_program from the isp_program desc ring buffer. + * The activation_flags tells the RCE which id (sequence or settings_id) to + * use to select correct isp_program for the given frame. 
+ * + * As same isp_program can be used for multiple frames, it can not be freed + * when the frame capture is done. RCE will send a separate status + * indication CAPTURE_ISP_PROGRAM_STATUS_IND message to CCPEX to notify + * that the given isp_program is no longer in use and can be freed or reused. + * settings_id (ring-buffer index) field is used to uniquely identify the + * correct isp_program. + * RCE also writes the isp_program status in isp program descriptor. + * + * Typedef-ed CAPTURE_REQUEST_REQ_MSG. + * + * The buffer_index field is the isp_program descriptor index in ring buffer. + */ +typedef struct CAPTURE_REQUEST_REQ_MSG CAPTURE_ISP_PROGRAM_REQUEST_REQ_MSG; + +/** + * @brief ISP program status indication. + * + * The message is sent to notify CCPLEX about the isp_program which is expired + * so UMD client can free or reuse it. + * + * Typedef-ed CAPTURE_STATUS_IND_MSG. + * + * The buffer_index field in this case is identifying ISP program descriptor. + */ +typedef struct CAPTURE_STATUS_IND_MSG CAPTURE_ISP_PROGRAM_STATUS_IND_MSG; + +/** + * @brief Message frame for capture IVC channel. 
+ */ +struct CAPTURE_MSG { + struct CAPTURE_MSG_HEADER header; + /** @anon_union */ + union { + /** @anon_union_member */ + struct CAPTURE_REQUEST_REQ_MSG capture_request_req; + /** @anon_union_member */ + struct CAPTURE_STATUS_IND_MSG capture_status_ind; + + /** @anon_union_member */ + CAPTURE_ISP_REQUEST_REQ_MSG capture_isp_request_req; + /** @anon_union_member */ + CAPTURE_ISP_STATUS_IND_MSG capture_isp_status_ind; + /** @anon_union_member */ + struct CAPTURE_ISP_EX_STATUS_IND_MSG capture_isp_ex_status_ind; + + /** @anon_union_member */ + CAPTURE_ISP_PROGRAM_REQUEST_REQ_MSG + capture_isp_program_request_req; + /** @anon_union_member */ + CAPTURE_ISP_PROGRAM_STATUS_IND_MSG + capture_isp_program_status_ind; + }; +} CAPTURE_IVC_ALIGN; + +#pragma GCC diagnostic ignored "-Wpadded" + +#endif /* INCLUDE_CAMRTC_CAPTURE_MESSAGES_H */ diff --git a/include/soc/tegra/camrtc-capture.h b/include/soc/tegra/camrtc-capture.h new file mode 100644 index 00000000..8986a1a2 --- /dev/null +++ b/include/soc/tegra/camrtc-capture.h @@ -0,0 +1,2891 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ */ + +/** + * @file camrtc-capture.h + * + * @brief Camera firmware API header + */ + +#ifndef INCLUDE_CAMRTC_CAPTURE_H +#define INCLUDE_CAMRTC_CAPTURE_H + +#include "camrtc-common.h" + +#pragma GCC diagnostic error "-Wpadded" + +#define CAPTURE_IVC_ALIGNOF MK_ALIGN(8) +#define CAPTURE_DESCRIPTOR_ALIGN_BYTES (64) +#define CAPTURE_DESCRIPTOR_ALIGNOF MK_ALIGN(CAPTURE_DESCRIPTOR_ALIGN_BYTES) + +#define CAPTURE_IVC_ALIGN CAMRTC_ALIGN(CAPTURE_IVC_ALIGNOF) +#define CAPTURE_DESCRIPTOR_ALIGN CAMRTC_ALIGN(CAPTURE_DESCRIPTOR_ALIGNOF) + +typedef uint64_t iova_t CAPTURE_IVC_ALIGN; + +#define SYNCPOINT_ID_INVALID MK_U32(0) +#define GOS_INDEX_INVALID MK_U8(0xFF) + +#pragma GCC diagnostic warning "-Wdeprecated-declarations" +#define CAMRTC_DEPRECATED __attribute__((deprecated)) + +/*Status Fence Support*/ +#define STATUS_FENCE_SUPPORT + +typedef struct syncpoint_info { + /** Syncpoint ID */ + uint32_t id; + /** Syncpoint threshold when storing a fence */ + uint32_t threshold; + /** Grid of Semaphores (GOS) SMMU stream id */ + uint8_t gos_sid; + /** GOS index */ + uint8_t gos_index; + /** GOS offset */ + uint16_t gos_offset; + /** Reserved */ + uint32_t pad_; + /** IOVA address of the Host1x syncpoint register */ + iova_t shim_addr; +} syncpoint_info_t CAPTURE_IVC_ALIGN; + +/** + * @defgroup StatsSize Statistics data size defines for ISP5 + * + * The size for each unit includes the standard ISP5 HW stats + * header size. + * + * Size break down for each unit. + * FB = 32 byte header + (256 x 4) bytes. FB has 256 windows with 4 bytes + * of stats data per window. + * FM = 32 byte header + (64 x 64 x 2 x 4) bytes. FM can have 64 x 64 windows + * with each windows having 2 bytes of data for each color channel. + * AFM = 32 byte header + 8 byte statistics data + 8 bytes padding per ROI. + * LAC = 32 byte header + ( (32 x 32) x ((4 + 2 + 2) x 4) ) + * Each ROI has 32x32 windows with each window containing 8 + * bytes of data per color channel. 
+ * Hist = Header + (256 x 4 x 4) bytes since Hist unit has 256 bins and + * each bin collects 4 byte data for each color channel + 4 Dwords for + * excluded pixel count due to elliptical mask per color channel. + * Pru = 32 byte header + (8 x 4) bytes for bad pixel count and accumulated + * pixel adjustment for pixels both inside and outside the ROI. + * LTM = 32 byte header + (128 x 4) bytes for histogram data + (8 x 8 x 4 x 2) + * bytes for soft key average and count. Soft key statistics are + * collected by dividing the frame into a 8x8 array region. + * @{ + */ +/** Statistics unit hardware header size in bytes */ +#define ISP5_STATS_HW_HEADER_SIZE MK_SIZE(32) +/** Flicker band (FB) unit statistics data size in bytes */ +#define ISP5_STATS_FB_MAX_SIZE MK_SIZE(1056) +/** Focus Metrics (FM) unit statistics data size in bytes */ +#define ISP5_STATS_FM_MAX_SIZE MK_SIZE(32800) +/** Auto Focus Metrics (AFM) unit statistics data size in bytes */ +#define ISP5_STATS_AFM_ROI_MAX_SIZE MK_SIZE(48) +/** Local Average Clipping (LAC) unit statistics data size in bytes */ +#define ISP5_STATS_LAC_ROI_MAX_SIZE MK_SIZE(32800) +/** Histogram unit statistics data size in bytes */ +#define ISP5_STATS_HIST_MAX_SIZE MK_SIZE(4144) +/** Pixel Replacement Unit (PRU) unit statistics data size in bytes */ +#define ISP5_STATS_OR_MAX_SIZE MK_SIZE(64) +/** Local Tone Mapping (LTM) unit statistics data size in bytes */ +#define ISP5_STATS_LTM_MAX_SIZE MK_SIZE(1056) + +/* Stats buffer addresses must be aligned to 64 byte (ATOM) boundaries */ +#define ISP5_ALIGN_STAT_OFFSET(_offset) \ + (((uint32_t)(_offset) + MK_U32(63)) & ~(MK_U32(63))) + +/** Flicker band (FB) unit statistics data offset */ +#define ISP5_STATS_FB_OFFSET MK_SIZE(0) +/** Focus Metrics (FM) unit statistics data offset */ +#define ISP5_STATS_FM_OFFSET \ + (ISP5_STATS_FB_OFFSET + ISP5_ALIGN_STAT_OFFSET(ISP5_STATS_FB_MAX_SIZE)) +/** Auto Focus Metrics (AFM) unit statistics data offset */ +#define ISP5_STATS_AFM_OFFSET \ + 
(ISP5_STATS_FM_OFFSET + ISP5_ALIGN_STAT_OFFSET(ISP5_STATS_FM_MAX_SIZE)) +/** Local Average Clipping (LAC0) unit statistics data offset */ +#define ISP5_STATS_LAC0_OFFSET \ + (ISP5_STATS_AFM_OFFSET + \ + (ISP5_ALIGN_STAT_OFFSET(ISP5_STATS_AFM_ROI_MAX_SIZE) * MK_SIZE(8))) +/** Local Average Clipping (LAC1) unit statistics data offset */ +#define ISP5_STATS_LAC1_OFFSET \ + (ISP5_STATS_LAC0_OFFSET + \ + (ISP5_ALIGN_STAT_OFFSET(ISP5_STATS_LAC_ROI_MAX_SIZE) * MK_SIZE(4))) +/** Histogram unit (H0) statistics data offset */ +#define ISP5_STATS_HIST0_OFFSET \ + (ISP5_STATS_LAC1_OFFSET + \ + (ISP5_ALIGN_STAT_OFFSET(ISP5_STATS_LAC_ROI_MAX_SIZE) * MK_SIZE(4))) +/** Histogram unit (H1) statistics data offset */ +#define ISP5_STATS_HIST1_OFFSET \ + (ISP5_STATS_HIST0_OFFSET + \ + ISP5_ALIGN_STAT_OFFSET(ISP5_STATS_HIST_MAX_SIZE)) +/** Pixel Replacement Unit (PRU) unit statistics data offset */ +#define ISP5_STATS_OR_OFFSET \ + (ISP5_STATS_HIST1_OFFSET + \ + ISP5_ALIGN_STAT_OFFSET(ISP5_STATS_HIST_MAX_SIZE)) +/** Local Tone Mapping (LTM) unit statistics data offset */ +#define ISP5_STATS_LTM_OFFSET \ + (ISP5_STATS_OR_OFFSET + \ + ISP5_ALIGN_STAT_OFFSET(ISP5_STATS_OR_MAX_SIZE)) +/** Total statistics data size in bytes */ +#define ISP5_STATS_TOTAL_SIZE \ + (ISP5_STATS_LTM_OFFSET + ISP5_STATS_LTM_MAX_SIZE) +/**@}*/ + +/** + * @defgroup StatsSize Statistics data size defines for ISP6 + * + * The size for each unit includes the standard ISP6 HW stats + * header size. + * + * Size break down for each unit. + * FB = 32 byte header + (512 x 4) bytes. FB has 512 windows with 4 bytes + * of stats data per window. + * FM = 32 byte header + (64 x 64 x 2 x 4) bytes. FM can have 64 x 64 windows + * with each windows having 2 bytes of data for each color channel. + * AFM = 32 byte header + 16 byte statistics data per ROI. + * LAC = 32 byte header + ( (32 x 32) x ((4 + 2 + 2) x 4) ) + * Each ROI has 32x32 windows with each window containing 8 + * bytes of data per color channel. 
+ * Hist = Header + (256 x 4 x 4) bytes since Hist unit has 256 bins and + * each bin collects 4 byte data for each color channel + 4 Dwords for + * excluded pixel count due to elliptical mask per color channel. + * OR = 32 byte header + (8 x 4) bytes for bad pixel count and accumulated + * pixel adjustment for pixels both inside and outside the ROI. + * PRU hist = Header + (256 x 4 x 4) bytes since Hist unit has 256 bins and + * each bin collects 4 byte data for each color channel + 4 Dwords for + * excluded pixel count due to elliptical mask per color channel. + * LTM = 32 byte header + (128 x 4) bytes for histogram data + (8 x 8 x 4 x 2) + * bytes for soft key average and count. Soft key statistics are + * collected by dividing the frame into a 8x8 array region. + * @{ + */ +/** Statistics unit hardware header size in bytes */ +#define ISP6_STATS_HW_HEADER_SIZE MK_SIZE(32) +/** Flicker band (FB) unit statistics data size in bytes */ +#define ISP6_STATS_FB_MAX_SIZE MK_SIZE(2080) +/** Focus Metrics (FM) unit statistics data size in bytes */ +#define ISP6_STATS_FM_MAX_SIZE MK_SIZE(32800) +/** Auto Focus Metrics (AFM) unit statistics data size in bytes */ +#define ISP6_STATS_AFM_ROI_MAX_SIZE MK_SIZE(48) +/** Local Average Clipping (LAC) unit statistics data size in bytes */ +#define ISP6_STATS_LAC_ROI_MAX_SIZE MK_SIZE(32800) +/** Histogram unit statistics data size in bytes */ +#define ISP6_STATS_HIST_MAX_SIZE MK_SIZE(4144) +/** Pixel Replacement Unit (PRU) unit statistics data size in bytes */ +#define ISP6_STATS_OR_MAX_SIZE MK_SIZE(64) +/** PRU histogram (HIST_RAW24) unit statistics data size in bytes */ +#define ISP6_STATS_HIST_RAW24_MAX_SIZE MK_SIZE(1056) +/** Local Tone Mapping (LTM) unit statistics data size in bytes */ +#define ISP6_STATS_LTM_MAX_SIZE MK_SIZE(1056) +/* Stats buffer addresses must be aligned to 64 byte (ATOM) boundaries */ +#define ISP6_ALIGN_STAT_OFFSET(_offset) \ + (((uint32_t)(_offset) + MK_U32(63)) & ~(MK_U32(63))) + +/** Flicker band (FB) 
unit statistics data offset */ +#define ISP6_STATS_FB_OFFSET MK_SIZE(0) +/** Focus Metrics (FM) unit statistics data offset */ +#define ISP6_STATS_FM_OFFSET \ + (ISP6_STATS_FB_OFFSET + ISP6_ALIGN_STAT_OFFSET(ISP6_STATS_FB_MAX_SIZE)) +/** Auto Focus Metrics (AFM) unit statistics data offset */ +#define ISP6_STATS_AFM_OFFSET \ + (ISP6_STATS_FM_OFFSET + ISP6_ALIGN_STAT_OFFSET(ISP6_STATS_FM_MAX_SIZE)) +/** Local Average Clipping (LAC0) unit statistics data offset */ +#define ISP6_STATS_LAC0_OFFSET \ + (ISP6_STATS_AFM_OFFSET + \ + (ISP6_ALIGN_STAT_OFFSET(ISP6_STATS_AFM_ROI_MAX_SIZE) * MK_SIZE(8))) +/** Local Average Clipping (LAC1) unit statistics data offset */ +#define ISP6_STATS_LAC1_OFFSET \ + (ISP6_STATS_LAC0_OFFSET + \ + (ISP6_ALIGN_STAT_OFFSET(ISP6_STATS_LAC_ROI_MAX_SIZE) * MK_SIZE(4))) +/** Histogram unit (H0) statistics data offset */ +#define ISP6_STATS_HIST0_OFFSET \ + (ISP6_STATS_LAC1_OFFSET + \ + (ISP6_ALIGN_STAT_OFFSET(ISP6_STATS_LAC_ROI_MAX_SIZE) * MK_SIZE(4))) +/** Histogram unit (H1) statistics data offset */ +#define ISP6_STATS_HIST1_OFFSET \ + (ISP6_STATS_HIST0_OFFSET + \ + ISP6_ALIGN_STAT_OFFSET(ISP6_STATS_HIST_MAX_SIZE)) +/** Outlier replacement (OR) unit statistics data offset */ +#define ISP6_STATS_OR_OFFSET \ + (ISP6_STATS_HIST1_OFFSET + \ + ISP6_ALIGN_STAT_OFFSET(ISP6_STATS_HIST_MAX_SIZE)) +/** Raw data 24 bit histogram (HIST_RAW24) unit statistics data offset */ +#define ISP6_STATS_HIST_RAW24_OFFSET \ + (ISP6_STATS_OR_OFFSET + \ + ISP6_ALIGN_STAT_OFFSET(ISP6_STATS_OR_MAX_SIZE)) +/** Local Tone Mapping (LTM) unit statistics data offset */ +#define ISP6_STATS_LTM_OFFSET \ + (ISP6_STATS_HIST_RAW24_OFFSET + \ + ISP6_ALIGN_STAT_OFFSET(ISP6_STATS_HIST_RAW24_MAX_SIZE)) +/** Total statistics data size in bytes */ +#define ISP6_STATS_TOTAL_SIZE \ + (ISP6_STATS_LTM_OFFSET + ISP6_STATS_LTM_MAX_SIZE) +/**@}*/ + +#define ISP_NUM_GOS_TABLES MK_U32(8) + +#define VI_NUM_GOS_TABLES MK_U32(12) +#define VI_NUM_ATOMP_SURFACES 4 +#define VI_NUM_STATUS_SURFACES 1 
+#define VI_NUM_VI_PFSD_SURFACES 2 + +/** + * @defgroup ViAtompSurface VI ATOMP surface related defines + * @{ + */ +/** Output surface plane 0 */ +#define VI_ATOMP_SURFACE0 0 +/** Output surface plane 1 */ +#define VI_ATOMP_SURFACE1 1 +/** Output surface plane 2 */ +#define VI_ATOMP_SURFACE2 2 + +/** Sensor embedded data */ +#define VI_ATOMP_SURFACE_EMBEDDED 3 + +/** RAW pixels */ +#define VI_ATOMP_SURFACE_MAIN VI_ATOMP_SURFACE0 +/** PDAF pixels */ +#define VI_ATOMP_SURFACE_PDAF VI_ATOMP_SURFACE1 + +/** YUV - Luma plane */ +#define VI_ATOMP_SURFACE_Y VI_ATOMP_SURFACE0 +/** Semi-planar - UV plane */ +#define VI_ATOMP_SURFACE_UV VI_ATOMP_SURFACE1 +/** Planar - U plane */ +#define VI_ATOMP_SURFACE_U VI_ATOMP_SURFACE1 +/** Planar - V plane */ +#define VI_ATOMP_SURFACE_V VI_ATOMP_SURFACE2 + +/** @} */ + +/* SLVS-EC */ +#define SLVSEC_STREAM_DISABLED MK_U8(0xFF) + +/** + * @defgroup VICaptureChannelFlags + * VI Capture channel specific flags + */ +/**@{*/ +/** Channel takes input from Video Interface (VI) */ +#define CAPTURE_CHANNEL_FLAG_VIDEO MK_U32(0x0001) +/** Channel supports RAW Bayer output */ +#define CAPTURE_CHANNEL_FLAG_RAW MK_U32(0x0002) +/** Channel supports planar YUV output */ +#define CAPTURE_CHANNEL_FLAG_PLANAR MK_U32(0x0004) +/** Channel supports semi-planar YUV output */ +#define CAPTURE_CHANNEL_FLAG_SEMI_PLANAR MK_U32(0x0008) +/** Channel supports phase-detection auto-focus */ +#define CAPTURE_CHANNEL_FLAG_PDAF MK_U32(0x0010) +/** Channel outputs to Focus Metric Lite module (FML) */ +#define CAPTURE_CHANNEL_FLAG_FMLITE MK_U32(0x0020) +/** Channel outputs sensor embedded data */ +#define CAPTURE_CHANNEL_FLAG_EMBDATA MK_U32(0x0040) +/** Channel outputs to ISPA */ +#define CAPTURE_CHANNEL_FLAG_ISPA MK_U32(0x0080) +/** Channel outputs to ISPB */ +#define CAPTURE_CHANNEL_FLAG_ISPB MK_U32(0x0100) +/** Channel outputs directly to selected ISP (ISO mode) */ +#define CAPTURE_CHANNEL_FLAG_ISP_DIRECT MK_U32(0x0200) +/** Channel outputs to software ISP (reserved) 
*/ +#define CAPTURE_CHANNEL_FLAG_ISPSW MK_U32(0x0400) +/** Channel treats all errors as stop-on-error and requires reset for recovery.*/ +#define CAPTURE_CHANNEL_FLAG_RESET_ON_ERROR MK_U32(0x0800) +/** Channel has line timer enabled */ +#define CAPTURE_CHANNEL_FLAG_LINETIMER MK_U32(0x1000) +/** Channel supports SLVSEC sensors */ +#define CAPTURE_CHANNEL_FLAG_SLVSEC MK_U32(0x2000) +/** Channel reports errors to HSM based on error_mask_correctable and error_mask_uncorrectable.*/ +#define CAPTURE_CHANNEL_FLAG_ENABLE_HSM_ERROR_MASKS MK_U32(0x4000) +/** Capture with VI PFSD enabled */ +#define CAPTURE_CHANNEL_FLAG_ENABLE_VI_PFSD MK_U32(0x8000) +/** Channel binds to a CSI stream and channel */ +#define CAPTURE_CHANNEL_FLAG_CSI MK_U32(0x10000) + + /**@}*/ + +/** + * @defgroup CaptureChannelErrMask + * Bitmask for masking "Uncorrected errors" and "Errors with threshold". + */ +/**@{*/ +/** VI Frame start error timeout */ +#define CAPTURE_CHANNEL_ERROR_VI_FRAME_START_TIMEOUT MK_BIT32(23) +/** VI Permanent Fault SW Diagnostics (PFSD) error */ +#define CAPTURE_CHANNEL_ERROR_VI_PFSD_FAULT MK_BIT32(22) +/** Embedded data incomplete */ +#define CAPTURE_CHANNEL_ERROR_ERROR_EMBED_INCOMPLETE MK_BIT32(21) +/** Pixel frame is incomplete */ +#define CAPTURE_CHANNEL_ERROR_INCOMPLETE MK_BIT32(20) +/** A Frame End appears from NVCSI before the normal number of pixels has appeared*/ +#define CAPTURE_CHANNEL_ERROR_STALE_FRAME MK_BIT32(19) +/** A start-of-frame matches a channel that is already in frame */ +#define CAPTURE_CHANNEL_ERROR_COLLISION MK_BIT32(18) +/** Frame end was forced by channel reset */ +#define CAPTURE_CHANNEL_ERROR_FORCE_FE MK_BIT32(17) +/** A LOAD command is received for a channel while that channel is currently in a frame.*/ +#define CAPTURE_CHANNEL_ERROR_LOAD_FRAMED MK_BIT32(16) +/** The pixel datatype changed in the middle of the line */ +#define CAPTURE_CHANNEL_ERROR_DTYPE_MISMATCH MK_BIT32(15) +/** Unexpected embedded data in frame */ +#define 
CAPTURE_CHANNEL_ERROR_EMBED_INFRINGE MK_BIT32(14) +/** Extra embedded bytes on line */ +#define CAPTURE_CHANNEL_ERROR_EMBED_LONG_LINE MK_BIT32(13) +/** Embedded bytes found between line start and line end*/ +#define CAPTURE_CHANNEL_ERROR_EMBED_SPURIOUS MK_BIT32(12) +/** Too many embedded lines in frame */ +#define CAPTURE_CHANNEL_ERROR_EMBED_RUNAWAY MK_BIT32(11) +/** Two embedded line starts without a line end in between */ +#define CAPTURE_CHANNEL_ERROR_EMBED_MISSING_LE MK_BIT32(10) +/** A line has fewer pixels than expected width */ +#define CAPTURE_CHANNEL_ERROR_PIXEL_SHORT_LINE MK_BIT32(9) +/** A line has more pixels than expected width, pixels dropped */ +#define CAPTURE_CHANNEL_ERROR_PIXEL_LONG_LINE MK_BIT32(8) +/** A pixel found between line end and line start markers, dropped */ +#define CAPTURE_CHANNEL_ERROR_PIXEL_SPURIOUS MK_BIT32(7) +/** Too many pixel lines in frame, extra lines dropped */ +#define CAPTURE_CHANNEL_ERROR_PIXEL_RUNAWAY MK_BIT32(6) +/** Two line starts without a line end in between */ +#define CAPTURE_CHANNEL_ERROR_PIXEL_MISSING_LE MK_BIT32(5) +/**@}*/ + +/** + * @defgroup VIUnitIds + * VI Unit Identifiers + */ +/**@{*/ +/** VI unit 0 */ +#define VI_UNIT_VI MK_U32(0x0000) +/** VI unit 1 */ +#define VI_UNIT_VI2 MK_U32(0x0001) +/**@}*/ + +/** + * @brief Identifies a specific CSI stream. + */ +struct csi_stream_config { + /** See @ref NvCsiStream "NVCSI stream id" */ + uint32_t stream_id; + /** See @ref NvCsiPort "NvCSI Port" */ + uint32_t csi_port; + /** CSI Virtual Channel */ + uint32_t virtual_channel; + /** Reserved */ + uint32_t pad__; +}; + +/** + * @brief Describes RTCPU side resources for a capture pipe-line. + */ +struct capture_channel_config { + /** + * A bitmask describing the set of non-shareable + * HW resources that the capture channel will need. These HW resources + * will be assigned to the new capture channel and will be owned by the + * channel until it is released with CAPTURE_CHANNEL_RELEASE_REQ. 
+ * + * The HW resources that can be assigned to a channel include a VI + * channel, ISPBUF A/B interface (T18x only), Focus Metric Lite module (FML). + * + * VI channels can have different capabilities. The flags are checked + * against the VI channel capabilities to make sure the allocated VI + * channel meets the requirements. + * + * See @ref VICaptureChannelFlags "Capture Channel Flags". + */ + uint32_t channel_flags; + + /** rtcpu internal data field - Should be set to zero */ + uint32_t channel_id; + + /** VI unit ID. See @ref ViUnitIds "VI Unit Identifiers". */ + uint32_t vi_unit_id; + + /** Reserved */ + uint32_t pad__; + + /** + * A bitmask indicating which VI channels to consider for allocation. LSB is VI channel 0. + * This allows the client to enforce allocation of HW VI channel in particular range for its own + * purpose. + * + * Beware that client VM may have restricted range of available VI channels. + * + * In most of the cases client can set to ~0ULL to let RTCPU to allocate any available channel + * permitted for client VM. + * + * This mask is expected to be useful for following use-cases: + * 1. Debugging functionality of particular HW VI channel. + * 2. Verify that RTCPU enforces VI channel permissions defined in VM DT. + */ + uint64_t vi_channel_mask; + + /** + * A bitmask indicating which VI2 channels to consider for allocation. LSB is VI2 channel 0. + * This allows the client to enforce allocation of HW VI channel in particular range for its own + * purpose. + * + * Beware that client VM may have restricted range of available VI2 channels. + * + * In most of the cases client can set to ~0ULL to let RTCPU to allocate any available channel + * permitted for client VM. + * + * This mask is expected to be useful for following use-cases: + * 1. Debugging functionality of particular HW VI2 channel. + * 2. Verify that RTCPU enforces VI channel permissions defined in VM DT. 
+ */ + uint64_t vi2_channel_mask; + + /** + * CSI stream configuration identifies the CSI stream input for this channel. + */ + struct csi_stream_config csi_stream; + + /** + * Base address of a memory mapped ring buffer containing capture requests. + * The size of the buffer is queue_depth * request_size + */ + iova_t requests; + + /** + * Base address of a memory mapped ring buffer containing capture requests buffer + * information. + * The size of the buffer is queue_depth * request_memoryinfo_size + */ + iova_t requests_memoryinfo; + + /** + * Maximum number of capture requests in the requests queue. + * Determines the size of the ring buffer. + */ + uint32_t queue_depth; + /** Size of the buffer reserved for each capture request. */ + uint32_t request_size; + + /** Size of the memoryinfo buffer reserved for each capture request. */ + uint32_t request_memoryinfo_size; + + /** Reserved */ + uint32_t reserved2; + + /** SLVS-EC main stream */ + uint8_t slvsec_stream_main; + /** SLVS-EC sub stream */ + uint8_t slvsec_stream_sub; + /** Reserved */ + uint16_t reserved1; + +#define HAVE_VI_GOS_TABLES + /** + * GoS tables can only be programmed when there are no + * active channels. For subsequent channels we check that + * the channel configuration matches with the active + * configuration. + * + * Number of Grid of Semaphores (GOS) tables + */ + uint32_t num_vi_gos_tables; + /** VI GOS tables */ + iova_t vi_gos_tables[VI_NUM_GOS_TABLES]; + + /** Capture progress syncpoint info */ + struct syncpoint_info progress_sp; + /** Embedded data syncpoint info */ + struct syncpoint_info embdata_sp; + /** VI line timer syncpoint info */ + struct syncpoint_info linetimer_sp; + + /** + * User-defined HSM error reporting policy is specified by error masks bits + * + * CAPTURE_CHANNEL_FLAG_ENABLE_HSM_ERROR_MASKS must be set to enable these error masks, + * otherwise default HSM reporting policy is used. 
+ * + * VI-falcon reports error to EC/HSM as uncorrected if error is not masked + * in "Uncorrected" mask. + * VI-falcon reports error to EC/HSM as corrected if error is masked + * in "Uncorrected" mask and not masked in "Errors with threshold" mask. + * VI-falcon does not report error to EC/HSM if error masked + * in both "Uncorrected" and "Errors with threshold" masks. + */ + /** + * Error mask for "uncorrected" errors. See @ref CaptureChannelErrMask "Channel Error bitmask". + * These map to the uncorrected error line in HSM + */ + uint32_t error_mask_uncorrectable; + /** + * Error mask for "errors with threshold". + * See @ref CaptureChannelErrMask "Channel Error bitmask". + * These map to the corrected error line in HSM */ + uint32_t error_mask_correctable; + + /** + * Capture will stop for errors selected in these bit masks. + * Bit definitions are same as in CAPTURE_STATUS_NOTIFY_BIT_* macros. + */ + uint64_t stop_on_error_notify_bits; + +} CAPTURE_IVC_ALIGN; + +/** + * @brief VI Channel configuration + * + * VI unit register programming for capturing a frame. + */ +struct vi_channel_config { + /** DT override enabled flag */ + unsigned dt_enable:1; + /** Embedded data enabled flag */ + unsigned embdata_enable:1; + /** Flush notice enabled flag */ + unsigned flush_enable:1; + /** Periodic flush notice enabled flag */ + unsigned flush_periodic:1; + /** Line timer enabled flag */ + unsigned line_timer_enable:1; + /** Periodic line timer notice enabled flag */ + unsigned line_timer_periodic:1; + /** Enable PIXFMT writing pixels flag */ + unsigned pixfmt_enable:1; + /** Flag to enable merging adjacent RAW8/RAW10 pixels */ + unsigned pixfmt_wide_enable:1; + /** Flag to enable big or little endian. 0 - Big Endian, 1 - Little Endian */ + unsigned pixfmt_wide_endian:1; + /** Flag to enable Phase Detection Auto Focus (PDAF) pixel replacement */ + unsigned pixfmt_pdaf_replace_enable:1; + /** ISPA buffer enabled */ + unsigned ispbufa_enable:1; + /** ISPB buffer enabled. 
Not valid for T186 & T194 */ + unsigned ispbufb_enable:1; + /** VI Companding module enable flag */ + unsigned compand_enable:1; + /** Reserved bits */ + unsigned pad_flags__:19; + + /* VI channel selector */ + struct match_rec { + /** Datatype to be sent to the channel */ + uint8_t datatype; + /** Bits of datatype to match on */ + uint8_t datatype_mask; + /** CSIMUX source to send to this channel */ + uint8_t stream; + /** Bits of STREAM to match on. */ + uint8_t stream_mask; + /** Virtual channel to be sent to this channel */ + uint16_t vc; + /** Bits of VIRTUAL_CHANNEL_MASK to match on */ + uint16_t vc_mask; + /** Frame id to be sent to this channel */ + uint16_t frameid; + /** Bits of FRAME_ID to match on. */ + uint16_t frameid_mask; + /** Data in the first pixel of a line to match on */ + uint16_t dol; + /** Bits of DOL to match on */ + uint16_t dol_mask; + } match; + + /** DOL header select */ + uint8_t dol_header_sel; + /** Data type override */ + uint8_t dt_override; + /** DPCM mode to be used. Currently DPCM is not used */ + uint8_t dpcm_mode; + /** Reserved */ + uint8_t pad_dol_dt_dpcm__; + + struct vi_frame_config { + /** Pixel width of frame before cropping */ + uint16_t frame_x; + /** Line height of frame */ + uint16_t frame_y; + /** Maximum number of embedded data bytes on a line */ + uint32_t embed_x; + /** Number of embedded lines in frame */ + uint32_t embed_y; + struct skip_rec { + /** + * Number of packets to skip on output at start of line. 
+ * Counted in groups of 8 pixels + */ + uint16_t x; + /** Number of lines to skip at top of the frame */ + uint16_t y; + } skip; + struct crop_rec { + /** Line width in pixels after which no packets will be transmitted */ + uint16_t x; + /** Height in lines after which no lines will be transmitted */ + uint16_t y; + } crop; + } frame; + + /** Pixel line count at which a flush notice is sent out */ + uint16_t flush; + /** Line count at which to trip the first flush event */ + uint16_t flush_first; + + /** Pixel line count at which a notification is sent out*/ + uint16_t line_timer; + /** Line count at which to trip the first line timer event */ + uint16_t line_timer_first; + + /* Pixel formatter */ + struct pixfmt_rec { + /** Pixel memory format for the VI channel */ + uint16_t format; + /** Zero padding control for RAW8/10/12/14->T_R16 and RAW20/24->T_R32 */ + uint8_t pad0_en; + /** Reserved */ + uint8_t pad__; + struct pdaf_rec { + /** Within a line, X pixel position at which PDAF separation begins */ + uint16_t crop_left; + /** Within a line, X pixel position at which PDAF separation ends*/ + uint16_t crop_right; + /** Line at which PDAF separation begins */ + uint16_t crop_top; + /** Line at which PDAF separation ends */ + uint16_t crop_bottom; + /** Within a line, X pixel position at which PDAF replacement begins*/ + uint16_t replace_crop_left; + /** Within a line, X pixel position at which PDAF replacement ends*/ + uint16_t replace_crop_right; + /** Line at which PDAF replacement begins */ + uint16_t replace_crop_top; + /** Line at which PDAF replacement ends */ + uint16_t replace_crop_bottom; + /** X coordinate of last PDAF pixel within the PDAF crop window */ + uint16_t last_pixel_x; + /** Y coordinate of last PDAF pixel within the PDAF crop window */ + uint16_t last_pixel_y; + /** Value to replace PDAF pixel with */ + uint16_t replace_value; + /** Memory format in which the PDAF pixels will be written in */ + uint8_t format; + /** Reserved */ + uint8_t 
pad_pdaf__; + } pdaf; + } pixfmt; + + /* Pixel DPCM */ + struct dpcm_rec { + /** Number of pixels in the strip */ + uint16_t strip_width; + /** Number of packets in overfetch region */ + uint16_t strip_overfetch; + + /** Not for T186 or earlier */ + /** Number of packets in first generated chunk (no OVERFETCH region in first chunk) */ + uint16_t chunk_first; + /** Number of packets in “body” chunks (including OVERFETCH region, if enabled) */ + uint16_t chunk_body; + /** Number of “body” chunks to emit */ + uint16_t chunk_body_count; + /** + * Number of packets in chunk immediately after “body” chunks (including OVERFETCH + * region, if enabled) + */ + uint16_t chunk_penultimate; + /** Number of packets in final generated chunk (including OVERFETCH region, if enabled) */ + uint16_t chunk_last; + /** Reserved */ + uint16_t pad__; + /** Maximum value to truncate input data to */ + uint32_t clamp_high; + /** Minimum value to truncate input data to */ + uint32_t clamp_low; + } dpcm; + + /* Atom packer */ + struct atomp_rec { + struct surface_rec { + /** Offset within memory buffer */ + uint32_t offset; + /** Memory handle of the buffer. Must be valid handle or 0 */ + uint32_t offset_hi; + } surface[VI_NUM_ATOMP_SURFACES]; + /** Line stride of the surface in bytes */ + uint32_t surface_stride[VI_NUM_ATOMP_SURFACES]; + /** DPCM chunk stride (distance from start of chunk to end of chunk) */ + uint32_t dpcm_chunk_stride; + } atomp; + + /** Reserved */ + uint16_t pad__[2]; + +} CAPTURE_IVC_ALIGN; + +/** + * @brief Engine status buffer base address. + */ +struct engine_status_surface { + /** Offset within memory buffer */ + uint32_t offset; + /** Memory handle of the buffer. Must be valid handle or 0 */ + uint32_t offset_hi; +} CAPTURE_IVC_ALIGN; + +/** + * @brief NVCSI error status + * + * Represents error reported from CSI source used by capture descriptor. 
+ * + */ +struct nvcsi_error_status { + /** + * NVCSI @ref NvCsiStreamErrors "errors" reported for stream used by capture descriptor + * + * Stream error affects multiple virtual channels. + * It will be reported only once, for the first capture channel + * which retrieved the error report. + * + * This error causes data packet drops and should trigger VI errors in + * affected virtual channels. + */ + uint32_t nvcsi_stream_bits; + + /** + * @defgroup NvCsiStreamErrors + * NVCSI Stream error bits + */ + /** @{ */ +#define NVCSI_STREAM_ERR_STAT_PH_BOTH_CRC_ERR MK_BIT32(1) +#define NVCSI_STREAM_ERR_STAT_PH_ECC_MULTI_BIT_ERR MK_BIT32(0) + /** @} */ + + /** + * NVCSI @ref NvCsiVirtualChannelErrors "errors" reported for stream virtual channel used by capture descriptor + * These errors are expected to be forwarded to VI and also reported by VI as CSIMUX Frame CSI_FAULT errors + */ + uint32_t nvcsi_virtual_channel_bits; + + /** + * @defgroup NvCsiVirtualChannelErrors + * NVCSI Virtual Channel error bits + */ + /** @{ */ +#define NVCSI_VC_ERR_INTR_STAT_PH_SINGLE_CRC_ERR_VC0 MK_BIT32(4) +#define NVCSI_VC_ERR_INTR_STAT_PD_WC_SHORT_ERR_VC0 MK_BIT32(3) +#define NVCSI_VC_ERR_INTR_STAT_PD_CRC_ERR_VC0 MK_BIT32(2) +#define NVCSI_VC_ERR_INTR_STAT_PH_ECC_SINGLE_BIT_ERR_VC0 MK_BIT32(1) +#define NVCSI_VC_ERR_INTR_STAT_PPFSM_TIMEOUT_VC0 MK_BIT32(0) + /** @} */ + + /** + * NVCSI errors reported for CIL interface used by capture descriptor + */ + /** NVCSI CIL A @ref NvCsiCilErrors "errors" */ + uint32_t cil_a_error_bits; + /** NVCSI CIL B @ref NvCsiCilErrors "errors" */ + uint32_t cil_b_error_bits; + + /** + * @defgroup NvCsiCilErrors + * NVCSI CIL error bits + */ + /** @{ */ +#define NVCSI_ERR_CIL_DATA_LANE_SOT_2LSB_ERR1 MK_BIT32(16) +#define NVCSI_ERR_CIL_DATA_LANE_SOT_2LSB_ERR0 MK_BIT32(15) +#define NVCSI_ERR_CIL_DATA_LANE_ESC_MODE_SYNC_ERR1 MK_BIT32(14) +#define NVCSI_ERR_CIL_DATA_LANE_ESC_MODE_SYNC_ERR0 MK_BIT32(13) +#define NVCSI_ERR_DPHY_CIL_LANE_ALIGN_ERR MK_BIT32(12) +#define 
NVCSI_ERR_DPHY_CIL_DESKEW_CALIB_ERR_CTRL MK_BIT32(11) +#define NVCSI_ERR_DPHY_CIL_DESKEW_CALIB_ERR_LANE1 MK_BIT32(10) +#define NVCSI_ERR_DPHY_CIL_DESKEW_CALIB_ERR_LANE0 MK_BIT32(9) +#define NVCSI_ERR_CIL_DATA_LANE_RXFIFO_FULL_ERR1 MK_BIT32(8) +#define NVCSI_ERR_CIL_DATA_LANE_CTRL_ERR1 MK_BIT32(7) +#define NVCSI_ERR_CIL_DATA_LANE_SOT_MB_ERR1 MK_BIT32(6) +#define NVCSI_ERR_CIL_DATA_LANE_SOT_SB_ERR1 MK_BIT32(5) +#define NVCSI_ERR_CIL_DATA_LANE_RXFIFO_FULL_ERR0 MK_BIT32(4) +#define NVCSI_ERR_CIL_DATA_LANE_CTRL_ERR0 MK_BIT32(3) +#define NVCSI_ERR_CIL_DATA_LANE_SOT_MB_ERR0 MK_BIT32(2) +#define NVCSI_ERR_CIL_DATA_LANE_SOT_SB_ERR0 MK_BIT32(1) +#define NVCSI_ERR_DPHY_CIL_CLK_LANE_CTRL_ERR MK_BIT32(0) + /** @} */ +}; + +/** + * @brief Frame capture status record + */ +struct capture_status { + /** CSI stream number */ + uint8_t src_stream; + /** CSI virtual channel number */ + uint8_t virtual_channel; + /** Frame sequence number */ + uint16_t frame_id; + /** Capture status. See @ref CaptureStatusCodes "codes". */ + uint32_t status; + +/** + * @defgroup CaptureStatusCodes + * Capture status codes + */ +/** @{ */ +/** Capture status unknown. + * Value of @ref err_data "err_data" is undefined. + */ +#define CAPTURE_STATUS_UNKNOWN MK_U32(0) +/** Capture status success. + * Value of @ref err_data "err_data" is undefined. + */ +#define CAPTURE_STATUS_SUCCESS MK_U32(1) +/** CSIMUX frame error. + * + * Maps to VI CSIMUX_FRAME event. + * + * See @ref err_data "err_data" with the VI event payload. + * + * Please refer to T19x Video Input (“VI5”) Application Note: + * Programming NOTIFY v0.0.0" for details. + */ +#define CAPTURE_STATUS_CSIMUX_FRAME MK_U32(2) +/** CSIMUX stream error. + * + * Maps to VI CSIMUX_STREAM event. + * + * See @ref err_data "err_data" with the VI event payload. + * Please refer to T19x Video Input (“VI5”) Application Note: + * Programming NOTIFY v0.0.0" for details. + */ +#define CAPTURE_STATUS_CSIMUX_STREAM MK_U32(3) +/** Data-specific fault in a channel. 
+ * Maps to VI CHANSEL_FAULT event. + * See @ref err_data "err_data" with the VI event payload. + * Please refer to T19x Video Input (“VI5”) Application Note: + * Programming NOTIFY v0.0.0" for details. + */ +#define CAPTURE_STATUS_CHANSEL_FAULT MK_U32(4) +/** Data-specific fault in a channel. + * FE packet was force inserted. Maps to VI CHANSEL_FAULT_FE event. + * See @ref err_data "err_data" with the VI event payload. + * Please refer to T19x Video Input (“VI5”) Application Note: + * Programming NOTIFY v0.0.0" for details. + */ +#define CAPTURE_STATUS_CHANSEL_FAULT_FE MK_U32(5) +/** SOF matches a channel that is already in a frame. + * Maps to VI CHANSEL_COLLISION event. + * See @ref err_data "err_data" with the VI event payload. + * Please refer to T19x Video Input (“VI5”) Application Note: + * Programming NOTIFY v0.0.0" for details. + */ +#define CAPTURE_STATUS_CHANSEL_COLLISION MK_U32(6) +/** Frame End appears from NVCSI before the normal number of pixels. + * Maps to VI CHANSEL_SHORT_FRAME event. + * See @ref err_data "err_data" with the VI event payload. + * Please refer to T19x Video Input (“VI5”) Application Note: + * Programming NOTIFY v0.0.0" for details. + */ +#define CAPTURE_STATUS_CHANSEL_SHORT_FRAME MK_U32(7) +/** Single surface packer has overflowed. + * Maps to VI ATOMP_PACKER_OVERFLOW event. + * See @ref err_data "err_data" with the VI event payload. + * Please refer to T19x Video Input (“VI5”) Application Note: + * Programming NOTIFY v0.0.0" for details. + */ +#define CAPTURE_STATUS_ATOMP_PACKER_OVERFLOW MK_U32(8) +/** Frame interrupted mid-frame. + * Maps to VI ATOMP_FRAME_TRUNCATED event. + * See @ref err_data "err_data" with the VI event payload. + * Please refer to T19x Video Input (“VI5”) Application Note: + * Programming NOTIFY v0.0.0" for details. + */ +#define CAPTURE_STATUS_ATOMP_FRAME_TRUNCATED MK_U32(9) +/** Frame interrupted without writing any data out. + * Maps to VI ATOMP_FRAME_TOSSED event. 
+ * See @ref err_data "err_data" with the VI event payload. + * Please refer to T19x Video Input (“VI5”) Application Note: + * Programming NOTIFY v0.0.0" for details. + */ +#define CAPTURE_STATUS_ATOMP_FRAME_TOSSED MK_U32(10) +/** ISP buffer FIFO overflowed. + * Maps to VI CSIMUX_FRAME event. + * See @ref err_data "err_data" with the VI event payload. + * Please refer to T19x Video Input (“VI5”) Application Note: + * Programming NOTIFY v0.0.0" for details. + */ +#define CAPTURE_STATUS_ISPBUF_FIFO_OVERFLOW MK_U32(11) +/** Capture status out of sync. + * Value of @ref err_data "err_data" is undefined. + */ +#define CAPTURE_STATUS_SYNC_FAILURE MK_U32(12) +/** VI notifier backend down. + * Value of @ref err_data "err_data" is undefined. + */ +#define CAPTURE_STATUS_NOTIFIER_BACKEND_DOWN MK_U32(13) +/** Falcon error. + * Value of @ref err_data "err_data" is defined in + " VI Microcode IAS v0.5.13, section 2.3.3". + */ +#define CAPTURE_STATUS_FALCON_ERROR MK_U32(14) +/** Data does not match any active channel. + * Maps to VI CHANSEL_NOMATCH event. + * See @ref err_data "err_data" with the VI event payload. + * Please refer to T19x Video Input (“VI5”) Application Note: + * Programming NOTIFY v0.0.0" for details. + */ +#define CAPTURE_STATUS_CHANSEL_NOMATCH MK_U32(15) +/** Capture status for invalid VI capture settings. + * Value of @ref err_data "err_data" is undefined. + */ +#define CAPTURE_STATUS_INVALID_CAP_SETTINGS MK_U32(16) +/** @} */ + + /** Start of Frame (SOF) timestamp (ns) */ + uint64_t sof_timestamp; + /** End of Frame (EOF) timestamp (ns) */ + uint64_t eof_timestamp; + /** + * Extended error data. The content depends on the value in @ref status. + * See @ref CaptureStatusCodes for references. 
+ */ + uint32_t err_data; + +/** + * @defgroup CaptureStatusFlags Capture status flags + */ +/** @{ */ + /** Channel encountered unrecoverable error and must be reset */ +#define CAPTURE_STATUS_FLAG_CHANNEL_IN_ERROR MK_BIT32(1) +/** @} */ + + /** See @ref CaptureStatusFlags "Capture status flags" */ + uint32_t flags; + + /** + * VI error notifications logged in capture channel since previous capture. + * See @ref ViNotifyErrorTag "VI notify error bitmask" + * + * Please refer to "[1] VI Microcode IAS v0.5.13" and + * "[2] T19x Video Input (“VI5”) Application Note: Programming NOTIFY v0.0.0" + * for more information on the meaning of individual error bits. + */ + uint64_t notify_bits; + + /** + * @defgroup ViNotifyErrorTag + * Error bit definitions for the @ref notify_bits field + */ + /** @{ */ + /** Reserved */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_FRAME_RESEVED_0 MK_BIT64(1) + /** Frame start fault */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_FRAME_FS_FAULT MK_BIT64(2) + /** Frame end forced by CSIMUX stream reset or stream timeout */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_FRAME_FORCE_FE_FAULT MK_BIT64(3) + /** Frame ID fault in frame end packet */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_FRAME_FE_FRAME_ID_FAULT MK_BIT64(4) + /** Pixel enable fault in pixel packet header */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_FRAME_PXL_ENABLE_FAULT MK_BIT64(5) + + /** Reserved */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_FRAME_RESERVED_1 MK_BIT64(6) + /** Reserved */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_FRAME_RESERVED_2 MK_BIT64(7) + /** Reserved */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_FRAME_RESERVED_3 MK_BIT64(8) + /** Reserved */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_FRAME_RESERVED_4 MK_BIT64(9) + /** Reserved */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_FRAME_RESERVED_5 MK_BIT64(10) + /** Reserved */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_FRAME_RESERVED_6 MK_BIT64(11) + /** Reserved */ + #define 
CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_FRAME_RESERVED_7 MK_BIT64(12) + /** Reserved */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_FRAME_RESERVED_8 MK_BIT64(13) + /** Reserved */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_FRAME_RESERVED_9 MK_BIT64(14) + + /** CSI pixel parser finite state machine timeout */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_FRAME_CSI_FAULT_PPFSM_TIMEOUT MK_BIT64(15) + /** CSI single bit error corrected in packet header by ECC */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_FRAME_CSI_FAULT_PH_ECC_SINGLE_BIT_ERR MK_BIT64(16) + /** CSI CRC Error in payload data */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_FRAME_CSI_FAULT_PD_CRC_ERR MK_BIT64(17) + /** CSI payload data word count short error */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_FRAME_CSI_FAULT_PD_WC_SHORT_ERR MK_BIT64(18) + /** CSI packet header single CRC error */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_FRAME_CSI_FAULT_PH_SINGLE_CRC_ERR MK_BIT64(19) + /** CSI embedded data line CRC error */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_FRAME_CSI_FAULT_EMBEDDED_LINE_CRC_ERR MK_BIT64(20) + + /** + * Spurious data detected between valid frames or before first frame. + * This can be a badly corrupted frame or some random bits. + * This error doesn't have an effect on the captured frame. + */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_STREAM_SPURIOUS_DATA MK_BIT64(21) + /** Stream FIFO overflow. This error is unrecoverable. */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_STREAM_FIFO_OVERFLOW MK_BIT64(22) + /** Stream loss of frame error. This error is unrecoverable. */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_STREAM_FIFO_LOF MK_BIT64(23) + /** + * Illegal data packet was encountered and dropped by CSIMUX. + * This error may have no effect on capture result or trigger + * other errors if frame got corrupted. + */ + #define CAPTURE_STATUS_NOTIFY_BIT_CSIMUX_STREAM_FIFO_BADPKT MK_BIT64(24) + + /** + * Timeout from frame descriptor activation to frame start. 
+ * See also frame_start_timeout in struct capture_descriptor + */ + #define CAPTURE_STATUS_NOTIFY_BIT_FRAME_START_TIMEOUT MK_BIT64(25) + + /** + * Timeout from frame start to frame completion. + * See also frame_completion_timeout in struct capture_descriptor + */ + #define CAPTURE_STATUS_NOTIFY_BIT_FRAME_COMPLETION_TIMEOUT MK_BIT64(26) + + /** Missing line end packet in pixel data line */ + #define CAPTURE_STATUS_NOTIFY_BIT_CHANSEL_PIXEL_MISSING_LE MK_BIT64(30) + /** Frame has too many lines */ + #define CAPTURE_STATUS_NOTIFY_BIT_CHANSEL_PIXEL_RUNAWAY MK_BIT64(31) + /** Pixel data received without line start packet */ + #define CAPTURE_STATUS_NOTIFY_BIT_CHANSEL_PIXEL_SPURIOUS MK_BIT64(32) + /** Pixel data line is too long */ + #define CAPTURE_STATUS_NOTIFY_BIT_CHANSEL_PIXEL_LONG_LINE MK_BIT64(33) + /** Pixel data line is too short */ + #define CAPTURE_STATUS_NOTIFY_BIT_CHANSEL_PIXEL_SHORT_LINE MK_BIT64(34) + /** Missing line end packet in embedded data line */ + #define CAPTURE_STATUS_NOTIFY_BIT_CHANSEL_EMBED_MISSING_LE MK_BIT64(35) + /** Frame has too many lines of embedded data */ + #define CAPTURE_STATUS_NOTIFY_BIT_CHANSEL_EMBED_RUNAWAY MK_BIT64(36) + /** Embedded data received without line start packet */ + #define CAPTURE_STATUS_NOTIFY_BIT_CHANSEL_EMBED_SPURIOUS MK_BIT64(37) + /** Embedded data line is too long */ + #define CAPTURE_STATUS_NOTIFY_BIT_CHANSEL_EMBED_LONG_LINE MK_BIT64(38) + /** Embedded data received when not expected */ + #define CAPTURE_STATUS_NOTIFY_BIT_CHANSEL_EMBED_INFRINGE MK_BIT64(39) + /** Invalid pixel data type in pixel packet or line start packet */ + #define CAPTURE_STATUS_NOTIFY_BIT_CHANSEL_DTYPE_MISMATCH MK_BIT64(40) + /** Reserved */ + #define CAPTURE_STATUS_NOTIFY_BIT_CHANSEL_RESERVED_0 MK_BIT64(41) + + /** Frame too short - too few pixel data lines */ + #define CAPTURE_STATUS_NOTIFY_BIT_CHANSEL_PIX_SHORT MK_BIT64(42) + + /** Frame too short - too few embedded data lines */ + #define CAPTURE_STATUS_NOTIFY_BIT_CHANSEL_EMB_SHORT
MK_BIT64(43) + + /** VI hardware failure detected by Permanent Fault Software Diagnostics (PFSD) */ + #define CAPTURE_STATUS_NOTIFY_BIT_PFSD_FAULT MK_BIT64(44) + + /** Frame end forced by channel reset */ + #define CAPTURE_STATUS_NOTIFY_BIT_CHANSEL_FAULT_FE MK_BIT64(45) + + /** + * Incoming frame not matched by any VI channel. + * This error is usually caused by not having a pending + * capture request ready to catch the incoming frame. + * The frame will be dropped. + */ + #define CAPTURE_STATUS_NOTIFY_BIT_CHANSEL_NO_MATCH MK_BIT64(46) + + /** + * More than one VI channel match the same incoming frame. + * Two or more channels have been configured for the same + * sensor. Only one of the channels will capture the frame. + */ + #define CAPTURE_STATUS_NOTIFY_BIT_CHANSEL_COLLISION MK_BIT64(47) + + /** Channel reconfigured while in frame */ + #define CAPTURE_STATUS_NOTIFY_BIT_CHANSEL_LOAD_FRAMED MK_BIT64(48) + + /** Internal overflow in ATOMP packer. Should not happen. */ + #define CAPTURE_STATUS_NOTIFY_BIT_ATOMP_PACKER_OVERFLOW MK_BIT64(49) + + /** Frame truncated while writing to system memory. Indicates memory back-pressure. */ + #define CAPTURE_STATUS_NOTIFY_BIT_ATOMP_FRAME_TRUNCATED MK_BIT64(50) + + /** Frame dropped while writing to system memory. Indicates memory back-pressure. */ + #define CAPTURE_STATUS_NOTIFY_BIT_ATOMP_FRAME_TOSSED MK_BIT64(51) + + /** Non-classified error */ + #define CAPTURE_STATUS_NOTIFY_BIT_NON_CLASSIFIED_0 MK_BIT64(63) + /** @} */ + + /** + * NVCSI error status. + * + * Error bits representing errors which were reported by NVCSI since + * previous capture. + * + * Multiple errors of same kind are collated into single bit. + * + * NVCSI error status is likely, but not guaranteed to affect current frame: + * + * 1. NVCSI error status is retrieved at end-of-frame VI event. NVCSI may already + * retrieve next frame data at this time. + * + * 2. 
NVCSI Error may also indicate error from older CSI data if there were frame + * skips between captures. + * + */ + struct nvcsi_error_status nvcsi_err_status; + +} CAPTURE_IVC_ALIGN; + +/** + * @brief The compand configuration describes a piece-wise linear + * transformation function used by the VI companding module. + */ +#define VI_NUM_COMPAND_KNEEPTS MK_SIZE(10) +struct vi_compand_config { + /** Input position for this knee point */ + uint32_t base[VI_NUM_COMPAND_KNEEPTS]; + /** Scale above this knee point */ + uint32_t scale[VI_NUM_COMPAND_KNEEPTS]; + /** Output offset for this knee point */ + uint32_t offset[VI_NUM_COMPAND_KNEEPTS]; +} CAPTURE_IVC_ALIGN; + +/** + * @brief VI Phase Detection Auto Focus (PDAF) configuration + * + * The PDAF data consists of special pixels that will be extracted from a frame + * and written to a separate surface. The PDAF pattern is shared by all capture channels + * and should be configured before enabling PDAF pixel extraction for a specific capture. + * + * Pixel { x, y } will be output to the PDAF surface (surface1) if the + * bit at position (x % 32) in pattern[y % 32] is set. + * + * Pixel { x, y } in the main output surface (surface0) will be + * replaced by a default pixel value if the bit at position (x % 32) + * in pattern_replace[y % 32] is set. + */ +#define VI_PDAF_PATTERN_SIZE 32 +struct vi_pdaf_config { + /** + * Pixel bitmap, by line: PATTERN[y0][x0] is set if the pixel (x % 32) == x0, (y % 32) == y0 + * should be output to the PDAF surface + */ + uint32_t pattern[VI_PDAF_PATTERN_SIZE]; + /** + * Pixel bitmap used to replace PDAF pixels, by line: + * PATTERN_REPLACE[y0][x0] is set if the pixel (x % 32) == x0, (y % 32) == y0 + * should be replaced by the default pixel value in the main output surface (surface0) + */ + uint32_t pattern_replace[VI_PDAF_PATTERN_SIZE]; +} CAPTURE_IVC_ALIGN; + +/** + * @brief VI SYNCGEN unit configuration. + */ +struct vi_syncgen_config { + /** + * Half cycle - Unsigned floating point.
+ * Decimal point position is given by FRAC_BITS in HCLK_DIV_FMT. + * Frequency of HCLK = SYNCGEN_CLK / (HALF_CYCLE * 2) + */ + uint32_t hclk_div; + /** Number of fractional bits of HALF_CYCLE */ + uint8_t hclk_div_fmt; + /** Horizontal sync signal */ + uint8_t xhs_width; + /** Vertical sync signal */ + uint8_t xvs_width; + /** Cycles to delay after XVS before assert XHS */ + uint8_t xvs_to_xhs_delay; + /** Resevred - UNUSED */ + uint16_t cvs_interval; + /** Reserved */ + uint16_t pad1__; + /** Reserved */ + uint32_t pad2__; +} CAPTURE_IVC_ALIGN; + + +/** + * @brief VI PFSD Configuration. + * + * PDAF replacement function is used in PFSD mode. Pixels within ROI are replaced + * by test pattern, and output pixels from the ROI are compared against expected + * values. + */ +struct vi_pfsd_config { + /** + * @brief Area in which the pixels are replaced with test pattern + * + * Note that all coordinates are inclusive. + */ + struct replace_roi_rec { + /** left pixel column of the replacement ROI */ + uint16_t left; + /** right pixel column of the replacement ROI (inclusive) */ + uint16_t right; + /** top pixel row of the replacement ROI */ + uint16_t top; + /** bottom pixel row of the replacement ROI (inclusive) */ + uint16_t bottom; + } replace_roi; + + /** test pattern used to replace pixels within the ROI */ + uint32_t replace_value; + + /** + * Count of items in the @ref expected array. + * If zero, PFSD will not be performed for this frame + */ + uint32_t expected_count; + + /** + * Array of area definitions in output surfaces that shall be verified. + * For YUV422 semi-planar, [0] is Y surface and [1] is UV surface. + */ + struct { + /** Byte offset for the roi from beginning of the surface */ + uint32_t offset; + /** Number of bytes that need to be read from the output surface */ + uint32_t len; + /** Expected value. 
The 4 byte pattern is repeated until @ref len + * bytes have been compared + */ + uint8_t value[4]; + } expected[VI_NUM_VI_PFSD_SURFACES]; + +} CAPTURE_IVC_ALIGN; + +/** + * @defgroup CaptureFrameFlags + * Capture frame specific flags + */ +/** @{ */ +/** Enables capture status reporting for the channel */ +#define CAPTURE_FLAG_STATUS_REPORT_ENABLE MK_BIT32(0) +/** Enables error reporting for the channel */ +#define CAPTURE_FLAG_ERROR_REPORT_ENABLE MK_BIT32(1) +/** @} */ + +/** + * @brief Memory surface specs passed from KMD to RCE + */ +struct memoryinfo_surface { + /** Surface iova address */ + uint64_t base_address; + /** Surface size */ + uint64_t size; +}; + +/** + * @brief VI capture descriptor memory information + * + * VI capture descriptor memory information shared between + * KMD and RCE only. This information cannot be part of + * capture descriptor since descriptor is shared with usermode + * application. + */ +struct capture_descriptor_memoryinfo { + struct memoryinfo_surface surface[VI_NUM_ATOMP_SURFACES]; + /** Base address of engine status surface */ + uint64_t engine_status_surface_base_address; + /** Size of engine status surface */ + uint64_t engine_status_surface_size; + /** pad for alignment */ + uint32_t reserved32[12]; +} CAPTURE_DESCRIPTOR_ALIGN; + +/** + * @brief VI frame capture context. 
+ */ +struct capture_descriptor { + /** VI frame sequence number*/ + uint32_t sequence; + /** See @ref CaptureFrameFlags "Capture frame specific flags" */ + uint32_t capture_flags; + /** Task descriptor frame start timeout in milliseconds */ + uint16_t frame_start_timeout; + /** Task descriptor frame complete timeout in milliseconds */ + uint16_t frame_completion_timeout; + +#define CAPTURE_PREFENCE_ARRAY_SIZE 2 + + /** @deprecated */ + uint32_t prefence_count CAMRTC_DEPRECATED; + /** @deprecated */ + struct syncpoint_info prefence[CAPTURE_PREFENCE_ARRAY_SIZE] CAMRTC_DEPRECATED; + + /** VI Channel configuration */ + struct vi_channel_config ch_cfg; + + /** VI PFSD Configuration */ + struct vi_pfsd_config pfsd_cfg; + + /** Engine result record – written by Falcon */ + struct engine_status_surface engine_status; + + /** Capture result record – written by RCE */ + struct capture_status status; + + /** Reserved */ + uint32_t pad32__[14]; + +} CAPTURE_DESCRIPTOR_ALIGN; + +/** + * @brief - Event data used for event injection + */ +struct event_inject_msg { + /** UMD populates with capture status events. 
RCE converts to reg offset */ + uint32_t tag; + /** Timestamp of event */ + uint32_t stamp; + /** Bits [0:31] of event data */ + uint32_t data; + /** Bits [32:63] of event data */ + uint32_t data_ext; +}; + +#define VI_HSM_CHANSEL_ERROR_MASK_BIT_NOMATCH MK_U32(1) +/** + * @brief VI EC/HSM global CHANSEL error masking + */ +struct vi_hsm_chansel_error_mask_config { + /** "Errors with threshold" bit mask */ + uint32_t chansel_correctable_mask; + /** "Uncorrected error" bit mask */ + uint32_t chansel_uncorrectable_mask; +} CAPTURE_IVC_ALIGN; + +/** + * NvPhy attributes + */ +/** + * @defgroup NvPhyType + * NvCSI Physical stream type + * @{ + */ +#define NVPHY_TYPE_CSI MK_U32(0) +#define NVPHY_TYPE_SLVSEC MK_U32(1) +/**@}*/ + +/** + * NVCSI attributes + */ +/** + * @defgroup NvCsiPort NvCSI Port + * @{ + */ +#define NVCSI_PORT_A MK_U32(0x0) +#define NVCSI_PORT_B MK_U32(0x1) +#define NVCSI_PORT_C MK_U32(0x2) +#define NVCSI_PORT_D MK_U32(0x3) +#define NVCSI_PORT_E MK_U32(0x4) +#define NVCSI_PORT_F MK_U32(0x5) +#define NVCSI_PORT_G MK_U32(0x6) +#define NVCSI_PORT_H MK_U32(0x7) +#define NVCSI_PORT_UNSPECIFIED MK_U32(0xFFFFFFFF) +/**@}*/ + +/** + * @defgroup NvCsiStream NVCSI stream id + * @{ + */ +#define NVCSI_STREAM_0 MK_U32(0x0) +#define NVCSI_STREAM_1 MK_U32(0x1) +#define NVCSI_STREAM_2 MK_U32(0x2) +#define NVCSI_STREAM_3 MK_U32(0x3) +#define NVCSI_STREAM_4 MK_U32(0x4) +#define NVCSI_STREAM_5 MK_U32(0x5) +/**@}*/ + +/** + * @defgroup NvCsiVirtualChannel NVCSI virtual channels + * @{ + */ +#define NVCSI_VIRTUAL_CHANNEL_0 MK_U32(0x0) +#define NVCSI_VIRTUAL_CHANNEL_1 MK_U32(0x1) +#define NVCSI_VIRTUAL_CHANNEL_2 MK_U32(0x2) +#define NVCSI_VIRTUAL_CHANNEL_3 MK_U32(0x3) +#define NVCSI_VIRTUAL_CHANNEL_4 MK_U32(0x4) +#define NVCSI_VIRTUAL_CHANNEL_5 MK_U32(0x5) +#define NVCSI_VIRTUAL_CHANNEL_6 MK_U32(0x6) +#define NVCSI_VIRTUAL_CHANNEL_7 MK_U32(0x7) +#define NVCSI_VIRTUAL_CHANNEL_8 MK_U32(0x8) +#define NVCSI_VIRTUAL_CHANNEL_9 MK_U32(0x9) +#define NVCSI_VIRTUAL_CHANNEL_10 
MK_U32(0xA) +#define NVCSI_VIRTUAL_CHANNEL_11 MK_U32(0xB) +#define NVCSI_VIRTUAL_CHANNEL_12 MK_U32(0xC) +#define NVCSI_VIRTUAL_CHANNEL_13 MK_U32(0xD) +#define NVCSI_VIRTUAL_CHANNEL_14 MK_U32(0xE) +#define NVCSI_VIRTUAL_CHANNEL_15 MK_U32(0xF) +/**@}*/ + +/** + * @defgroup NvCsiConfigFlags NvCSI Configuration Flags + * @{ + */ +/** NVCSI config flags */ +#define NVCSI_CONFIG_FLAG_BRICK MK_BIT32(0) +/** NVCSI config flags */ +#define NVCSI_CONFIG_FLAG_CIL MK_BIT32(1) +/** Enable user-provided error handling configuration */ +#define NVCSI_CONFIG_FLAG_ERROR MK_BIT32(2) +/**@}*/ + +/** + * @brief Number of lanes/trios per brick + */ +#define NVCSI_BRICK_NUM_LANES MK_U32(4) +/** + * @brief Number of override exception data types + */ +#define NVCSI_NUM_NOOVERRIDE_DT MK_U32(5) + +/** + * @defgroup NvCsiPhyType NVCSI physical types + * @{ + */ +/** NVCSI D-PHY physical layer */ +#define NVCSI_PHY_TYPE_DPHY MK_U32(0) +/** NVCSI C-PHY physical layer */ +#define NVCSI_PHY_TYPE_CPHY MK_U32(1) +/** @} */ + +/** + * @defgroup NvCsiLaneSwizzle NVCSI lane swizzles + * @{ + */ +/** 00000 := A0 A1 B0 B1 --> A0 A1 B0 B1 */ +#define NVCSI_LANE_SWIZZLE_A0A1B0B1 MK_U32(0x00) +/** 00001 := A0 A1 B0 B1 --> A0 A1 B1 B0 */ +#define NVCSI_LANE_SWIZZLE_A0A1B1B0 MK_U32(0x01) +/** 00010 := A0 A1 B0 B1 --> A0 B0 B1 A1 */ +#define NVCSI_LANE_SWIZZLE_A0B0B1A1 MK_U32(0x02) +/** 00011 := A0 A1 B0 B1 --> A0 B0 A1 B1 */ +#define NVCSI_LANE_SWIZZLE_A0B0A1B1 MK_U32(0x03) +/** 00100 := A0 A1 B0 B1 --> A0 B1 A1 B0 */ +#define NVCSI_LANE_SWIZZLE_A0B1A1B0 MK_U32(0x04) +/** 00101 := A0 A1 B0 B1 --> A0 B1 B0 A1 */ +#define NVCSI_LANE_SWIZZLE_A0B1B0A1 MK_U32(0x05) +/** 00110 := A0 A1 B0 B1 --> A1 A0 B0 B1 */ +#define NVCSI_LANE_SWIZZLE_A1A0B0B1 MK_U32(0x06) +/** 00111 := A0 A1 B0 B1 --> A1 A0 B1 B0 */ +#define NVCSI_LANE_SWIZZLE_A1A0B1B0 MK_U32(0x07) +/** 01000 := A0 A1 B0 B1 --> A1 B0 B1 A0 */ +#define NVCSI_LANE_SWIZZLE_A1B0B1A0 MK_U32(0x08) +/** 01001 := A0 A1 B0 B1 --> A1 B0 A0 B1 */ +#define
NVCSI_LANE_SWIZZLE_A1B0A0B1 MK_U32(0x09) +/** 01010 := A0 A1 B0 B1 --> A1 B1 A0 B0 */ +#define NVCSI_LANE_SWIZZLE_A1B1A0B0 MK_U32(0x0A) +/** 01011 := A0 A1 B0 B1 --> A1 B1 B0 A0 */ +#define NVCSI_LANE_SWIZZLE_A1B1B0A0 MK_U32(0x0B) +/** 01100 := A0 A1 B0 B1 --> B0 A1 A0 B1 */ +#define NVCSI_LANE_SWIZZLE_B0A1A0B1 MK_U32(0x0C) +/** 01101 := A0 A1 B0 B1 --> B0 A1 B1 A0 */ +#define NVCSI_LANE_SWIZZLE_B0A1B1A0 MK_U32(0x0D) +/** 01110 := A0 A1 B0 B1 --> B0 A0 B1 A1 */ +#define NVCSI_LANE_SWIZZLE_B0A0B1A1 MK_U32(0x0E) +/** 01111 := A0 A1 B0 B1 --> B0 A0 A1 B1 */ +#define NVCSI_LANE_SWIZZLE_B0A0A1B1 MK_U32(0x0F) +/** 10000 := A0 A1 B0 B1 --> B0 B1 A1 A0 */ +#define NVCSI_LANE_SWIZZLE_B0B1A1A0 MK_U32(0x10) +/** 10001 := A0 A1 B0 B1 --> B0 B1 A0 A1 */ +#define NVCSI_LANE_SWIZZLE_B0B1A0A1 MK_U32(0x11) +/** 10010 := A0 A1 B0 B1 --> B1 A1 B0 A0 */ +#define NVCSI_LANE_SWIZZLE_B1A1B0A0 MK_U32(0x12) +/** 10011 := A0 A1 B0 B1 --> B1 A1 A0 B0 */ +#define NVCSI_LANE_SWIZZLE_B1A1A0B0 MK_U32(0x13) +/** 10100 := A0 A1 B0 B1 --> B1 B0 A0 A1 */ +#define NVCSI_LANE_SWIZZLE_B1B0A0A1 MK_U32(0x14) +/** 10101 := A0 A1 B0 B1 --> B1 B0 A1 A0 */ +#define NVCSI_LANE_SWIZZLE_B1B0A1A0 MK_U32(0x15) +/** 10110 := A0 A1 B0 B1 --> B1 A0 A1 B0 */ +#define NVCSI_LANE_SWIZZLE_B1A0A1B0 MK_U32(0x16) +/** 10111 := A0 A1 B0 B1 --> B1 A0 B0 A1 */ +#define NVCSI_LANE_SWIZZLE_B1A0B0A1 MK_U32(0x17) +/** @} */ + +/** + * @defgroup NvCsiDPhyPolarity NVCSI D-phy Polarity + * @{ + */ +#define NVCSI_DPHY_POLARITY_NOSWAP MK_U32(0) +#define NVCSI_DPHY_POLARITY_SWAP MK_U32(1) +/** @} */ + +/** + * @defgroup NvCsiCPhyPolarity NVCSI C-phy Polarity + * @{ + */ +/* 000 := A B C --> A B C */ +#define NVCSI_CPHY_POLARITY_ABC MK_U32(0x00) +/* 001 := A B C --> A C B */ +#define NVCSI_CPHY_POLARITY_ACB MK_U32(0x01) +/* 010 := A B C --> B C A */ +#define NVCSI_CPHY_POLARITY_BCA MK_U32(0x02) +/* 011 := A B C --> B A C */ +#define NVCSI_CPHY_POLARITY_BAC MK_U32(0x03) +/* 100 := A B C --> C A B */ +#define NVCSI_CPHY_POLARITY_CAB 
MK_U32(0x04) +/* 101 := A B C --> C B A */ +#define NVCSI_CPHY_POLARITY_CBA MK_U32(0x05) +/** @} */ + +/** + * @brief NvCSI Brick configuration + */ +struct nvcsi_brick_config { + /** Select PHY @ref NvCsiPhyType "mode" for both partitions */ + uint32_t phy_mode; + /** See @ref NvCsiLaneSwizzle "NVCSI Lane swizzles" control + * for bricks. Valid for C-PHY and D-PHY modes. + */ + uint32_t lane_swizzle; + /** + * Polarity control for each lane. Value depends on @a phy_mode. + * See @ref NvCsiDPhyPolarity "NVCSI D-phy Polarity" + * or @ref NvCsiCPhyPolarity "NVCSI C-phy Polarity" + */ + uint8_t lane_polarity[NVCSI_BRICK_NUM_LANES]; + /** Reserved */ + uint32_t pad32__; +} CAPTURE_IVC_ALIGN; + +/** + * @brief NvCSI Control and Interface Logic Configuration + */ +struct nvcsi_cil_config { + /** Number of data lanes used (0-4) */ + uint8_t num_lanes; + /** LP bypass mode (boolean) */ + uint8_t lp_bypass_mode; + /** Set MIPI THS-SETTLE timing (LP clock cycles with SoC default clock rate) */ + uint8_t t_hs_settle; + /** Set MIPI TCLK-SETTLE timing (LP clock cycles with SoC default clock rate) */ + uint8_t t_clk_settle; + /** @deprecated */ + uint32_t cil_clock_rate CAMRTC_DEPRECATED; + /** MIPI clock rate for D-Phy. 
Symbol rate for C-Phy [kHz] */ + uint32_t mipi_clock_rate; + /** Reserved */ + uint32_t pad32__; +} CAPTURE_IVC_ALIGN; + +/** + * @defgroup HsmCsimuxErrors Bitmask for CSIMUX errors reported to HSM + */ +/** @{ */ +/** Error bit indicating next packet after a frame end was not a frame start */ +#define VI_HSM_CSIMUX_ERROR_MASK_BIT_SPURIOUS_EVENT MK_BIT32(0) +/** Error bit indicating FIFO for the stream has over flowed */ +#define VI_HSM_CSIMUX_ERROR_MASK_BIT_OVERFLOW MK_BIT32(1) +/** Error bit indicating frame start packet lost due to FIFO overflow */ +#define VI_HSM_CSIMUX_ERROR_MASK_BIT_LOF MK_BIT32(2) +/** Error bit indicating that an illegal packet has been sent from NVCSI */ +#define VI_HSM_CSIMUX_ERROR_MASK_BIT_BADPKT MK_BIT32(3) +/** @} */ + +/** + * @brief VI EC/HSM error masking configuration + */ +struct vi_hsm_csimux_error_mask_config { + /** Mask correctable CSIMUX. See @ref HsmCsimuxErrors "CSIMUX error bitmask". */ + uint32_t error_mask_correctable; + /** Mask uncorrectable CSIMUX. See @ref HsmCsimuxErrors "CSIMUX error bitmask". 
*/ + uint32_t error_mask_uncorrectable; +} CAPTURE_IVC_ALIGN; + +/** + * @defgroup NVCSI_HOST1X_INTR_FLAGS NVCSI Host1x client global interrupt flags + * @{ + */ +/** Error bit indicating Host1x client timeout error */ +#define NVCSI_INTR_FLAG_HOST1X_TIMEOUT_ERR MK_BIT32(0) +/** @} */ + +/** + * @defgroup NVCSI_STREAM_INTR_FLAGS NVCSI stream novc+vc interrupt flags + * @{ + */ +/** Multi bit error in the DPHY packet header */ +#define NVCSI_INTR_FLAG_STREAM_NOVC_ERR_PH_ECC_MULTI_BIT MK_BIT32(0) +/** Error bit indicating both of the CPHY packet header CRC check fail */ +#define NVCSI_INTR_FLAG_STREAM_NOVC_ERR_PH_BOTH_CRC MK_BIT32(1) +/** Error bit indicating VC Pixel Parser (PP) FSM timeout for a pixel line.*/ +#define NVCSI_INTR_FLAG_STREAM_VC_ERR_PPFSM_TIMEOUT MK_BIT32(2) +/** Error bit indicating VC has packet with single bit ECC error in the packet header*/ +#define NVCSI_INTR_FLAG_STREAM_VC_ERR_PH_ECC_SINGLE_BIT MK_BIT32(3) +/** Error bit indicating VC has packet payload crc check fail */ +#define NVCSI_INTR_FLAG_STREAM_VC_ERR_PD_CRC MK_BIT32(4) +/** Error bit indicating VC has packet terminate before getting the expect word count data. */ +#define NVCSI_INTR_FLAG_STREAM_VC_ERR_PD_WC_SHORT MK_BIT32(5) +/** Error bit indicating VC has one of the CPHY packet header CRC check fail. 
*/ +#define NVCSI_INTR_FLAG_STREAM_VC_ERR_PH_SINGLE_CRC MK_BIT32(6) +/** @} */ + +/** + * @defgroup NVCSI_CIL_INTR_FLAGS NVCSI phy/cil interrupt flags + * @{ + */ +#define NVCSI_INTR_FLAG_CIL_INTR_DPHY_ERR_CLK_LANE_CTRL MK_BIT32(0) +#define NVCSI_INTR_FLAG_CIL_INTR_DATA_LANE_ERR0_SOT_SB MK_BIT32(1) +#define NVCSI_INTR_FLAG_CIL_INTR_DATA_LANE_ERR0_SOT_MB MK_BIT32(2) +#define NVCSI_INTR_FLAG_CIL_INTR_DATA_LANE_ERR0_CTRL MK_BIT32(3) +#define NVCSI_INTR_FLAG_CIL_INTR_DATA_LANE_ERR0_RXFIFO_FULL MK_BIT32(4) +#define NVCSI_INTR_FLAG_CIL_INTR_DATA_LANE_ERR1_SOT_SB MK_BIT32(5) +#define NVCSI_INTR_FLAG_CIL_INTR_DATA_LANE_ERR1_SOT_MB MK_BIT32(6) +#define NVCSI_INTR_FLAG_CIL_INTR_DATA_LANE_ERR1_CTRL MK_BIT32(7) +#define NVCSI_INTR_FLAG_CIL_INTR_DATA_LANE_ERR1_RXFIFO_FULL MK_BIT32(8) +#define NVCSI_INTR_FLAG_CIL_INTR_DPHY_DESKEW_CALIB_ERR_LANE0 MK_BIT32(9) +#define NVCSI_INTR_FLAG_CIL_INTR_DPHY_DESKEW_CALIB_ERR_LANE1 MK_BIT32(10) +#define NVCSI_INTR_FLAG_CIL_INTR_DPHY_DESKEW_CALIB_ERR_CTRL MK_BIT32(11) +#define NVCSI_INTR_FLAG_CIL_INTR_DPHY_LANE_ALIGN_ERR MK_BIT32(12) +#define NVCSI_INTR_FLAG_CIL_INTR_DATA_LANE_ERR0_ESC_MODE_SYNC MK_BIT32(13) +#define NVCSI_INTR_FLAG_CIL_INTR_DATA_LANE_ERR1_ESC_MODE_SYNC MK_BIT32(14) +#define NVCSI_INTR_FLAG_CIL_INTR_DATA_LANE_ERR0_SOT_2LSB_FULL MK_BIT32(15) +#define NVCSI_INTR_FLAG_CIL_INTR_DATA_LANE_ERR1_SOT_2LSB_FULL MK_BIT32(16) +/** @} */ + +/** + * @defgroup NVCSI_CIL_INTR0_FLAGS NVCSI phy/cil intr0 flags + * @{ + */ +#define NVCSI_INTR_FLAG_CIL_INTR0_DPHY_ERR_CLK_LANE_CTRL MK_BIT32(0) +#define NVCSI_INTR_FLAG_CIL_INTR0_DATA_LANE_ERR0_SOT_SB MK_BIT32(1) +#define NVCSI_INTR_FLAG_CIL_INTR0_DATA_LANE_ERR0_SOT_MB MK_BIT32(2) +#define NVCSI_INTR_FLAG_CIL_INTR0_DATA_LANE_ERR0_CTRL MK_BIT32(3) +#define NVCSI_INTR_FLAG_CIL_INTR0_DATA_LANE_ERR0_RXFIFO_FULL MK_BIT32(4) +#define NVCSI_INTR_FLAG_CIL_INTR0_DATA_LANE_ERR1_SOT_SB MK_BIT32(5) +#define NVCSI_INTR_FLAG_CIL_INTR0_DATA_LANE_ERR1_SOT_MB MK_BIT32(6) +#define 
NVCSI_INTR_FLAG_CIL_INTR0_DATA_LANE_ERR1_CTRL MK_BIT32(7) +#define NVCSI_INTR_FLAG_CIL_INTR0_DATA_LANE_ERR1_RXFIFO_FULL MK_BIT32(8) +#define NVCSI_INTR_FLAG_CIL_INTR0_DATA_LANE_ERR0_SOT_2LSB_FULL MK_BIT32(9) +#define NVCSI_INTR_FLAG_CIL_INTR0_DATA_LANE_ERR1_SOT_2LSB_FULL MK_BIT32(10) +#define NVCSI_INTR_FLAG_CIL_INTR0_DATA_LANE_ERR0_ESC_MODE_SYNC MK_BIT32(19) +#define NVCSI_INTR_FLAG_CIL_INTR0_DATA_LANE_ERR1_ESC_MODE_SYNC MK_BIT32(20) +#define NVCSI_INTR_FLAG_CIL_INTR0_DPHY_DESKEW_CALIB_DONE_LANE0 MK_BIT32(22) +#define NVCSI_INTR_FLAG_CIL_INTR0_DPHY_DESKEW_CALIB_DONE_LANE1 MK_BIT32(23) +#define NVCSI_INTR_FLAG_CIL_INTR0_DPHY_DESKEW_CALIB_DONE_CTRL MK_BIT32(24) +#define NVCSI_INTR_FLAG_CIL_INTR0_DPHY_DESKEW_CALIB_ERR_LANE0 MK_BIT32(25) +#define NVCSI_INTR_FLAG_CIL_INTR0_DPHY_DESKEW_CALIB_ERR_LANE1 MK_BIT32(26) +#define NVCSI_INTR_FLAG_CIL_INTR0_DPHY_DESKEW_CALIB_ERR_CTRL MK_BIT32(27) +#define NVCSI_INTR_FLAG_CIL_INTR0_DPHY_LANE_ALIGN_ERR MK_BIT32(28) +#define NVCSI_INTR_FLAG_CIL_INTR0_CPHY_CLK_CAL_DONE_TRIO0 MK_BIT32(29) +#define NVCSI_INTR_FLAG_CIL_INTR0_CPHY_CLK_CAL_DONE_TRIO1 MK_BIT32(30) +/** @} */ + +/** + * @defgroup NVCSI_CIL_INTR1_FLAGS NVCSI phy/cil intr1 flags + * @{ + */ +#define NVCSI_INTR_FLAG_CIL_INTR1_DATA_LANE_ESC_CMD_REC0 MK_BIT32(0) +#define NVCSI_INTR_FLAG_CIL_INTR1_DATA_LANE_ESC_DATA_REC0 MK_BIT32(1) +#define NVCSI_INTR_FLAG_CIL_INTR1_DATA_LANE_ESC_CMD_REC1 MK_BIT32(2) +#define NVCSI_INTR_FLAG_CIL_INTR1_DATA_LANE_ESC_DATA_REC1 MK_BIT32(3) +#define NVCSI_INTR_FLAG_CIL_INTR1_REMOTERST_TRIGGER_INT0 MK_BIT32(4) +#define NVCSI_INTR_FLAG_CIL_INTR1_ULPS_TRIGGER_INT0 MK_BIT32(5) +#define NVCSI_INTR_FLAG_CIL_INTR1_LPDT_INT0 MK_BIT32(6) +#define NVCSI_INTR_FLAG_CIL_INTR1_REMOTERST_TRIGGER_INT1 MK_BIT32(7) +#define NVCSI_INTR_FLAG_CIL_INTR1_ULPS_TRIGGER_INT1 MK_BIT32(8) +#define NVCSI_INTR_FLAG_CIL_INTR1_LPDT_INT1 MK_BIT32(9) +#define NVCSI_INTR_FLAG_CIL_INTR1_DPHY_CLK_LANE_ULPM_REQ MK_BIT32(10) +/** @} */ + +/** + * @defgroup NVCSI_INTR_CONFIG_MASK NVCSI 
interrupt config bit masks + * @{ + */ +#define NVCSI_INTR_CONFIG_MASK_HOST1X MK_U32(0x1) +#define NVCSI_INTR_CONFIG_MASK_STATUS2VI MK_U32(0xffff) +#define NVCSI_INTR_CONFIG_MASK_STREAM_NOVC MK_U32(0x3) +#define NVCSI_INTR_CONFIG_MASK_STREAM_VC MK_U32(0x7c) +#define NVCSI_INTR_CONFIG_MASK_CIL_INTR MK_U32(0x1ffff) +#define NVCSI_INTR_CONFIG_MASK_CIL_INTR0 MK_U32(0x7fd807ff) +#define NVCSI_INTR_CONFIG_MASK_CIL_INTR1 MK_U32(0x7ff) +/** @} */ + +/** + * @defgroup NVCSI_INTR_CONFIG_MASK_SHIFTS NVCSI interrupt config bit shifts + * @{ + */ +#define NVCSI_INTR_CONFIG_SHIFT_STREAM_NOVC MK_U32(0x0) +#define NVCSI_INTR_CONFIG_SHIFT_STREAM_VC MK_U32(0x2) +/** @} */ + +/** + * @brief User-defined error configuration. + * + * Flag NVCSI_CONFIG_FLAG_ERROR must be set to enable these settings, + * otherwise default settings will be used. + */ +struct nvcsi_error_config { + /** + * @brief Host1x client global interrupt mask (to LIC) + * Bit field mapping: @ref NVCSI_HOST1X_INTR_FLAGS + */ + uint32_t host1x_intr_mask_lic; + /** + * @brief Host1x client global interrupt mask (to HSM) + * Bit field mapping: @ref NVCSI_HOST1X_INTR_FLAGS + */ + uint32_t host1x_intr_mask_hsm; + /** + * @brief Host1x client global interrupt error type classification + * (to HSM) + * Bit field mapping: @ref NVCSI_HOST1X_INTR_FLAGS + * (0 - corrected, 1 - uncorrected) + */ + uint32_t host1x_intr_type_hsm; + + /** NVCSI status2vi forwarding mask (to VI NOTIFY) */ + uint32_t status2vi_notify_mask; + + /** + * @brief NVCSI stream novc+vc interrupt mask (to LIC) + * Bit field mapping: @ref NVCSI_STREAM_INTR_FLAGS + */ + uint32_t stream_intr_mask_lic; + /** + * @brief NVCSI stream novc+vc interrupt mask (to HSM) + * Bit field mapping: @ref NVCSI_STREAM_INTR_FLAGS + */ + uint32_t stream_intr_mask_hsm; + /** + * @brief NVCSI stream novc+vc interrupt error type classification + * (to HSM) + * Bit field mapping: @ref NVCSI_STREAM_INTR_FLAGS + * (0 - corrected, 1 - uncorrected) + */ + uint32_t stream_intr_type_hsm; 
+ + /** + * @brief NVCSI phy/cil interrupt mask (to HSM) + * Bit field mapping: @ref NVCSI_CIL_INTR_FLAGS + */ + uint32_t cil_intr_mask_hsm; + /** + * @brief NVCSI phy/cil interrupt error type classification + * (to HSM) + * Bit field mapping: @ref NVCSI_CIL_INTR_FLAGS + * (0 - corrected, 1 - uncorrected) + */ + uint32_t cil_intr_type_hsm; + /** + * @brief NVCSI phy/cil intr0 interrupt mask (to LIC) + * Bit field mapping: @ref NVCSI_CIL_INTR0_FLAGS + */ + uint32_t cil_intr0_mask_lic; + /** + * @brief NVCSI phy/cil intr1 interrupt mask (to LIC) + * Bit field mapping: @ref NVCSI_CIL_INTR1_FLAGS + */ + uint32_t cil_intr1_mask_lic; + + /** Reserved */ + uint32_t pad32__; + + /** VI EC/HSM error masking configuration */ + struct vi_hsm_csimux_error_mask_config csimux_config; +} CAPTURE_IVC_ALIGN; + +/** + * @defgroup NvCsiDataType NVCSI datatypes + * @{ + */ +#define NVCSI_DATATYPE_UNSPECIFIED MK_U32(0) +#define NVCSI_DATATYPE_YUV420_8 MK_U32(24) +#define NVCSI_DATATYPE_YUV420_10 MK_U32(25) +#define NVCSI_DATATYPE_LEG_YUV420_8 MK_U32(26) +#define NVCSI_DATATYPE_YUV420CSPS_8 MK_U32(28) +#define NVCSI_DATATYPE_YUV420CSPS_10 MK_U32(29) +#define NVCSI_DATATYPE_YUV422_8 MK_U32(30) +#define NVCSI_DATATYPE_YUV422_10 MK_U32(31) +#define NVCSI_DATATYPE_RGB444 MK_U32(32) +#define NVCSI_DATATYPE_RGB555 MK_U32(33) +#define NVCSI_DATATYPE_RGB565 MK_U32(34) +#define NVCSI_DATATYPE_RGB666 MK_U32(35) +#define NVCSI_DATATYPE_RGB888 MK_U32(36) +#define NVCSI_DATATYPE_RAW6 MK_U32(40) +#define NVCSI_DATATYPE_RAW7 MK_U32(41) +#define NVCSI_DATATYPE_RAW8 MK_U32(42) +#define NVCSI_DATATYPE_RAW10 MK_U32(43) +#define NVCSI_DATATYPE_RAW12 MK_U32(44) +#define NVCSI_DATATYPE_RAW14 MK_U32(45) +#define NVCSI_DATATYPE_RAW16 MK_U32(46) +#define NVCSI_DATATYPE_RAW20 MK_U32(47) +#define NVCSI_DATATYPE_USER_1 MK_U32(48) +#define NVCSI_DATATYPE_USER_2 MK_U32(49) +#define NVCSI_DATATYPE_USER_3 MK_U32(50) +#define NVCSI_DATATYPE_USER_4 MK_U32(51) +#define NVCSI_DATATYPE_USER_5 MK_U32(52) +#define 
NVCSI_DATATYPE_USER_6 MK_U32(53) +#define NVCSI_DATATYPE_USER_7 MK_U32(54) +#define NVCSI_DATATYPE_USER_8 MK_U32(55) +#define NVCSI_DATATYPE_UNKNOWN MK_U32(64) +/** @} */ + +/* DEPRECATED - to be removed */ +/** T210 (also exists in T186) */ +#define NVCSI_PATTERN_GENERATOR_T210 MK_U32(1) +/** T186 only */ +#define NVCSI_PATTERN_GENERATOR_T186 MK_U32(2) +/** T194 only */ +#define NVCSI_PATTERN_GENERATOR_T194 MK_U32(3) + +/* DEPRECATED - to be removed */ +#define NVCSI_DATA_TYPE_Unspecified MK_U32(0) +#define NVCSI_DATA_TYPE_YUV420_8 MK_U32(24) +#define NVCSI_DATA_TYPE_YUV420_10 MK_U32(25) +#define NVCSI_DATA_TYPE_LEG_YUV420_8 MK_U32(26) +#define NVCSI_DATA_TYPE_YUV420CSPS_8 MK_U32(28) +#define NVCSI_DATA_TYPE_YUV420CSPS_10 MK_U32(29) +#define NVCSI_DATA_TYPE_YUV422_8 MK_U32(30) +#define NVCSI_DATA_TYPE_YUV422_10 MK_U32(31) +#define NVCSI_DATA_TYPE_RGB444 MK_U32(32) +#define NVCSI_DATA_TYPE_RGB555 MK_U32(33) +#define NVCSI_DATA_TYPE_RGB565 MK_U32(34) +#define NVCSI_DATA_TYPE_RGB666 MK_U32(35) +#define NVCSI_DATA_TYPE_RGB888 MK_U32(36) +#define NVCSI_DATA_TYPE_RAW6 MK_U32(40) +#define NVCSI_DATA_TYPE_RAW7 MK_U32(41) +#define NVCSI_DATA_TYPE_RAW8 MK_U32(42) +#define NVCSI_DATA_TYPE_RAW10 MK_U32(43) +#define NVCSI_DATA_TYPE_RAW12 MK_U32(44) +#define NVCSI_DATA_TYPE_RAW14 MK_U32(45) +#define NVCSI_DATA_TYPE_RAW16 MK_U32(46) +#define NVCSI_DATA_TYPE_RAW20 MK_U32(47) +#define NVCSI_DATA_TYPE_Unknown MK_U32(64) + +/* NVCSI DPCM ratio */ +#define NVCSI_DPCM_RATIO_BYPASS MK_U32(0) +#define NVCSI_DPCM_RATIO_10_8_10 MK_U32(1) +#define NVCSI_DPCM_RATIO_10_7_10 MK_U32(2) +#define NVCSI_DPCM_RATIO_10_6_10 MK_U32(3) +#define NVCSI_DPCM_RATIO_12_8_12 MK_U32(4) +#define NVCSI_DPCM_RATIO_12_7_12 MK_U32(5) +#define NVCSI_DPCM_RATIO_12_6_12 MK_U32(6) +#define NVCSI_DPCM_RATIO_14_10_14 MK_U32(7) +#define NVCSI_DPCM_RATIO_14_8_14 MK_U32(8) +#define NVCSI_DPCM_RATIO_12_10_12 MK_U32(9) + +/** + * @defgroup NvCsiParamType NvCSI Parameter Type + * @{ + */ +#define 
NVCSI_PARAM_TYPE_UNSPECIFIED MK_U32(0) +#define NVCSI_PARAM_TYPE_DPCM MK_U32(1) +#define NVCSI_PARAM_TYPE_WATCHDOG MK_U32(2) +/**@}*/ + +struct nvcsi_dpcm_config { + uint32_t dpcm_ratio; + uint32_t pad32__; +} CAPTURE_IVC_ALIGN; + +/** + * @brief NvCSI watchdog configuration + */ +struct nvcsi_watchdog_config { + /** Enable/disable the pixel parser watchdog */ + uint8_t enable; + /** Reserved */ + uint8_t pad8__[3]; + /** The watchdog timer timeout period */ + uint32_t period; +} CAPTURE_IVC_ALIGN; + +/** + * NVCSI - TPG attributes + */ +/** + * @brief Number of vertical color bars in TPG (t186) + */ +#define NVCSI_TPG_NUM_COLOR_BARS MK_U32(8) + +/** + * @brief NvCSI test pattern generator (TPG) configuration for T186 + */ +struct nvcsi_tpg_config_t186 { + /** NvCSI stream number */ + uint8_t stream_id; + /** DEPRECATED - to be removed */ + uint8_t stream; + /** NvCSI virtual channel ID */ + uint8_t virtual_channel_id; + /** DEPRECATED - to be removed */ + uint8_t virtual_channel; + /** Initial frame number */ + uint16_t initial_frame_number; + /** Reserved */ + uint16_t pad16__; + /** Enable frame number generation */ + uint32_t enable_frame_counter; + /** NvCSI datatype */ + uint32_t datatype; + /** DEPRECATED - to be removed */ + uint32_t data_type; + /** Width of the generated test image */ + uint16_t image_width; + /** Height of the generated test image */ + uint16_t image_height; + /** Pixel value for each horizontal color bar (format according to DT) */ + uint32_t pixel_values[NVCSI_TPG_NUM_COLOR_BARS]; +} CAPTURE_IVC_ALIGN; + +/** + * @brief NvCsiTpgFlag Test pattern generator (TPG) flags for t194, tpg-ng + * (Non-Safety) + * @{ + */ +#define NVCSI_TPG_FLAG_PATCH_MODE MK_U16(1) +/** Next gen TPG sine LUT mode */ +#define NVCSI_TPG_FLAG_SINE_MODE MK_U16(2) +#define NVCSI_TPG_FLAG_PHASE_INCREMENT MK_U16(4) +#define NVCSI_TPG_FLAG_AUTO_STOP MK_U16(8) +/** TPG next gen feature to generate embedded data with config settings */ +#define 
NVCSI_TPG_FLAG_EMBEDDED_PATTERN_CONFIG_INFO MK_U16(16) +/** Next gen TPG LS/LE packet generation enable flag */ +#define NVCSI_TPG_FLAG_ENABLE_LS_LE MK_U16(32) +/** TPG next gen feature to transmit CPHY packets. DPHY is default option */ +#define NVCSI_TPG_FLAG_PHY_MODE_CPHY MK_U16(64) +/* Enable CRC check. + * If this flag is set, NVCSI will do CRC check for CPHY packet + * headers and ECC check for DPHY packet headers. + * TPG doesn't support ECC generation for DPHY, so enabling this + * flag together with DPHY can be used to trigger NVCSI errors. + */ +#define NVCSI_TPG_FLAG_ENABLE_HEADER_CRC_ECC_CHECK MK_U16(128) +/* Enable CRC/ECC override. + * If this flag is set, NVCSI will use override registers instead of + * of packet headers/payload CRC fields. + */ +#define NVCSI_TPG_FLAG_ENABLE_CRC_ECC_OVERRIDE MK_U16(256) +/* Force VI error forwarding. + * VI error forwarding is disabled by default for DPHY mode, + * because CRC are not generated in this mode. + * Forcing VI error forwarding allow to trigger PD CRC error for + * tests. + */ +#define NVCSI_TPG_FLAG_FORCE_NVCSI2VI_ERROR_FORWARDING MK_U16(512) + +/** @} */ + +/** + * @brief NvCSI test pattern generator (TPG) configuration for T194 + */ +struct nvcsi_tpg_config_t194 { + /** NvCSI Virtual channel ID */ + uint8_t virtual_channel_id; + /** NvCSI datatype */ + uint8_t datatype; + /** @ref NvCsiTpgFlag "NvCSI TPG flag" */ + uint16_t flags; + /** Starting framen number for TPG */ + uint16_t initial_frame_number; + /** Maximum number for frames to be generated by TPG */ + uint16_t maximum_frame_number; + /** Width of the generated frame in pixels */ + uint16_t image_width; + /** Height of the generated frame in pixels */ + uint16_t image_height; + /** Embedded data line width in bytes */ + uint32_t embedded_line_width; + /** Line count of the embedded data before the pixel frame. */ + uint32_t embedded_lines_top; + /** Line count of the embedded data after the pixel frame. 
*/ + uint32_t embedded_lines_bottom; + /* The lane count for the VC. */ + uint32_t lane_count; + /** Initial phase */ + uint32_t initial_phase; + /** Initial horizontal frequency for red channel */ + uint32_t red_horizontal_init_freq; + /** Initial vertical frequency for red channel */ + uint32_t red_vertical_init_freq; + /** Rate of change of the horizontal frequency for red channel */ + uint32_t red_horizontal_freq_rate; + /** Rate of change of the vertical frequency for red channel */ + uint32_t red_vertical_freq_rate; + /** Initial horizontal frequency for green channel */ + uint32_t green_horizontal_init_freq; + /** Initial vertical frequency for green channel */ + uint32_t green_vertical_init_freq; + /** Rate of change of the horizontal frequency for green channel */ + uint32_t green_horizontal_freq_rate; + /** Rate of change of the vertical frequency for green channel */ + uint32_t green_vertical_freq_rate; + /** Initial horizontal frequency for blue channel */ + uint32_t blue_horizontal_init_freq; + /** Initial vertical frequency for blue channel */ + uint32_t blue_vertical_init_freq; + /** Rate of change of the horizontal frequency for blue channel */ + uint32_t blue_horizontal_freq_rate; + /** Rate of change of the vertical frequency for blue channel */ + uint32_t blue_vertical_freq_rate; +} CAPTURE_IVC_ALIGN; + +/** + * @brief next gen NvCSI test pattern generator (TPG) configuration. 
+ */ +struct nvcsi_tpg_config_tpg_ng { + /** NvCSI Virtual channel ID */ + uint8_t virtual_channel_id; + /** NvCSI datatype */ + uint8_t datatype; + /** @ref NvCsiTpgFlag "NvCSI TPG flag" */ + uint16_t flags; + /** Starting framen number for TPG */ + uint16_t initial_frame_number; + /** Maximum number for frames to be generated by TPG */ + uint16_t maximum_frame_number; + /** Width of the generated frame in pixels */ + uint16_t image_width; + /** Height of the generated frame in pixels */ + uint16_t image_height; + /** Embedded data line width in bytes */ + uint32_t embedded_line_width; + /** Line count of the embedded data before the pixel frame. */ + uint32_t embedded_lines_top; + /** Line count of the embedded data after the pixel frame. */ + uint32_t embedded_lines_bottom; + /** Initial phase */ + uint32_t initial_phase_red; + /** Initial phase */ + uint32_t initial_phase_green; + /** Initial phase */ + uint32_t initial_phase_blue; + /** Initial horizontal frequency for red channel */ + uint32_t red_horizontal_init_freq; + /** Initial vertical frequency for red channel */ + uint32_t red_vertical_init_freq; + /** Rate of change of the horizontal frequency for red channel */ + uint32_t red_horizontal_freq_rate; + /** Rate of change of the vertical frequency for red channel */ + uint32_t red_vertical_freq_rate; + /** Initial horizontal frequency for green channel */ + uint32_t green_horizontal_init_freq; + /** Initial vertical frequency for green channel */ + uint32_t green_vertical_init_freq; + /** Rate of change of the horizontal frequency for green channel */ + uint32_t green_horizontal_freq_rate; + /** Rate of change of the vertical frequency for green channel */ + uint32_t green_vertical_freq_rate; + /** Initial horizontal frequency for blue channel */ + uint32_t blue_horizontal_init_freq; + /** Initial vertical frequency for blue channel */ + uint32_t blue_vertical_init_freq; + /** Rate of change of the horizontal frequency for blue channel */ + uint32_t 
blue_horizontal_freq_rate; + /** Rate of change of the vertical frequency for blue channel */ + uint32_t blue_vertical_freq_rate; + /** NvCSI stream number */ + uint8_t stream_id; + /** NvCSI tpg embedded data spare0 reg settings */ + uint8_t emb_data_spare_0; + /** NvCSI tpg embedded data spare1 reg settings */ + uint8_t emb_data_spare_1; + /** NvCSI TPG output brightness gain */ + uint8_t brightness_gain_ratio; + + /** Fields below are used if NVCSI_TPG_FLAG_ENABLE_CRC_ECC_OVERRIDE flag is set. + * Format of packet header fields override_crc_ph_*: + * bits 0..15 - first header + * bits 31..16 - second header */ + /** This field is for the CPHY SOF packet first and second packet header CRC override */ + uint32_t override_crc_ph_sof; + /** This field is for the CPHY EOF packet first and second packet header CRC override */ + uint32_t override_crc_ph_eof; + /** This field is for the CPHY SOL packet first and second packet header CRC override */ + uint32_t override_crc_ph_sol; + /** This field is for the CPHY EOL packet first and second packet header CRC override */ + uint32_t override_crc_ph_eol; + /** This field is for the CPHY long packet packet first and second packet header CRC override */ + uint32_t override_crc_ph_long_packet; + /** This field is for the long packet payload CRC override (both CPHY and DPHY) */ + uint32_t override_crc_payload; + /** The TPG will not generate ECC for a packet. When using the TPG, + * SW should set the PP to skip the ecc check. To verify the ecc logic for safety BIST, + * SW can write a pre-calculated ECC for the TPG, when use with this mode, + * the TPG should generate a grescale pattern. + * 5:0 SOF_ECC This field is for the SOF short packet ECC. + * 11:6 EOF_ECC This field is for the EOF short packet ECC. + * 17:12 SOL_ECC This field is for the SOL short packet ECC. + * 23:18 EOL_ECC This field is for the EOL short packet ECC. + * 29:24 LINE_ECC This field is for the long packet header ECC. 
+ * */ + uint32_t override_ecc_ph; + /** Reserved size */ + uint32_t reserved; +} CAPTURE_IVC_ALIGN; + +/** + * @brief Commong NvCSI test pattern generator (TPG) configuration + */ +union nvcsi_tpg_config { + /** TPG configuration for T186 */ + struct nvcsi_tpg_config_t186 t186; + /** TPG configuration for T194 */ + struct nvcsi_tpg_config_t194 t194; + /** Next gen TPG configuration*/ + struct nvcsi_tpg_config_tpg_ng tpg_ng; + /** Reserved size */ + uint32_t reserved[32]; +}; + +/** + * @brief TPG rate configuration, low level parameters + */ +struct nvcsi_tpg_rate_config { + /** Horizontal blanking (clocks) */ + uint32_t hblank; + /** Vertical blanking (clocks) */ + uint32_t vblank; + /** T194 only: Interval between pixels (clocks) */ + uint32_t pixel_interval; + /** next gen TPG only: data speed */ + uint32_t lane_speed; +} CAPTURE_IVC_ALIGN; + +/** + * ISP capture settings + */ + +/** + * @defgroup IspErrorMask ISP Channel error mask + */ +/** @{ */ +#define CAPTURE_ISP_CHANNEL_ERROR_DMA_PBUF_ERR MK_BIT32(0) +#define CAPTURE_ISP_CHANNEL_ERROR_DMA_SBUF_ERR MK_BIT32(1) +#define CAPTURE_ISP_CHANNEL_ERROR_DMA_SEQ_ERR MK_BIT32(2) +#define CAPTURE_ISP_CHANNEL_ERROR_FRAMEID_ERR MK_BIT32(3) +#define CAPTURE_ISP_CHANNEL_ERROR_TIMEOUT MK_BIT32(4) +#define CAPTURE_ISP_CHANNEL_ERROR_TASK_TIMEOUT MK_BIT32(5) +#define CAPTURE_ISP_CHANNEL_ERROR_ALL MK_U32(0x003F) +/** @} */ + +/** + * @defgroup ISPProcessChannelFlags ISP process channel specific flags + */ +/**@{*/ +/** Channel reset on error */ +#define CAPTURE_ISP_CHANNEL_FLAG_RESET_ON_ERROR MK_U32(0x0001) +/**@}*/ + +/** + * @brief Describes RTCPU side resources for a ISP capture pipe-line. 
+ * + * Following structure defines ISP channel specific configuration; + */ +struct capture_channel_isp_config { + /** Unique ISP process channel ID */ + uint8_t channel_id; + /** Reserved */ + uint8_t pad_chan__[3]; + /** See ISP process channel specific @ref ISPProcessChannelFlags "flags" */ + uint32_t channel_flags; + /** + * Base address of ISP capture descriptor ring buffer. + * The size of the buffer is request_queue_depth * request_size + */ + iova_t requests; + /** Number of ISP process requests in the ring buffer */ + uint32_t request_queue_depth; + /** Size of each ISP process request (@ref isp_capture_descriptor) */ + uint32_t request_size; + + /** + * Base address of ISP program descriptor ring buffer. + * The size of the buffer is program_queue_depth * program_size + */ + iova_t programs; + /** + * Maximum number of ISP program requests in the program queue. + * Determines the size of the ISP program ring buffer. + */ + uint32_t program_queue_depth; + /** Size of each ISP process request (@ref isp_program_descriptor) */ + uint32_t program_size; + /** ISP Process output buffer syncpoint info */ + struct syncpoint_info progress_sp; + /** Statistics buffer syncpoint info */ + struct syncpoint_info stats_progress_sp; + + /** + * Base address of a memory mapped ring buffer containing ISP requests + * buffer information. + * The size of the buffer is queue_depth * request_memoryinfo_size + */ + iova_t requests_memoryinfo; + + /** + * Base address of a memory mapped ring buffer containing ISP program + * buffer information. + */ + iova_t programs_memoryinfo; + + /** Size of the memoryinfo buffer reserved for each capture request. */ + uint32_t request_memoryinfo_size; + + /** Size of the memoryinfo buffer reserved for each program request. 
*/ + uint32_t program_memoryinfo_size; + + uint32_t reserved; + +#define HAVE_ISP_GOS_TABLES + /** Number of active ISP GOS tables in isp_gos_tables[] */ + uint32_t num_isp_gos_tables; + + /** + * GoS tables can only be programmed when there are no + * active channels. For subsequent channels we check that + * the channel configuration matches with the active + * configuration. + */ + iova_t isp_gos_tables[ISP_NUM_GOS_TABLES]; +} CAPTURE_IVC_ALIGN; + +/** + * @defgroup IspProcesStatus ISP process status codes + */ +/** @{ */ +/** ISP frame processing status unknown */ +#define CAPTURE_ISP_STATUS_UNKNOWN MK_U32(0) +/** ISP frame processing succeeded */ +#define CAPTURE_ISP_STATUS_SUCCESS MK_U32(1) +/** ISP frame processing encountered an error */ +#define CAPTURE_ISP_STATUS_ERROR MK_U32(2) +/** @} */ + +/** + * @brief ISP process request status + */ +struct capture_isp_status { + /** ISP channel id */ + uint8_t chan_id; + /** Reserved */ + uint8_t pad__; + /** Frame sequence number */ + uint16_t frame_id; + /** See @ref IspProcesStatus "ISP process status codes" */ + uint32_t status; + /** + * Error status of ISP process request. + * Zero in case of SUCCESS, non-zero value case of ERROR. + * See @ref IspErrorMask "ISP Channel error mask". 
+ */ + uint32_t error_mask; + /** Reserved */ + uint32_t pad2__; +} CAPTURE_IVC_ALIGN; + +/** + * @defgroup IspProgramStatus ISP program status codes + */ +/** @{ */ +/** ISP program status unknown */ +#define CAPTURE_ISP_PROGRAM_STATUS_UNKNOWN MK_U32(0) +/** ISP program was used successfully for frame processing */ +#define CAPTURE_ISP_PROGRAM_STATUS_SUCCESS MK_U32(1) +/** ISP program encountered an error */ +#define CAPTURE_ISP_PROGRAM_STATUS_ERROR MK_U32(2) +/** ISP program has expired and is not being used by any active process requests */ +#define CAPTURE_ISP_PROGRAM_STATUS_STALE MK_U32(3) +/** @} */ + +/** + * @brief ISP program request status + */ +struct capture_isp_program_status { + /** ISP channel id */ + uint8_t chan_id; + /** + * The settings_id uniquely identifies the ISP program. + * The ID is assigned by application and copied here from + * the @ref isp_program_descriptor structure. + */ + uint8_t settings_id; + /** Reserved */ + uint16_t pad_id__; + /** @ref IspProgramStatus "Program status" */ + uint32_t status; + /** + * Error status from last ISP process request using this ISP program. + * Zero in case of SUCCESS, non-zero value case of ERROR. + * See @ref IspErrorMask "ISP Channel error mask". + */ + uint32_t error_mask; + /** Reserved */ + uint32_t pad2__; +} CAPTURE_IVC_ALIGN; + +/** + * @defgroup IspActivateFlag ISP program activation flag + */ +/** @{ */ +/** Program request will when the frame sequence id reaches a certain threshold */ +#define CAPTURE_ACTIVATE_FLAG_ON_SEQUENCE_ID MK_U32(0x1) +/** Program request will be activate when the frame settings id reaches a certain threshold */ +#define CAPTURE_ACTIVATE_FLAG_ON_SETTINGS_ID MK_U32(0x2) +/** Each Process request is coupled with a Program request */ +#define CAPTURE_ACTIVATE_FLAG_COUPLED MK_U32(0x4) +/** @} */ + +/** + * @brief Describes ISP program structure; + */ +struct isp_program_descriptor { + /** ISP settings_id which uniquely identifies isp_program. 
*/ + uint8_t settings_id; + /** + * VI channel bound to the isp channel. + * In case of mem_isp_mem set this to CAPTURE_NO_VI_ISP_BINDING + */ + uint8_t vi_channel_id; +#define CAPTURE_NO_VI_ISP_BINDING MK_U8(0xFF) + /** Reserved */ + uint8_t pad_sid__[2]; + /** + * Capture sequence id, frame id; Given ISP program will be used from this frame ID onwards + * until new ISP program does replace it. + */ + uint32_t sequence; + + /** + * Offset to memory mapped ISP program buffer from ISP program descriptor base address, + * which contains the ISP configs and PB1 containing HW settings. Ideally the offset is + * the size(ATOM aligned) of ISP program descriptor only, as each isp_program would be placed + * just after it's corresponding ISP program descriptor in memory. + */ + uint32_t isp_program_offset; + /** Size of isp program structure */ + uint32_t isp_program_size; + + /** + * Base address of memory mapped ISP PB1 containing isp HW settings. + * This has to be 64 bytes aligned + */ + iova_t isp_pb1_mem; + + /** ISP program request status written by RCE */ + struct capture_isp_program_status isp_program_status; + + /** Activation condition for given ISP program. See @ref IspActivateFlag "Activation flags" */ + uint32_t activate_flags; + + /** Pad to aligned size */ + uint32_t pad__[5]; +} CAPTURE_DESCRIPTOR_ALIGN; + +/** + * @brief ISP program size (ATOM aligned). + * + * NvCapture UMD makes sure to place isp_program just after above program + * descriptor buffer for each request, so that KMD and RCE can co-locate + * isp_program and it's corresponding program descriptor in memory. 
+ */ +#define ISP_PROGRAM_MAX_SIZE 16512 + +/** + * @brief ISP image surface info + */ +struct image_surface { + /** Lower 32-bit of the buffer's base address */ + uint32_t offset; + /** Upper 8-bit of the buffer's base address */ + uint32_t offset_hi; + /** The surface stride in bytes */ + uint32_t surface_stride; + /** Reserved */ + uint32_t pad_surf__; +} CAPTURE_IVC_ALIGN; + +/** + * @brief Output image surface info + */ +struct stats_surface { + /** Lower 32-bit of the statistics buffer base address */ + uint32_t offset; + /** Upper 8-bit of the statistics buffer base address */ + uint32_t offset_hi; +} CAPTURE_IVC_ALIGN; + +/** + * @brief Memory write crop region info + */ +struct isp_crop_rect { + /** Topmost line stored in memory (inclusive) relative to the MW input image */ + uint16_t top; + /** Bottommost line stored in memory (inclusive) relative to the MW input image */ + uint16_t bottom; + /** Leftmost pixel stored in memory (inclusive) relative to the MW input image */ + uint16_t left; + /** Rightmost pixel stored in memory (inclusive) relative to the MW input image */ + uint16_t right; +}; + +/** + * @defgroup IspProcessFlag ISP process frame specific flags. + */ +/** @{ */ +/** Enables process status reporting for the channel */ +#define CAPTURE_ISP_FLAG_STATUS_REPORT_ENABLE MK_BIT32(0) +/** Enables error reporting for the channel */ +#define CAPTURE_ISP_FLAG_ERROR_REPORT_ENABLE MK_BIT32(1) +/** Enables process and program request binding for the channel */ +#define CAPTURE_ISP_FLAG_ISP_PROGRAM_BINDING MK_BIT32(2) +/** @} */ + +/** + * @brief ISP capture descriptor + */ +struct isp_capture_descriptor { + /** Process request sequence number, frame id */ + uint32_t sequence; + /** See @ref IspProcessFlag "ISP process frame specific flags." 
*/ + uint32_t capture_flags; + + /** 1 MR port, max 3 input surfaces */ +#define ISP_MAX_INPUT_SURFACES MK_U32(3) + + /** Input images surfaces */ + struct image_surface input_mr_surfaces[ISP_MAX_INPUT_SURFACES]; + + /** + * 3 MW ports, max 2 surfaces (multiplanar) per port. + */ +#define ISP_MAX_OUTPUTS MK_U32(3) +#define ISP_MAX_OUTPUT_SURFACES MK_U32(2) + + struct { + /** Memory write port output surfaces */ + struct image_surface surfaces[ISP_MAX_OUTPUT_SURFACES]; + /* TODO: Should we have here just image format enum value + block height instead? + Dither settings would logically be part of ISP program */ + /** Image format definition for output surface */ + uint32_t image_def; + /** Width of the output surface in pixels */ + uint16_t width; + /** Height of the output surface in pixels */ + uint16_t height; + } outputs_mw[ISP_MAX_OUTPUTS]; + + /** Flicker band (FB) statistics buffer */ + struct stats_surface fb_surface; + /** Focus metrics (FM) statistics buffer */ + struct stats_surface fm_surface; + /** Auto Focus Metrics (AFM) statistics buffer */ + struct stats_surface afm_surface; + /** Local Average Clipping (LAC0) unit 0 statistics buffer */ + struct stats_surface lac0_surface; + /** Local Average Clipping (LAC1) unit 1 statistics buffer */ + struct stats_surface lac1_surface; + /** Histogram (H0) unit 0 statistics buffer */ + struct stats_surface h0_surface; + /** Histogram (H1) unit 1 statistics buffer */ + struct stats_surface h1_surface; + /** Pixel Replacement Unit (PRU) statistics buffer */ + struct stats_surface pru_bad_surface; + /** RAW24 Histogram Unit statistics buffer */ + struct stats_surface hist_raw24_surface; + /** Local Tone Mapping statistics buffer */ + struct stats_surface ltm_surface; + + /** Surfaces related configuration */ + struct { + /** Input image surface width in pixels */ + uint16_t mr_width; + /** Input image surface height in pixels */ + uint16_t mr_height; + + /** Height of slices used for processing the image */ + 
uint16_t slice_height; + /** Width of first VI chunk in a line */ + uint16_t chunk_width_first; + /** + * Width of VI chunks in the middle of a line, and/or width of + * ISP tiles in middle of a slice + */ + uint16_t chunk_width_middle; + /** Width of overfetch area in the beginning of VI chunks */ + uint16_t chunk_overfetch_width; + /** Width of the leftmost ISP tile in a slice */ + uint16_t tile_width_first; + /** Input image cfa */ + uint8_t mr_image_cfa; + /** Reserved */ + uint8_t pad__; + /** MR unit input image format value */ + uint32_t mr_image_def; + /* TODO: should this be exposed to user mode? */ + /** MR unit input image format value */ + uint32_t mr_image_def1; + /** SURFACE_CTL_MR register value */ + uint32_t surf_ctrl; + /** Byte stride between start of lines. Must be ATOM aligned */ + uint32_t surf_stride_line; + /** Byte stride between start of DPCM chunks. Must be ATOM aligned */ + uint32_t surf_stride_chunk; + } surface_configs; + + /** Reserved */ + uint32_t pad2__; + /** Base address of ISP PB2 memory */ + iova_t isp_pb2_mem; + /* TODO: Isn't PB2 size constant, do we need this? */ + /** Size of the pushbuffer 2 memory */ + uint32_t isp_pb2_size; + /** Reserved */ + uint32_t pad_pb__; + + /** Frame processing timeout in microseconds */ + uint32_t frame_timeout; + + /** + * Number of inputfences for given capture request. + * These fences are exclusively associated with ISP input ports and + * they support subframe sychronization. + */ + uint32_t num_inputfences; + /** Progress syncpoint for each one of inputfences */ + struct syncpoint_info inputfences[ISP_MAX_INPUT_SURFACES]; + + /* GID-STKHLDREQPLCL123-3812735 */ +#define ISP_MAX_PREFENCES MK_U32(14) + + /** + * Number of traditional prefences for given capture request. 
+ * They are generic, so can be used for any pre-condition but do not + * support subframe synchronization + */ + uint32_t num_prefences; + /** Reserved */ + uint32_t pad_prefences__; + + /** Syncpoint for each one of prefences */ + struct syncpoint_info prefences[ISP_MAX_PREFENCES]; + + /** Engine result record – written by Falcon */ + struct engine_status_surface engine_status; + + /** Frame processing result record – written by RTCPU */ + struct capture_isp_status status; + + /* Information regarding the ISP program bound to this capture */ + uint32_t program_buffer_index; + + /** Reserved */ + uint32_t pad__[1]; +} CAPTURE_DESCRIPTOR_ALIGN; + +/** + * @brief ISP capture descriptor memory information + * + * ISP capture descriptor memory information shared between + * KMD and RCE only. This information cannot be part of + * capture descriptor since it is shared with usermode + * application. + */ +struct isp_capture_descriptor_memoryinfo { + struct memoryinfo_surface input_mr_surfaces[ISP_MAX_INPUT_SURFACES]; // TODO RCE + struct { + struct memoryinfo_surface surfaces[ISP_MAX_OUTPUT_SURFACES]; + } outputs_mw[ISP_MAX_OUTPUTS]; + + /** Flicker band (FB) statistics buffer */ + struct memoryinfo_surface fb_surface; + /** Focus metrics (FM) statistics buffer */ + struct memoryinfo_surface fm_surface; + /** Auto Focus Metrics (AFM) statistics buffer */ + struct memoryinfo_surface afm_surface; // + /** Local Average Clipping (LAC0) unit 0 statistics buffer */ + struct memoryinfo_surface lac0_surface; + /** Local Average Clipping (LAC1) unit 1 statistics buffer */ + struct memoryinfo_surface lac1_surface; + /** Histogram (H0) unit 0 statistics buffer */ + struct memoryinfo_surface h0_surface; + /** Histogram (H1) unit 1 statistics buffer */ + struct memoryinfo_surface h1_surface; + /** Pixel Replacement Unit (PRU) statistics buffer */ + struct memoryinfo_surface pru_bad_surface; + /** Local Tone Mapping statistics buffer */ + struct memoryinfo_surface ltm_surface; + /** 
RAW24 Histogram Unit statistics buffer */ + struct memoryinfo_surface hist_raw24_surface; + /** Base address of ISP PB2 memory */ + struct memoryinfo_surface isp_pb2_mem; // TODO move to programm desc meminfo + /** Engine result record – written by Falcon */ + struct memoryinfo_surface engine_status; + /* Reserved */ + uint64_t reserved[6]; +} CAPTURE_DESCRIPTOR_ALIGN; + +/** + * @brief PB2 size (ATOM aligned). + * + * NvCapture UMD makes sure to place PB2 just after above capture + * descriptor buffer for each request, so that KMD and RCE can co-locate + * PB2 and it's corresponding capture descriptor in memory. + */ +#define ISP_PB2_MAX_SIZE MK_SIZE(512) + +/** + * @brief Size allocated for the ISP program push buffer + */ +#define NVISP5_ISP_PROGRAM_PB_SIZE MK_SIZE(16384) + +/** +* @brief Size allocated for the push buffer containing output & stats +* surface definitions. Final value TBD +*/ +#define NVISP5_SURFACE_PB_SIZE MK_SIZE(512) + +/** + * @Size of engine status surface used in both VI and ISP + */ +#define NV_ENGINE_STATUS_SURFACE_SIZE MK_SIZE(16) + +/** + * @ brief Downscaler configuration information that is needed for building ISP config buffer. + * + * These registers cannot be included in push buffer but they must be provided in a structure + * that RCE can parse. Format of the fields is same as in corresponding ISP registers. + */ +struct isp5_downscaler_configbuf { + /** + * Horizontal pixel increment, in U5.20 format. I.e. 2.5 means downscaling + * by factor of 2.5. Corresponds to ISP_DM_H_PI register + */ + uint32_t pixel_incr_h; + /** + * Vertical pixel increment, in U5.20 format. I.e. 2.5 means downscaling + * by factor of 2.5. Corresponds to ISP_DM_v_PI register + */ + uint32_t pixel_incr_v; + + /** + * Offset of the first source image pixel to be used. 
+ * Topmost 16 bits - the leftmost column to be used + * Lower 16 bits - the topmost line to be used + */ + uint32_t offset; + + /** + * Size of the scaled destination image in pixels + * Topmost 16 bits - height of destination image + * Lowest 16 bits - Width of destination image + */ + uint32_t destsize; +}; + +/** + * @brief ISP sub-units enabled bits. + */ +#define ISP5BLOCK_ENABLED_PRU_OUTLIER_REJECTION MK_BIT32(0) +#define ISP5BLOCK_ENABLED_PRU_STATS MK_BIT32(1) +#define ISP5BLOCK_ENABLED_PRU_HDR MK_BIT32(2) +#define ISP6BLOCK_ENABLED_PRU_RAW24_HIST MK_BIT32(3) /* ISP6 */ +#define ISP5BLOCK_ENABLED_AP_DEMOSAIC MK_BIT32(4) +#define ISP5BLOCK_ENABLED_AP_CAR MK_BIT32(5) +#define ISP5BLOCK_ENABLED_AP_LTM_MODIFY MK_BIT32(6) +#define ISP5BLOCK_ENABLED_AP_LTM_STATS MK_BIT32(7) +#define ISP5BLOCK_ENABLED_AP_FOCUS_METRIC MK_BIT32(8) +#define ISP5BLOCK_ENABLED_FLICKERBAND MK_BIT32(9) +#define ISP5BLOCK_ENABLED_HISTOGRAM0 MK_BIT32(10) +#define ISP5BLOCK_ENABLED_HISTOGRAM1 MK_BIT32(11) +#define ISP5BLOCK_ENABLED_DOWNSCALER0_HOR MK_BIT32(12) +#define ISP5BLOCK_ENABLED_DOWNSCALER0_VERT MK_BIT32(13) +#define ISP5BLOCK_ENABLED_DOWNSCALER1_HOR MK_BIT32(14) +#define ISP5BLOCK_ENABLED_DOWNSCALER1_VERT MK_BIT32(15) +#define ISP5BLOCK_ENABLED_DOWNSCALER2_HOR MK_BIT32(16) +#define ISP5BLOCK_ENABLED_DOWNSCALER2_VERT MK_BIT32(17) +#define ISP5BLOCK_ENABLED_SHARPEN0 MK_BIT32(18) +#define ISP5BLOCK_ENABLED_SHARPEN1 MK_BIT32(19) +#define ISP5BLOCK_ENABLED_LAC0_REGION0 MK_BIT32(20) +#define ISP5BLOCK_ENABLED_LAC0_REGION1 MK_BIT32(21) +#define ISP5BLOCK_ENABLED_LAC0_REGION2 MK_BIT32(22) +#define ISP5BLOCK_ENABLED_LAC0_REGION3 MK_BIT32(23) +#define ISP5BLOCK_ENABLED_LAC1_REGION0 MK_BIT32(24) +#define ISP5BLOCK_ENABLED_LAC1_REGION1 MK_BIT32(25) +#define ISP5BLOCK_ENABLED_LAC1_REGION2 MK_BIT32(26) +#define ISP5BLOCK_ENABLED_LAC1_REGION3 MK_BIT32(27) +/** + * Enable ISP6 LTM Softkey automatic update feature + */ +#define ISP6BLOCK_ENABLED_AP_LTM_SK_UPDATE MK_BIT32(28) + +/** + * @brief ISP 
overfetch requirements. + * + * ISP kernel needs access to pixels outside the active area of a tile + * to ensure continuous processing across tile borders. The amount of data + * needed depends on features enabled and some ISP parameters so this + * is program dependent. + * + * ISP extrapolates values outside image borders, so overfetch is needed only + * for borders between tiles. + */ + +struct isp_overfetch { + /** Number of pixels needed from the left side of tile */ + uint8_t left; + /** Number of pixels needed from the right side of tile */ + uint8_t right; + /** Number of pixels needed from above the tile */ + uint8_t top; + /** Number of pixels needed from below the tile */ + uint8_t bottom; + /** + * Number of pixels needed by PRU unit from left and right sides of the tile. + * This is needed to adjust tile border locations so that they align correctly + * at demosaic input. + */ + uint8_t pru_ovf_h; + /** + * Alignment requirement for tile width. Minimum alignment is 2 pixels, but + * if CAR is used this must be set to half of LPF kernel width. + */ + uint8_t alignment; + /** Reserved */ + uint8_t pad1__[2]; +}; + +/** + * @brief Identifier for ISP5 + */ +#define ISP_TYPE_ID_ISP5 MK_U16(3) + +/** + * @brief Identifier for ISP6 + */ +#define ISP_TYPE_ID_ISP6 MK_U16(4) + +/** + * @brief Magic bytes to detect ISP program struct with version information + */ + +#define ISP5_PROGRAM_STRUCT_ID MK_U32(0x50505349) + +/** + * @brief Version of ISP program struct layout + * + * Value of this constant must be increased every time when the memory layout of + * isp5_program struct changes. + */ +#define ISP5_PROGRAM_STRUCT_VERSION MK_U16(3) + + +/** + * @brief ISP program buffer + * + * Settings needed by RCE ISP driver to generate config buffer. + * Content and format of these fields is the same as corresponding ISP config buffer fields. + * See ISP_Microcode.docx for detailed description. 
+ */ +struct isp5_program { + /** + * @brief "Magic bytes" to identify memory area as an ISP program + */ + uint32_t isp_program_struct_id; + + /** + * @brief Version of the ISP program structure + */ + uint16_t isp_program_struct_version; + + /** + * @brief Target ISP for the ISP program. + */ + uint16_t isp_type; + + /** + * Sources for LS, AP and PRU blocks. + * Format is same as in ISP's XB_SRC_0 register + */ + uint32_t xbsrc0; + + /** + * Sources for AT[0-2] and TF[0-1] blocks + * Format is same as in ISP's XB_SRC_1 register + */ + uint32_t xbsrc1; + + /** + * Sources for DS[0-2] and MW[0-2] blocks + * Format is same as in ISP's XB_SRC_2 register + */ + uint32_t xbsrc2; + + /** + * Sources for FB, LAC[0-1] and HIST[0-1] blocks + * Format is same as in ISP's XB_SRC_3 register + */ + uint32_t xbsrc3; + + /** + * Bitmask to describe which of ISP blocks are enabled. + * See microcode documentation for details. + */ + uint32_t enables_config; + + /** + * AFM configuration. See microcode documentation for details. + */ + uint32_t afm_ctrl; + + /** + * Mask for stats blocks enabled. + */ + uint32_t stats_aidx_flag; + + /** + * Size used for the push buffer in 4-byte words. + */ + uint32_t pushbuffer_size; + + /** + * Horizontal pixel increment for downscalers, in + * U5.20 format. I.e. 2.5 means downscaling + * by factor of 2.5. Corresponds to ISP_DM_H_PI register. + * This is needed by ISP Falcon firmware to program + * tile starting state correctly. + */ + uint32_t ds0_pixel_incr_h; + uint32_t ds1_pixel_incr_h; + uint32_t ds2_pixel_incr_h; + + /** ISP overfetch requirements */ + struct isp_overfetch overfetch; + + /** memory write crop region info*/ + struct { + struct isp_crop_rect mw_crop; + } outputs_mw[ISP_MAX_OUTPUTS]; + + /** Reserved */ + uint32_t pad1__[11]; + + /** + * Push buffer containing ISP settings related to this program. 
+ * No relocations will be done for this push buffer; all registers + * that contain memory addresses that require relocation must be + * specified in the capture descriptor ISP payload. + */ + uint32_t pushbuffer[NVISP5_ISP_PROGRAM_PB_SIZE / sizeof(uint32_t)] + CAPTURE_DESCRIPTOR_ALIGN; + +} CAPTURE_DESCRIPTOR_ALIGN; + +/** + * @brief ISP Program ringbuffer element + * + * Each element in the ISP program ringbuffer contains a program descriptor immediately followed + * isp program. + */ +struct isp5_program_entry { + /** ISP capture descriptor */ + struct isp_program_descriptor prog_desc; + /** ISP program buffer */ + struct isp5_program isp_prog; +} CAPTURE_DESCRIPTOR_ALIGN; + +#pragma GCC diagnostic ignored "-Wpadded" + +#endif /* INCLUDE_CAMRTC_CAPTURE_H */ diff --git a/include/soc/tegra/tegra-i2c-rtcpu.h b/include/soc/tegra/tegra-i2c-rtcpu.h new file mode 100644 index 00000000..57ddeab6 --- /dev/null +++ b/include/soc/tegra/tegra-i2c-rtcpu.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2022 NVIDIA CORPORATION. All rights reserved. + */ + +#ifndef _LINUX_TEGRA_I2C_RTCPU_H +#define _LINUX_TEGRA_I2C_RTCPU_H + +#include +#include + +struct tegra_i2c_rtcpu_sensor; + +struct tegra_i2c_rtcpu_config { + unsigned int reg_bytes; +}; + +/* + * Sensor registration + */ + +#ifdef CONFIG_I2C_TEGRA_CAMRTC + +/* Find an I2C multi device, and register a sensor. 
*/ +struct tegra_i2c_rtcpu_sensor *tegra_i2c_rtcpu_register_sensor( + struct i2c_client *client, + const struct tegra_i2c_rtcpu_config *config); + +#else + +static inline struct tegra_i2c_rtcpu_sensor *tegra_i2c_rtcpu_register_sensor( + struct i2c_client *client, + const struct tegra_i2c_rtcpu_config *config) +{ + return NULL; +} + +#endif + +/* + * I2C transfer + */ + +#ifdef CONFIG_I2C_TEGRA_CAMRTC + +/* Start or stop buffering of I2C transfer requests */ +int tegra_i2c_rtcpu_aggregate( + struct tegra_i2c_rtcpu_sensor *sensor, + bool start); + +/* Setting frame ID is available after aggregation started */ +int tegra_i2c_rtcpu_set_frame_id( + struct tegra_i2c_rtcpu_sensor *sensor, + int frame_id); + +/* Read one or more bytes from a sensor */ +int tegra_i2c_rtcpu_read_reg8( + struct tegra_i2c_rtcpu_sensor *sensor, + unsigned int addr, + u8 *data, + unsigned int count); + +/* Write one or more bytes to a sensor */ +int tegra_i2c_rtcpu_write_reg8( + struct tegra_i2c_rtcpu_sensor *sensor, + unsigned int addr, + const u8 *data, + unsigned int count); + +/* Write a table */ +int tegra_i2c_rtcpu_write_table_8( + struct tegra_i2c_rtcpu_sensor *sensor, + const struct reg_8 table[], + const struct reg_8 override_list[], + int num_override_regs, u16 wait_ms_addr, u16 end_addr); + +#else + +#define tegra_i2c_rtcpu_aggregate(...) (0) +#define tegra_i2c_rtcpu_set_frame_id(...) (0) +#define tegra_i2c_rtcpu_read_reg8(...) (-ENODEV) +#define tegra_i2c_rtcpu_write_reg8(...) (-ENODEV) +#define tegra_i2c_rtcpu_write_table_8(...) (-ENODEV) + +#endif + +#endif /* _LINUX_TEGRA_I2C_RTCPU_H */ diff --git a/include/trace/events/camera_common.h b/include/trace/events/camera_common.h new file mode 100644 index 00000000..feff6eb1 --- /dev/null +++ b/include/trace/events/camera_common.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2022, NVIDIA CORPORATION, All rights reserved. 
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM camera_common + +#if !defined(_TRACE_CAMERA_COMMON_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_CAMERA_COMMON_H + +#include +#include + +struct tegra_channel; +struct timespec64; + +DECLARE_EVENT_CLASS(channel_simple, + TP_PROTO(const char *name), + TP_ARGS(name), + TP_STRUCT__entry( + __string(name, name) + ), + TP_fast_assign( + __assign_str(name, name); + ), + TP_printk("%s", __get_str(name)) +); + +DEFINE_EVENT(channel_simple, tegra_channel_open, + TP_PROTO(const char *name), + TP_ARGS(name) +); + +DEFINE_EVENT(channel_simple, tegra_channel_close, + TP_PROTO(const char *name), + TP_ARGS(name) +); + +DEFINE_EVENT(channel_simple, tegra_channel_notify_status_callback, + TP_PROTO(const char *name), + TP_ARGS(name) +); + +DECLARE_EVENT_CLASS(channel, + TP_PROTO(const char *name, int num), + TP_ARGS(name, num), + TP_STRUCT__entry( + __string(name, name) + __field(int, num) + ), + TP_fast_assign( + __assign_str(name, name); + __entry->num = num; + ), + TP_printk("%s : 0x%x", __get_str(name), (int)__entry->num) +); + +DEFINE_EVENT(channel, tegra_channel_set_stream, + TP_PROTO(const char *name, int num), + TP_ARGS(name, num) +); + +DEFINE_EVENT(channel, csi_s_stream, + TP_PROTO(const char *name, int num), + TP_ARGS(name, num) +); + +DEFINE_EVENT(channel, tegra_channel_set_power, + TP_PROTO(const char *name, int num), + TP_ARGS(name, num) +); + +DEFINE_EVENT(channel, camera_common_s_power, + TP_PROTO(const char *name, int num), + TP_ARGS(name, num) +); + +DEFINE_EVENT(channel, csi_s_power, + TP_PROTO(const char *name, int num), + TP_ARGS(name, num) +); + +TRACE_EVENT(tegra_channel_capture_setup, + TP_PROTO(struct tegra_channel *chan, unsigned int index), + TP_ARGS(chan, index), + TP_STRUCT__entry( + __field(unsigned int, vnc_id) + __field(unsigned int, width) + __field(unsigned int, height) + __field(unsigned int, format) + ), + TP_fast_assign( + __entry->vnc_id = chan->vnc_id[index]; + __entry->width = 
chan->format.width; + __entry->height = chan->format.height; + __entry->format = chan->fmtinfo->img_fmt; + ), + TP_printk("vnc_id %u W %u H %u fmt %x", + __entry->vnc_id, __entry->width, __entry->height, + __entry->format) +); + +DECLARE_EVENT_CLASS(frame, + TP_PROTO(const char *str, struct timespec64 *ts), + TP_ARGS(str, ts), + TP_STRUCT__entry( + __string(str, str) + __field(long, tv_sec) + __field(long, tv_nsec) + ), + TP_fast_assign( + __assign_str(str, str); + __entry->tv_sec = ts->tv_sec; + __entry->tv_nsec = ts->tv_nsec; + ), + TP_printk("%s:%ld.%ld", __get_str(str), __entry->tv_sec, + __entry->tv_nsec) +); + +DEFINE_EVENT(frame, tegra_channel_capture_frame, + TP_PROTO(const char *str, struct timespec64 *ts), + TP_ARGS(str, ts) +); + +DEFINE_EVENT(frame, tegra_channel_capture_done, + TP_PROTO(const char *str, struct timespec64 *ts), + TP_ARGS(str, ts) +); +#endif + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/uapi/linux/nvhost_events.h b/include/uapi/linux/nvhost_events.h new file mode 100644 index 00000000..0050a7d9 --- /dev/null +++ b/include/uapi/linux/nvhost_events.h @@ -0,0 +1,290 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Eventlib interface for PVA + * + * Copyright (c) 2016-2022, NVIDIA Corporation. All rights reserved. 
+ */ + +#ifndef NVHOST_EVENTS_H +#define NVHOST_EVENTS_H + +enum { + NVHOST_SCHEMA_VERSION = 1 +}; + +#define NVHOST_EVENT_PROVIDER_NAME "nv_mm_nvhost" + +/* Marks that the task is submitted to hardware */ +struct nvhost_task_submit { + /* Engine class ID */ + __u32 class_id; + + /* Syncpoint ID */ + __u32 syncpt_id; + + /* Threshold for task completion */ + __u32 syncpt_thresh; + + /* PID */ + __u32 pid; + + /* TID */ + __u32 tid; + + /* Channel ID */ + __u32 channel_id; +} __packed; + +/* Marks that the task is moving to execution */ +struct nvhost_task_begin { + /* Engine class ID */ + __u32 class_id; + + /* Syncpoint ID */ + __u32 syncpt_id; + + /* Threshold for task completion */ + __u32 syncpt_thresh; + + /* Channel ID */ + __u32 channel_id; +} __packed; + +/* Marks that the task is completed */ +struct nvhost_task_end { + /* Engine class ID */ + __u32 class_id; + + /* Syncpoint ID */ + __u32 syncpt_id; + + /* Threshold for task completion */ + __u32 syncpt_thresh; + + /* Channel ID */ + __u32 channel_id; +} __packed; + +struct nvhost_vpu_perf_counter { + /* Engine class ID */ + __u32 class_id; + + /* Syncpoint ID */ + __u32 syncpt_id; + + /* Threshold for task completion */ + __u32 syncpt_thresh; + + /* Identifier for the R5/VPU algorithm executed */ + __u32 operation; + + /* Algorithm specific identifying tag for the perf counter */ + __u32 tag; + + __u32 count; + __u32 average; + __u64 variance; + __u32 minimum; + __u32 maximum; +} __packed; + +/* Marks the pre/postfence associated with the task */ +struct nvhost_task_fence { + /* Engine class ID */ + __u32 class_id; + + /* Kind (prefence or postfence) */ + __u32 kind; + + /* Fence-specific type (see nvdev_fence.h) */ + __u32 fence_type; + + /* Valid for NVDEV_FENCE_TYPE_SYNCPT only */ + __u32 syncpt_id; + __u32 syncpt_thresh; + + /* The task this fence is associated with */ + __u32 task_syncpt_id; + __u32 task_syncpt_thresh; + + /* Valid for NVDEV_FENCE_TYPE_SYNC_FD only */ + __u32 sync_fd; + + /* Valid 
for NVDEV_FENCE_TYPE_SEMAPHORE + and NVDEV_FENCE_TYPE_SEMAPHORE_TS */ + __u32 semaphore_handle; + __u32 semaphore_offset; + __u32 semaphore_value; +} __packed; + +struct nvhost_pva_task_state { + /* Engine class ID */ + __u32 class_id; + + /* Syncpoint ID */ + __u32 syncpt_id; + + /* Threshold for task completion */ + __u32 syncpt_thresh; + + /** ID of the VPU on which task was run. 0 or 1 */ + __u8 vpu_id; + + /** ID of the FW Queue on which the task was run. [0, 7] */ + __u8 queue_id; + + /* Identifier for the R5/VPU algorithm executed */ + __u64 iova; +} __packed; + + + + +/* Marks that the task is submitted to hardware */ +struct nv_camera_task_submit { + /* Engine class ID */ + __u32 class_id; + + /* Syncpoint ID */ + __u32 syncpt_id; + + /* Threshold for task completion */ + __u32 syncpt_thresh; + + /* PID */ + __u32 pid; + + /* TID */ + __u32 tid; +} __packed; + +/* Marks that the task is moving to execution */ +struct nv_camera_task_begin { + /* Engine class ID */ + __u32 class_id; + + /* Syncpoint ID */ + __u32 syncpt_id; + + /* Threshold for task completion */ + __u32 syncpt_thresh; +} __packed; + +/* Marks that the task is completed */ +struct nv_camera_task_end { + /* Engine class ID */ + __u32 class_id; + + /* Syncpoint ID */ + __u32 syncpt_id; + + /* Threshold for task completion */ + __u32 syncpt_thresh; +} __packed; + +/* Marks that we are logging a general task */ +struct nv_camera_task_log { + + /* Engine class ID */ + __u32 class_id; + + /* PID */ + __u32 pid; + + /* TID */ + __u32 tid; +} __packed; + +enum { + /* struct nvhost_task_submit */ + NVHOST_TASK_SUBMIT = 0, + + /* struct nvhost_task_begin */ + NVHOST_TASK_BEGIN = 1, + + /* struct nvhost_task_end */ + NVHOST_TASK_END = 2, + + /* struct nvhost_task_fence */ + NVHOST_TASK_FENCE = 3, + + NVHOST_VPU_PERF_COUNTER_BEGIN = 4, + NVHOST_VPU_PERF_COUNTER_END = 5, + + /* struct nvhost_pva_task_state */ + NVHOST_PVA_QUEUE_BEGIN = 6, + NVHOST_PVA_QUEUE_END = 7, + NVHOST_PVA_PREPARE_BEGIN = 8, + 
NVHOST_PVA_PREPARE_END = 9, + NVHOST_PVA_VPU0_BEGIN = 10, + NVHOST_PVA_VPU0_END = 11, + NVHOST_PVA_VPU1_BEGIN = 12, + NVHOST_PVA_VPU1_END = 13, + NVHOST_PVA_POST_BEGIN = 14, + NVHOST_PVA_POST_END = 15, + + /* struct nv_camera_vi_capture_setup */ + NVHOST_CAMERA_VI_CAPTURE_SETUP = 16, + + /* struct nv_camera_vi_capture_reset */ + NVHOST_CAMERA_VI_CAPTURE_RESET = 17, + + /* struct nv_camera_vi_capture_release */ + NVHOST_CAMERA_VI_CAPTURE_RELEASE = 18, + + /* struct nv_camera_vi_capture_get_info */ + NVHOST_CAMERA_VI_CAPTURE_GET_INFO = 19, + + /* struct nv_camera_vi_capture_set_config */ + NVHOST_CAMERA_VI_CAPTURE_SET_CONFIG = 20, + + /* struct nv_camera_vi_capture_request */ + NVHOST_CAMERA_VI_CAPTURE_REQUEST = 21, + + /* struct nv_camera_vi_capture_status */ + NVHOST_CAMERA_VI_CAPTURE_STATUS = 22, + + /* struct nv_camera_vi_capture_set_progress_status */ + NVHOST_CAMERA_VI_CAPTURE_SET_PROGRESS_STATUS = 24, + + /* struct nv_camera_isp_capture_setup */ + NVHOST_CAMERA_ISP_CAPTURE_SETUP = 25, + + /* struct nv_camera_isp_capture_reset */ + NVHOST_CAMERA_ISP_CAPTURE_RESET = 26, + + /* struct nv_camera_isp_capture_release */ + NVHOST_CAMERA_ISP_CAPTURE_RELEASE = 27, + + /* struct nv_camera_isp_capture_get_info */ + NVHOST_CAMERA_ISP_CAPTURE_GET_INFO = 28, + + /* struct nv_camera_isp_capture_request */ + NVHOST_CAMERA_ISP_CAPTURE_REQUEST = 29, + + /* struct nv_camera_isp_capture_status */ + NVHOST_CAMERA_ISP_CAPTURE_STATUS = 30, + + /* struct nv_camera_isp_capture_program_request */ + NVHOST_CAMERA_ISP_CAPTURE_PROGRAM_REQUEST = 31, + + /* struct nv_camera_isp_capture_program_status */ + NVHOST_CAMERA_ISP_CAPTURE_PROGRAM_STATUS = 32, + + /* struct nv_camera_isp_capture_request_ex */ + NVHOST_CAMERA_ISP_CAPTURE_REQUEST_EX = 33, + + /* struct nv_camera_isp_capture_set_progress_status */ + NVHOST_CAMERA_ISP_CAPTURE_SET_PROGRESS_STATUS = 34, + + /* struct nv_camera_task_log */ + NVHOST_CAMERA_TASK_LOG = 35, + + NVHOST_NUM_EVENT_TYPES = 36 +}; + +enum { + 
NVHOST_NUM_CUSTOM_FILTER_FLAGS = 0 +}; + +#endif /* NVHOST_EVENTS_H */ diff --git a/include/uapi/linux/nvhost_nvcsi_ioctl.h b/include/uapi/linux/nvhost_nvcsi_ioctl.h new file mode 100644 index 00000000..4719b064 --- /dev/null +++ b/include/uapi/linux/nvhost_nvcsi_ioctl.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Tegra NVCSI Driver + * + * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved. + */ + +#ifndef __UAPI_LINUX_NVHOST_NVCSI_IOCTL_H +#define __UAPI_LINUX_NVHOST_NVCSI_IOCTL_H + +#include +#include + +#if !defined(__KERNEL__) +#define __user +#endif + +/* Bitmap + * + * | PHY_2 | PHY_1 | PHY_0 | + * | 11 10 | 9 8 | 7 6 | 5 4 | 3 2 | 1 0 | + * | CILB | CILA | CILB | CILA | CILB | CILA | + */ +#define PHY_0_CIL_A_IO0 0 +#define PHY_0_CIL_A_IO1 1 +#define PHY_0_CIL_B_IO0 2 +#define PHY_0_CIL_B_IO1 3 + +#define PHY_1_CIL_A_IO0 4 +#define PHY_1_CIL_A_IO1 5 +#define PHY_1_CIL_B_IO0 6 +#define PHY_1_CIL_B_IO1 7 + +#define PHY_2_CIL_A_IO0 8 +#define PHY_2_CIL_A_IO1 9 +#define PHY_2_CIL_B_IO0 10 +#define PHY_2_CIL_B_IO1 11 + +#define PHY_3_CIL_A_IO0 12 +#define PHY_3_CIL_A_IO1 13 +#define PHY_3_CIL_B_IO0 14 +#define PHY_3_CIL_B_IO1 15 +#define NVCSI_PHY_CIL_NUM_LANE 16 + +#define NVCSI_PHY_0_NVCSI_CIL_A_IO0 (0x1 << PHY_0_CIL_A_IO0) +#define NVCSI_PHY_0_NVCSI_CIL_A_IO1 (0x1 << PHY_0_CIL_A_IO1) +#define NVCSI_PHY_0_NVCSI_CIL_B_IO0 (0x1 << PHY_0_CIL_B_IO0) +#define NVCSI_PHY_0_NVCSI_CIL_B_IO1 (0x1 << PHY_0_CIL_B_IO1) + +#define NVCSI_PHY_1_NVCSI_CIL_A_IO0 (0x1 << PHY_1_CIL_A_IO0) +#define NVCSI_PHY_1_NVCSI_CIL_A_IO1 (0x1 << PHY_1_CIL_A_IO1) +#define NVCSI_PHY_1_NVCSI_CIL_B_IO0 (0x1 << PHY_1_CIL_B_IO0) +#define NVCSI_PHY_1_NVCSI_CIL_B_IO1 (0x1 << PHY_1_CIL_B_IO1) + +#define NVCSI_PHY_2_NVCSI_CIL_A_IO0 (0x1 << PHY_2_CIL_A_IO0) +#define NVCSI_PHY_2_NVCSI_CIL_A_IO1 (0x1 << PHY_2_CIL_A_IO1) +#define NVCSI_PHY_2_NVCSI_CIL_B_IO0 (0x1 << PHY_2_CIL_B_IO0) +#define NVCSI_PHY_2_NVCSI_CIL_B_IO1 (0x1 << PHY_2_CIL_B_IO1) + +#define 
NVCSI_PHY_3_NVCSI_CIL_A_IO0 (0x1 << PHY_3_CIL_A_IO0) +#define NVCSI_PHY_3_NVCSI_CIL_A_IO1 (0x1 << PHY_3_CIL_A_IO1) +#define NVCSI_PHY_3_NVCSI_CIL_B_IO0 (0x1 << PHY_3_CIL_B_IO0) +#define NVCSI_PHY_3_NVCSI_CIL_B_IO1 (0x1 << PHY_3_CIL_B_IO1) + +#define NVCSI_PHY_NUM_BRICKS 4 +#define NVHOST_NVCSI_IOCTL_MAGIC 'N' + +#define NVHOST_NVCSI_IOCTL_DESKEW_SETUP _IOW(NVHOST_NVCSI_IOCTL_MAGIC, 1, long) +#define NVHOST_NVCSI_IOCTL_DESKEW_APPLY _IOW(NVHOST_NVCSI_IOCTL_MAGIC, 2, long) + +#endif diff --git a/include/uapi/linux/nvhost_vi_ioctl.h b/include/uapi/linux/nvhost_vi_ioctl.h new file mode 100644 index 00000000..2bbe5ec1 --- /dev/null +++ b/include/uapi/linux/nvhost_vi_ioctl.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Tegra VI Driver + * + * Copyright (c) 2013-2022, NVIDIA CORPORATION. All rights reserved. + */ + +#ifndef __UAPI_LINUX_NVHOST_VI_IOCTL_H +#define __UAPI_LINUX_NVHOST_VI_IOCTL_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +#if !defined(__KERNEL__) +#define __user +#endif + +#define NVHOST_VI_IOCTL_MAGIC 'V' + +/* + * /dev/nvhost-ctrl-vi devices + * + * Opening a '/dev/nvhost-ctrl-vi' device node creates a way to send + * ctrl ioctl to vi driver. + * + * /dev/nvhost-vi is for channel (context specific) operations. We use + * /dev/nvhost-ctrl-vi for global (context independent) operations on + * vi device. + */ + +#define NVHOST_VI_IOCTL_ENABLE_TPG _IOW(NVHOST_VI_IOCTL_MAGIC, 1, uint) + +#endif diff --git a/include/uapi/media/camera_device.h b/include/uapi/media/camera_device.h new file mode 100644 index 00000000..05c80d5d --- /dev/null +++ b/include/uapi/media/camera_device.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * camera device driver header + * + * Copyright (c) 2018-2022 NVIDIA Corporation. All rights reserved. 
+ */ + +#ifndef __UAPI_CAMERA_DEVICE_H_ +#define __UAPI_CAMERA_DEVICE_H_ + +#include +#include + +#define __CAMERA_DEVICE_ALIGN __aligned(8) + +/* Sensor, focuser, iris etc., */ +#define MAX_DEVICES_PER_CHANNEL 4 + +/* + * Increasing below values must validate + * copy_from or copy_to works properly + */ +#define MAX_COMMANDS 256 +#define MAX_BLOB_SIZE 2048 + +struct i2c_bus { + __u32 reg_base; + __u32 clk_rate; + __u32 flags; + __u8 reserved[4]; +}; + +struct i2c_mux { + bool is_mux_valid; + __u8 mux_channel; + __u16 mux_addr; + __u8 reserved[4]; +}; + +struct i2c_dev { + __u16 addr; + __u8 pad[2]; + __u32 flags; +}; + +struct spi_bus { + __u32 reg_base; + __u32 clk_rate; + __u32 flags; + __u8 reserved[4]; +}; + +struct spi_dev { + __u8 port; + __u16 addr; + __u8 pad; + __u32 flags; + __u8 pad1[4]; +}; + +struct i2c_sensor_cfg { + __u32 num_devs; + struct i2c_bus bus; + struct i2c_mux mux; + struct i2c_dev sd[MAX_DEVICES_PER_CHANNEL]; +}; + +struct spi_sensor_cfg { + __u32 num_devs; + struct spi_bus bus; + struct spi_dev sd[MAX_DEVICES_PER_CHANNEL]; +}; + +struct sensor_cfg { + __u8 type; /* SPI or I2C */ + __u8 pad[3]; /* for alignment */ + union { + struct i2c_sensor_cfg i2c_sensor; + struct spi_sensor_cfg spi_sensor; + } u; +} __CAMERA_DEVICE_ALIGN; + +struct sensor_cmd { + __u32 opcode; + __u32 addr; +}; + +struct sensor_blob { + __u32 num_cmds; + __u32 buf_size; + struct sensor_cmd cmds[MAX_COMMANDS]; + __u8 buf[MAX_BLOB_SIZE]; +} __CAMERA_DEVICE_ALIGN; + +struct sensor_blob_cfg { + __u32 nlines; + struct sensor_blob *blob; +} __CAMERA_DEVICE_ALIGN; + +#define CAMERA_DEVICE_NONE 0 +#define CAMERA_DEVICE_I2C_SENSOR (0x1 << 1) +#define CAMERA_DEVICE_SPI_SENSOR (0x1 << 2) +/* Future extensions - if necessary */ +#define CAMERA_DEVICE_VI (0x1 << 8) +#define CAMERA_DEVICE_CSI (0x1 << 9) +#define CAMERA_DEVICE_ISP (0x1 << 16) + +struct camdev_chan_cfg { + __u32 type; + struct sensor_cfg scfg; +} __CAMERA_DEVICE_ALIGN; + +/* common functionality */ +#define 
CAMERA_DEVICE_REGISTER _IOW('C', 1, struct camdev_chan_cfg) +#define CAMERA_DEVICE_UNREGISTER _IOW('C', 2, __u32) +/* sensor functionality */ +#define SENSOR_BLOB_EXECUTE _IOW('C', 10, struct sensor_blob_cfg) + +#endif diff --git a/include/uapi/media/tegra_camera_platform.h b/include/uapi/media/tegra_camera_platform.h new file mode 100644 index 00000000..18cebaa6 --- /dev/null +++ b/include/uapi/media/tegra_camera_platform.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved. + */ + +#ifndef _UAPI_TEGRA_CAMERA_PLATFORM_H_ +#define _UAPI_TEGRA_CAMERA_PLATFORM_H_ + +#include <linux/ioctl.h> +#include <linux/types.h> + +#define TEGRA_CAMERA_IOCTL_SET_BW _IOW('o', 1, struct bw_info) +#define TEGRA_CAMERA_IOCTL_GET_BW _IOR('o', 2, __u64) +#define TEGRA_CAMERA_IOCTL_GET_CURR_REQ_ISO_BW _IOR('o', 3, __u64) + +struct bw_info { + __u8 is_iso; + __u64 bw; +}; + +#endif \ No newline at end of file diff --git a/include/video/vi4.h b/include/video/vi4.h new file mode 100644 index 00000000..495901a8 --- /dev/null +++ b/include/video/vi4.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Tegra Graphics Host VI + * + * Copyright (c) 2015-2022, NVIDIA Corporation. All rights reserved. 
+ */ + +#ifndef __TEGRA_VI4_H__ +#define __TEGRA_VI4_H__ + +#include <media/mc_common.h> + +struct reset_control; + +extern struct vi_notify_driver nvhost_vi_notify_driver; +void nvhost_vi_notify_error(struct platform_device *); + +struct nvhost_vi_dev { + struct nvhost_vi_notify_dev *hvnd; + struct reset_control *vi_reset; + struct reset_control *vi_tsc_reset; + struct dentry *debug_dir; + int error_irq; + bool busy; + atomic_t overflow; + atomic_t notify_overflow; + atomic_t fmlite_overflow; + struct tegra_mc_vi mc_vi; + unsigned int vi_bypass_bw; +}; + +int nvhost_vi4_prepare_poweroff(struct platform_device *); +int nvhost_vi4_finalize_poweron(struct platform_device *); +void nvhost_vi4_idle(struct platform_device *); +void nvhost_vi4_busy(struct platform_device *); +void nvhost_vi4_reset(struct platform_device *); +int nvhost_vi4_aggregate_constraints(struct platform_device *dev, + int clk_index, + unsigned long floor_rate, + unsigned long pixelrate, + unsigned long bw_constraint); + +int vi4_v4l2_set_la(struct platform_device *pdev, + u32 vi_bypass_bw, u32 is_ioctl); +#endif