media: camera: Build tegra-camera as OOT module

Port camera drivers below from /kernel/nvidia to
/kernel/nvidia-oot as OOT modules:
- Fusa-capture driver
- Tegra V4L2 framework driver
- vi/csi driver
- tegra camera platform driver

Change-Id: I390af27096425bb11e0934201dd1a90f001bb3fa
Signed-off-by: Frank Chen <frankc@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2780698
Reviewed-by: FNU Raunak <fraunak@nvidia.com>
Reviewed-by: Ankur Pawar <ankurp@nvidia.com>
Reviewed-by: Shiva Dubey <sdubey@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
This commit is contained in:
Frank Chen
2022-09-22 16:01:45 -07:00
committed by mobile promotions
parent e2b55e1a2c
commit 92ac7bc35a
74 changed files with 28226 additions and 0 deletions

View File

@@ -1,5 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

# Expose the OOT tree's public headers to every sub-make below.
LINUXINCLUDE += -I$(srctree.nvidia-oot)/include

# Descend into each camera-related subdirectory as an out-of-tree module build.
obj-m += cdi/
obj-m += isc/
obj-m += camera/

View File

@@ -0,0 +1,35 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.

# Header search paths for the tegra-camera module sources.
LINUXINCLUDE += -I$(srctree.nvidia-oot)/include
LINUXINCLUDE += -I$(srctree.nvidia-oot)/drivers/video/tegra/host
# NOTE(review): the next line adds the same host path relative to the kernel
# tree as well — presumably needed when headers live in-tree; confirm both
# include paths are actually required.
LINUXINCLUDE += -I$(srctree)/drivers/video/tegra/host
LINUXINCLUDE += -I$(srctree.nvidia-oot)/drivers/video/tegra/camera
LINUXINCLUDE += -I$(srctree.nvidia-oot)/drivers/media/platform/tegra
# Force-define the Host1x config symbol and promote warnings to errors.
LINUXINCLUDE += -DCONFIG_TEGRA_HOST1X
LINUXINCLUDE += -Werror

# Objects linked into the single tegra-camera.ko module.
tegra-camera-objs := regmap_util.o
tegra-camera-objs += camera_common.o
tegra-camera-objs += camera_gpio.o
tegra-camera-objs += sensor_common.o
tegra-camera-objs += camera_version_utils.o
tegra-camera-objs += nvcamera_log.o
tegra-camera-objs += tegracam_v4l2.o
tegra-camera-objs += tegracam_core.o
tegra-camera-objs += tegracam_ctrls.o
tegra-camera-objs += tegracam_utils.o
tegra-camera-objs += vi/vi5_fops.o
tegra-camera-objs += vi/mc_common.o
tegra-camera-objs += vi/graph.o
tegra-camera-objs += vi/channel.o
tegra-camera-objs += vi/core.o
tegra-camera-objs += csi/csi.o
tegra-camera-objs += nvcsi/csi5_fops.o
tegra-camera-objs += fusa-capture/capture-vi.o
tegra-camera-objs += fusa-capture/capture-common.o
tegra-camera-objs += fusa-capture/capture-vi-channel.o
tegra-camera-objs += fusa-capture/capture-isp-channel.o
obj-m += tegra-camera.o

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,149 @@
// SPDX-License-Identifier: GPL-2.0
/*
* camera_gpio.c - Camera GPIO driver
*
* Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/list.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/gpio.h>
#include "camera_gpio.h"
/* Per-pin tracking entry kept on the global cam_gpio_list. */
struct camera_gpio {
	struct list_head list;	/* link in cam_gpio_list */
	unsigned gpio_num;	/* global GPIO number this entry tracks */
	struct mutex mutex;	/* serializes state changes in cam_gpio_ctrl() */
	atomic_t state_cnt;	/* net assert count across all users */
	atomic_t use_cnt;	/* registration reference count */
};

/* g_mutex guards list mutation (register/deregister). */
static DEFINE_MUTEX(g_mutex);
static LIST_HEAD(cam_gpio_list);
int cam_gpio_register(struct device *dev,
unsigned pin_num) {
struct camera_gpio *new_gpio;
struct camera_gpio *next_gpio;
mutex_lock(&g_mutex);
list_for_each_entry(next_gpio, &cam_gpio_list, list) {
if (next_gpio->gpio_num == pin_num) {
dev_dbg(dev,
"%s: gpio pin %u already registered.\n",
__func__, pin_num);
atomic_inc(&next_gpio->use_cnt);
mutex_unlock(&g_mutex);
return 0;
}
}
/* gpio is not present in the cam_gpio_list, add it */
new_gpio = kzalloc(sizeof(*new_gpio), GFP_KERNEL);
if (!new_gpio) {
dev_err(dev, "%s: memory low!\n", __func__);
mutex_unlock(&g_mutex);
return -EFAULT;
}
dev_dbg(dev, "%s: adding cam gpio %u\n",
__func__, pin_num);
new_gpio->gpio_num = pin_num;
mutex_init(&new_gpio->mutex);
atomic_inc(&new_gpio->use_cnt);
list_add(&new_gpio->list, &cam_gpio_list);
mutex_unlock(&g_mutex);
return 0;
}
EXPORT_SYMBOL(cam_gpio_register);
/**
 * cam_gpio_deregister() - drop a reference on a registered camera GPIO pin.
 * @dev:     requesting device (used for logging only)
 * @pin_num: global GPIO number previously registered
 *
 * The tracking entry is removed and freed when its use count reaches zero.
 */
void cam_gpio_deregister(struct device *dev,
	unsigned pin_num) {
	struct camera_gpio *gpio;

	mutex_lock(&g_mutex);
	list_for_each_entry(gpio, &cam_gpio_list, list) {
		if (gpio->gpio_num != pin_num)
			continue;

		atomic_dec(&gpio->use_cnt);
		if (atomic_read(&gpio->use_cnt) == 0) {
			/* Last user gone: unlink and free the entry. */
			list_del(&gpio->list);
			kfree(gpio);
			dev_dbg(dev,
				"%s: removing cam gpio %u\n",
				__func__, pin_num);
		}
		break;
	}
	mutex_unlock(&g_mutex);
}
EXPORT_SYMBOL(cam_gpio_deregister);
/**
 * cam_gpio_ctrl() - assert or deassert a shared camera GPIO pin.
 * @dev:         requesting device (used for logging only)
 * @pin_num:     global GPIO number previously registered
 * @val:         0 = deassert, 1 = assert (reference-counted via state_cnt)
 * @active_high: polarity; when false the physical level is inverted
 *
 * The pin is only physically written when the net assert count permits, so
 * multiple users can share one pin without fighting over its level.
 *
 * Return: the pin value written (0/1), or -EINVAL if the pin is not in the
 * list or a deassert would drive the state count negative.
 *
 * NOTE(review): the list walk is not protected by g_mutex, so a concurrent
 * cam_gpio_deregister() could free an entry mid-iteration — confirm callers
 * guarantee no concurrent (de)registration.
 */
int cam_gpio_ctrl(struct device *dev,
	unsigned pin_num, int val,
	bool active_high) /* val: 0=deassert, 1=assert */
{
	struct camera_gpio *next_gpio;
	int err = -EINVAL;
	int pin_val;
	bool found = false;

	list_for_each_entry(next_gpio, &cam_gpio_list, list) {
		mutex_lock(&next_gpio->mutex);
		if (next_gpio->gpio_num == pin_num) {
			found = true;
			/* A deassert with no outstanding asserts is a bug. */
			if (!atomic_read(&next_gpio->state_cnt) &&
				!val) {
				dev_err(dev,
					"%s: state cnt can't be < 0\n",
					__func__);
				mutex_unlock(&next_gpio->mutex);
				return err;
			}
			if (val)
				atomic_inc(&next_gpio->state_cnt);
			else
				atomic_dec(&next_gpio->state_cnt);
			/* Map logical assert level to the physical pin level. */
			pin_val = active_high ? val : !val;
			pin_val &= 1;
			err = pin_val;
			/* subtract val allows a 0 check to be
			 * used to indicate that gpio can be written to*/
			if (atomic_read(&next_gpio->state_cnt) - val == 0) {
				gpio_set_value_cansleep(pin_num, pin_val);
				dev_dbg(dev, "%s %u %d\n",
					__func__, pin_num, pin_val);
			}
		}
		mutex_unlock(&next_gpio->mutex);
	}
	if (!found)
		dev_dbg(dev,
			"WARNING %s: gpio %u not in list\n",
			__func__, pin_num);
	return err; /* return value written or error */
}
EXPORT_SYMBOL(cam_gpio_ctrl);

View File

@@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
 */

#ifndef __CAMERA_GPIO_H__
#define __CAMERA_GPIO_H__

/* Register @pin_num (or take another reference on it); 0 or neg. errno. */
int cam_gpio_register(struct device *dev,
	unsigned pin_num);

/* Drop a reference on @pin_num; the tracking entry is freed on last put. */
void cam_gpio_deregister(struct device *dev,
	unsigned pin_num);

/*
 * Assert/deassert @pin_num with reference counting; returns the pin value
 * written (0/1) or neg. errno.
 * NOTE(review): the implementation names this parameter "val"; "ref_inc"
 * here looks stale — consider unifying the names.
 */
int cam_gpio_ctrl(struct device *dev,
	unsigned pin_num, int ref_inc, bool active_high);

#endif
/* __CAMERA_GPIO_H__ */

View File

@@ -0,0 +1,75 @@
// SPDX-License-Identifier: GPL-2.0
/*
* camera_version_utils.c - utilities for different kernel versions
* camera driver supports
*
* Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <media/camera_common.h>
/**
 * tegra_media_entity_init() - set a media entity's type, function and pads.
 * @entity:    media entity to initialize
 * @num_pads:  number of entries in @pad
 * @pad:       pad array for the entity
 * @is_subdev: false for a V4L2 video device node, true for a subdevice
 * @is_sensor: for subdevices, true marks a camera sensor
 *
 * Return: result of media_entity_pads_init().
 */
int tegra_media_entity_init(struct media_entity *entity, u16 num_pads,
	struct media_pad *pad, bool is_subdev, bool is_sensor)
{
	if (!is_subdev) {
		entity->obj_type = MEDIA_ENTITY_TYPE_VIDEO_DEVICE;
		entity->function = MEDIA_ENT_F_IO_V4L;
	} else {
		entity->obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV;
		/*
		 * Non-sensor subdevs get a private function code past the
		 * legacy base — presumably to satisfy the media core's
		 * "entity has a function" expectation; TODO confirm the
		 * +10 offset is intentional.
		 */
		entity->function = is_sensor ? MEDIA_ENT_F_CAM_SENSOR :
			MEDIA_ENT_F_OLD_SUBDEV_BASE + 10;
	}

	return media_entity_pads_init(entity, num_pads, pad);
}
EXPORT_SYMBOL(tegra_media_entity_init);
/* Return true if @entity is backed by a V4L2 subdevice. */
bool tegra_is_v4l2_subdev(struct media_entity *entity)
{
	return is_media_entity_v4l2_subdev(entity);
}
EXPORT_SYMBOL(tegra_is_v4l2_subdev);
/*
 * tegra_media_create_link() - create a pad-to-pad media link.
 * Thin wrapper around media_create_pad_link(), kept for kernel-version
 * portability of the Tegra camera stack.
 */
int tegra_media_create_link(struct media_entity *source, u16 source_pad,
	struct media_entity *sink, u16 sink_pad, u32 flags)
{
	return media_create_pad_link(source, source_pad,
		sink, sink_pad, flags);
}
EXPORT_SYMBOL(tegra_media_create_link);
/*
 * Compare two DV timings within @pclock_delta pixel-clock tolerance.
 * Thin wrapper around v4l2_match_dv_timings(), kept for kernel-version
 * portability.
 */
bool tegra_v4l2_match_dv_timings(struct v4l2_dv_timings *t1,
	struct v4l2_dv_timings *t2,
	unsigned pclock_delta,
	bool match_reduced_fps)
{
	return v4l2_match_dv_timings(t1, t2, pclock_delta, match_reduced_fps);
}
EXPORT_SYMBOL(tegra_v4l2_match_dv_timings);
/**
 * tegra_vb2_dma_init() - reference-counted vb2 DMA-contig init for @dev.
 * @dev:       device whose DMA segment size is configured
 * @alloc_ctx: unused, kept for interface compatibility
 * @size:      unused, kept for interface compatibility
 * @refcount:  shared init reference count
 *
 * Only the first caller performs the actual initialization.
 *
 * Return: 0 on success, -ENOMEM if the vb2 setup fails.
 */
int tegra_vb2_dma_init(struct device *dev, void **alloc_ctx,
	unsigned int size, atomic_t *refcount)
{
	int ret = 0;

	if (atomic_inc_return(refcount) > 1)
		return 0;

	if (vb2_dma_contig_set_max_seg_size(dev, SZ_64K)) {
		dev_err(dev, "failed to init vb2 buffer\n");
		/*
		 * Fix: drop the reference taken above so that a later call
		 * retries the initialization instead of silently skipping it.
		 */
		atomic_dec(refcount);
		ret = -ENOMEM;
	}
	return ret;
}
EXPORT_SYMBOL(tegra_vb2_dma_init);
/*
 * tegra_vb2_dma_cleanup() - drop one reference taken by tegra_vb2_dma_init().
 * Logs an error if puts outnumber gets; deliberately does NOT undo the vb2
 * segment-size setup (see comment below).
 */
void tegra_vb2_dma_cleanup(struct device *dev, void *alloc_ctx,
	atomic_t *refcount)
{
	if (atomic_dec_return(refcount) < 0)
		dev_err(dev, "%s: put to negative references\n", __func__);
	/* dont call vb2_dma_contig_clear_max_seg_size as it will */
	/* call kfree dma_parms but dma_parms is static member */
}
EXPORT_SYMBOL(tegra_vb2_dma_cleanup);

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,676 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2022 NVIDIA Corporation. All rights reserved.
/**
* @file drivers/media/platform/tegra/camera/fusa-capture/capture-common.c
*
* @brief VI/ISP channel common operations for the T186/T194 Camera RTCPU
* platform.
*/
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/nospec.h>
#include <linux/nvhost.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <linux/atomic.h>
#include <media/mc_common.h>
#include <media/fusa-capture/capture-common.h>
/**
 * @brief Capture buffer management table.
 */
struct capture_buffer_table {
	struct device *dev; /**< Originating device (VI or ISP) */
	struct kmem_cache *cache; /**< SLAB allocator cache */
	rwlock_t hlock; /**< Reader/writer lock on table contents */
	DECLARE_HASHTABLE(hhead, 4U); /**< Buffer hashtable head */
};

/**
 * @brief Capture surface NvRm and IOVA addresses handle.
 */
union capture_surface {
	uint64_t raw; /**< Pinned VI or ISP IOVA address */
	struct {
		uint32_t offset;
		/**<
		 * Offset of surface or pushbuffer address in descriptor
		 * (lower 32 bits) [byte]
		 */
		uint32_t hmem; /**< NvRm handle (upper 32 bits) */
	};
};

/**
 * @brief Capture buffer mapping (pinned).
 */
struct capture_mapping {
	struct hlist_node hnode; /**< Hash table node struct */
	atomic_t refcnt; /**< Capture mapping reference count */
	struct dma_buf *buf; /**< Capture mapping dma_buf */
	struct dma_buf_attachment *atch;
	/**< dma_buf attachment (VI or ISP device) */
	struct sg_table *sgt; /**< Scatterlist to dma_buf attachment */
	unsigned int flag; /**< Bitmask access flag */
};
/**
 * @brief Determine whether all the bits of @a other are set in @a self.
 *
 * @param[in] self Bitmask flag to be compared
 * @param[in] other Bitmask value(s) to compare
 *
 * @retval true compatible
 * @retval false not compatible
 */
static inline bool flag_compatible(
	unsigned int self,
	unsigned int other)
{
	/* Equivalent to (self & other) == other: no bit of other missing. */
	return (other & ~self) == 0U;
}
/**
 * @brief Extract the read/write access bits (BUFFER_RDWR mask) of @a flag.
 *
 * @param[in] flag Bitmask flag to be examined
 *
 * @returns the BUFFER_READ/BUFFER_WRITE bits of @a flag (0 if neither set)
 */
static inline unsigned int flag_access_mode(
	unsigned int flag)
{
	return flag & BUFFER_RDWR;
}
/**
 * @brief Map capture common buffer access flag to a Linux dma_data_direction.
 *
 * @param[in] flag Bitmask access flag of capture common buffer
 *
 * @returns @ref dma_data_direction mapping
 */
static inline enum dma_data_direction flag_dma_direction(
	unsigned int flag)
{
	/* Indexed by the BUFFER_READ/BUFFER_WRITE bits only; neither bit
	 * set defaults to bidirectional. */
	static const enum dma_data_direction dir[4U] = {
		[0U] = DMA_BIDIRECTIONAL,
		[BUFFER_READ] = DMA_TO_DEVICE,
		[BUFFER_WRITE] = DMA_FROM_DEVICE,
		[BUFFER_RDWR] = DMA_BIDIRECTIONAL,
	};

	return dir[flag_access_mode(flag)];
}
/**
 * @brief Retrieve the scatterlist IOVA address of the capture surface mapping.
 *
 * @param[in] pin The capture_mapping of the buffer
 *
 * @returns Physical address of scatterlist mapping
 */
static inline dma_addr_t mapping_iova(
	const struct capture_mapping *pin)
{
	dma_addr_t addr = sg_dma_address(pin->sgt->sgl);

	/* Fall back to the physical address when the sgl carries no DMA
	 * address — presumably the no-IOMMU / identity-mapped case;
	 * TODO confirm. */
	return (addr != 0) ? addr : sg_phys(pin->sgt->sgl);
}
/**
 * @brief Retrieve the dma_buf pointer of a capture surface mapping.
 *
 * @param[in] pin The capture_mapping of the buffer
 *
 * @returns Pointer to the capture_mapping @ref dma_buf
 */
static inline struct dma_buf *mapping_buf(
	const struct capture_mapping *pin)
{
	return pin->buf;
}
/**
 * @brief Determine whether BUFFER_ADD is set in the capture surface mapping's
 * access flag (i.e. the mapping is preserved across requests).
 *
 * @param[in] pin The capture_mapping of the buffer
 *
 * @retval true BUFFER_ADD set
 * @retval false BUFFER_ADD not set
 */
static inline bool mapping_preserved(
	const struct capture_mapping *pin)
{
	return (bool)(pin->flag & BUFFER_ADD);
}
/**
 * @brief Set or unset the BUFFER_ADD bit in the capture surface mapping's
 * access flag, and correspondingly increment or decrement the mapping's
 * refcnt (a preserved mapping holds its own reference).
 *
 * @param[in] pin The capture_mapping of the buffer
 * @param[in] val true to preserve the mapping, false to release preservation
 */
static inline void set_mapping_preservation(
	struct capture_mapping *pin,
	bool val)
{
	if (!val) {
		pin->flag &= ~BUFFER_ADD;
		atomic_dec(&pin->refcnt);
		return;
	}

	pin->flag |= BUFFER_ADD;
	atomic_inc(&pin->refcnt);
}
/**
 * @brief Iteratively search a capture buffer management table to find the entry
 * with @a buf, and @a flag bits set in the capture mapping.
 *
 * On success, the capture mapping refcnt is incremented by one if it is
 * non-zero (a zero refcnt means the mapping is being torn down and must not
 * be handed out).
 *
 * @param[in] tab The capture buffer management table
 * @param[in] buf The mapping dma_buf pointer to match
 * @param[in] flag The mapping bitmask access flag to compare
 *
 * @returns @ref capture_mapping pointer (success), NULL (failure)
 */
static struct capture_mapping *find_mapping(
	struct capture_buffer_table *tab,
	struct dma_buf *buf,
	unsigned int flag)
{
	struct capture_mapping *pin;
	bool success;

	read_lock(&tab->hlock);
	/* Buckets are keyed by the dma_buf pointer value. */
	hash_for_each_possible(tab->hhead, pin, hnode, (unsigned long)buf) {
		if (
			(pin->buf == buf) &&
			flag_compatible(pin->flag, flag)
		) {
			/* Take a reference only if the mapping is alive. */
			success = atomic_inc_not_zero(&pin->refcnt);
			if (success) {
				read_unlock(&tab->hlock);
				return pin;
			}
		}
	}
	read_unlock(&tab->hlock);

	return NULL;
}
/**
 * @brief Add an NvRm buffer to the buffer management table and initialize its
 * refcnt to 1, or reuse a compatible existing mapping (taking a reference).
 *
 * @param[in] tab The capture buffer management table
 * @param[in] fd The NvRm handle
 * @param[in] flag The mapping bitmask access flag to set
 *
 * @returns @ref capture_mapping pointer (success), PTR_ERR (failure)
 */
static struct capture_mapping *get_mapping(
	struct capture_buffer_table *tab,
	uint32_t fd,
	unsigned int flag)
{
	struct capture_mapping *pin;
	struct dma_buf *buf;
	void *err;

	if (unlikely(tab == NULL)) {
		pr_err("%s: invalid buffer table\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	buf = dma_buf_get((int)fd);
	if (IS_ERR(buf)) {
		dev_err(tab->dev, "%s:%d: invalid memfd %u; errno %ld \n",
			__func__, __LINE__, fd, PTR_ERR(buf));
		return ERR_CAST(buf);
	}

	/* Reuse a compatible existing mapping; drop the extra dma-buf ref. */
	pin = find_mapping(tab, buf, flag);
	if (pin != NULL) {
		dma_buf_put(buf);
		return pin;
	}

	pin = kmem_cache_alloc(tab->cache, GFP_KERNEL);
	if (unlikely(pin == NULL)) {
		err = ERR_PTR(-ENOMEM);
		goto err0;
	}

	pin->atch = dma_buf_attach(buf, tab->dev);
	if (unlikely(IS_ERR(pin->atch))) {
		err = pin->atch;
		goto err1;
	}

	pin->sgt = dma_buf_map_attachment(pin->atch, flag_dma_direction(flag));
	if (unlikely(IS_ERR(pin->sgt))) {
		err = pin->sgt;
		goto err2;
	}

	pin->flag = flag;
	pin->buf = buf;
	atomic_set(&pin->refcnt, 1U);
	INIT_HLIST_NODE(&pin->hnode);

	write_lock(&tab->hlock);
	hash_add(tab->hhead, &pin->hnode, (unsigned long)pin->buf);
	write_unlock(&tab->hlock);

	return pin;

err2:
	dma_buf_detach(buf, pin->atch);
err1:
	kmem_cache_free(tab->cache, pin);
err0:
	dma_buf_put(buf);
	/*
	 * Fix: report the actual failure code; the original printed
	 * PTR_ERR(buf) here, but buf is a *valid* pointer on these paths,
	 * so the logged errno was garbage.
	 */
	dev_err(tab->dev, "%s:%d: memfd %u, flag %u; errno %ld \n",
		__func__, __LINE__, fd, flag, PTR_ERR(err));
	return err;
}
/**
 * @brief Allocate and initialize a capture buffer management table for @a dev.
 *
 * @param[in] dev Originating device (VI or ISP)
 *
 * @returns table pointer (success), NULL (allocation failure)
 */
struct capture_buffer_table *create_buffer_table(
	struct device *dev)
{
	struct capture_buffer_table *tab;

	tab = kmalloc(sizeof(*tab), GFP_KERNEL);
	if (tab == NULL)
		return NULL;

	tab->cache = KMEM_CACHE(capture_mapping, 0U);
	if (tab->cache == NULL) {
		kfree(tab);
		return NULL;
	}

	tab->dev = dev;
	hash_init(tab->hhead);
	rwlock_init(&tab->hlock);

	return tab;
}
EXPORT_SYMBOL_GPL(create_buffer_table);
/**
 * @brief Tear down a capture buffer management table: unmap, detach and free
 * every remaining mapping, then destroy the cache and the table itself.
 *
 * @param[in] tab The capture buffer management table (NULL is a no-op)
 */
void destroy_buffer_table(
	struct capture_buffer_table *tab)
{
	size_t bkt;
	struct hlist_node *next;
	struct capture_mapping *pin;

	if (unlikely(tab == NULL))
		return;

	write_lock(&tab->hlock);

	/* _safe variant: entries are deleted while iterating. */
	hash_for_each_safe(tab->hhead, bkt, next, pin, hnode) {
		hash_del(&pin->hnode);

		dma_buf_unmap_attachment(
			pin->atch, pin->sgt, flag_dma_direction(pin->flag));
		dma_buf_detach(pin->buf, pin->atch);
		dma_buf_put(pin->buf);

		kmem_cache_free(tab->cache, pin);
	}

	write_unlock(&tab->hlock);

	kmem_cache_destroy(tab->cache);
	kfree(tab);
}
EXPORT_SYMBOL_GPL(destroy_buffer_table);
static DEFINE_MUTEX(req_lock); /* serializes preserve/release requests */

/**
 * @brief Preserve (BUFFER_ADD set in @a flag) or release a buffer mapping.
 *
 * With BUFFER_ADD, the buffer is pinned and marked preserved so it outlives
 * individual capture requests; without it, a previously preserved mapping
 * for @a memfd is un-preserved and may be freed.
 *
 * @param[in] tab The capture buffer management table
 * @param[in] memfd dma-buf fd of the buffer
 * @param[in] flag BUFFER_ADD plus access-mode bits
 *
 * @returns 0 (success), neg. errno (failure)
 */
int capture_buffer_request(
	struct capture_buffer_table *tab,
	uint32_t memfd,
	uint32_t flag)
{
	struct capture_mapping *pin;
	struct dma_buf *buf;
	bool add = (bool)(flag & BUFFER_ADD);
	int err = 0;

	if (unlikely(tab == NULL)) {
		pr_err("%s: invalid buffer table\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&req_lock);

	if (add) {
		/* Pin (or reuse) the mapping with the requested access mode. */
		pin = get_mapping(tab, memfd, flag_access_mode(flag));
		if (IS_ERR(pin)) {
			err = PTR_ERR_OR_ZERO(pin);
			dev_err(tab->dev, "%s:%d: memfd %u, flag %u; errno %d",
				__func__, __LINE__, memfd, flag, err);
			goto end;
		}
		if (mapping_preserved(pin)) {
			err = -EEXIST;
			dev_err(tab->dev, "%s:%d: memfd %u exists; errno %d",
				__func__, __LINE__, memfd, err);
			put_mapping(tab, pin);
			goto end;
		}
	} else {
		/* Look up the preserved mapping without creating one. */
		buf = dma_buf_get((int)memfd);
		if (IS_ERR(buf)) {
			err = PTR_ERR_OR_ZERO(buf);
			dev_err(tab->dev, "%s:%d: invalid memfd %u; errno %d",
				__func__, __LINE__, memfd, err);
			goto end;
		}
		pin = find_mapping(tab, buf, BUFFER_ADD);
		if (pin == NULL) {
			err = -ENOENT;
			dev_err(tab->dev, "%s:%d: memfd %u not exists; errno %d",
				__func__, __LINE__, memfd, err);
			dma_buf_put(buf);
			goto end;
		}
		dma_buf_put(buf);
	}

	/* Flip preservation, then drop the reference taken above. */
	set_mapping_preservation(pin, add);
	put_mapping(tab, pin);

end:
	mutex_unlock(&req_lock);
	return err;
}
EXPORT_SYMBOL_GPL(capture_buffer_request);
/*
 * capture_buffer_add() - convenience wrapper: pin @fd for read/write and
 * mark it preserved in table @t.
 */
int capture_buffer_add(
	struct capture_buffer_table *t,
	uint32_t fd)
{
	const uint32_t flag = BUFFER_ADD | BUFFER_RDWR;

	return capture_buffer_request(t, fd, flag);
}
EXPORT_SYMBOL_GPL(capture_buffer_add);
/**
 * @brief Drop a reference on @a pin; on the last put the mapping is removed
 * from table @a t, unmapped, detached and freed.
 *
 * @param[in] t The capture buffer management table
 * @param[in] pin The capture_mapping to release
 */
void put_mapping(
	struct capture_buffer_table *t,
	struct capture_mapping *pin)
{
	bool zero;

	zero = atomic_dec_and_test(&pin->refcnt);
	if (zero) {
		/* A preserved mapping holds its own reference, so hitting
		 * zero here means an unbalanced put — undo and bail. */
		if (unlikely(mapping_preserved(pin))) {
			dev_err(t->dev, "%s:%d: unexpected put for a preserved mapping",
				__func__, __LINE__);
			atomic_inc(&pin->refcnt);
			return;
		}
		write_lock(&t->hlock);
		hash_del(&pin->hnode);
		write_unlock(&t->hlock);

		dma_buf_unmap_attachment(
			pin->atch, pin->sgt, flag_dma_direction(pin->flag));
		dma_buf_detach(pin->buf, pin->atch);
		dma_buf_put(pin->buf);

		kmem_cache_free(t->cache, pin);
	}
}
EXPORT_SYMBOL_GPL(put_mapping);
/**
 * @brief Pin @a mem_handle and return the IOVA and remaining size of the
 * region starting at @a mem_offset; the mapping is recorded in @a unpins so
 * it can be released when the request completes.
 *
 * @param[in] buf_ctx Buffer management table to pin through
 * @param[in] mem_handle dma-buf fd (0 means "field unused": success, no-op)
 * @param[in] mem_offset Byte offset into the buffer
 * @param[out] meminfo_base_address IOVA of @a mem_offset within the buffer
 * @param[out] meminfo_size Remaining size from @a mem_offset [byte]
 * @param[in,out] unpins Per-request unpin list the mapping is appended to
 *
 * @returns 0 (success), neg. errno (failure)
 */
int capture_common_pin_and_get_iova(struct capture_buffer_table *buf_ctx,
	uint32_t mem_handle, uint64_t mem_offset,
	uint64_t *meminfo_base_address, uint64_t *meminfo_size,
	struct capture_common_unpins *unpins)
{
	struct capture_mapping *map;
	struct dma_buf *buf;
	uint64_t size;
	uint64_t iova;

	/* NULL is a valid input indicating an unused data field */
	if (!mem_handle)
		return 0;

	if (unpins->num_unpins >= MAX_PIN_BUFFER_PER_REQUEST) {
		pr_err("%s: too many buffers per request\n", __func__);
		return -ENOMEM;
	}

	map = get_mapping(buf_ctx, mem_handle, BUFFER_RDWR);
	if (IS_ERR(map)) {
		pr_err("%s: cannot get mapping\n", __func__);
		return -EINVAL;
	}

	buf = mapping_buf(map);
	size = buf->size;
	iova = mapping_iova(map);

	if (mem_offset >= size) {
		pr_err("%s: offset is out of bounds\n", __func__);
		/* Fix: drop the reference taken by get_mapping() above —
		 * the original leaked the mapping on this error path. */
		put_mapping(buf_ctx, map);
		return -EINVAL;
	}

	*meminfo_base_address = iova + mem_offset;
	*meminfo_size = size - mem_offset;

	/* Record the mapping so the request teardown unpins it. */
	unpins->data[unpins->num_unpins] = map;
	unpins->num_unpins++;

	return 0;
}
EXPORT_SYMBOL_GPL(capture_common_pin_and_get_iova);
/**
 * @brief Set up a progress status notifier: vmap the shared dma-buf and zero
 * @a buffer_size bytes so the UMD can poll per-frame progress updates.
 *
 * @param[out] status_notifier Notifier context to initialize
 * @param[in] mem dma-buf fd of the shared notifier buffer
 * @param[in] buffer_size Size of the notifier array [byte]
 * @param[in] mem_offset Offset of the notifier array within @a mem [byte]
 *
 * @returns 0 (success), neg. errno (failure)
 */
int capture_common_setup_progress_status_notifier(
	struct capture_common_status_notifier *status_notifier,
	uint32_t mem,
	uint32_t buffer_size,
	uint32_t mem_offset)
{
	struct dma_buf *dmabuf;
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)
	struct dma_buf_map map;
#else
	struct iosys_map map;
#endif
	void *va;
	int err = 0;

	/*
	 * Fix: validate the arithmetic before taking the dma-buf reference;
	 * the original returned from this check after dma_buf_get() without
	 * a dma_buf_put(), leaking the reference.
	 */
	if (buffer_size > U32_MAX - mem_offset) {
		pr_err("%s: buffer_size or mem_offset too large\n", __func__);
		return -EINVAL;
	}

	/* take reference for the userctx */
	dmabuf = dma_buf_get(mem);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	if ((buffer_size + mem_offset) > dmabuf->size) {
		dma_buf_put(dmabuf);
		pr_err("%s: invalid offset\n", __func__);
		return -EINVAL;
	}

	/* map handle and clear error notifier struct */
	err = dma_buf_vmap(dmabuf, &map);
	va = err ? NULL : map.vaddr;
	if (!va) {
		dma_buf_put(dmabuf);
		pr_err("%s: Cannot map notifier handle\n", __func__);
		return -ENOMEM;
	}
	memset(va, 0, buffer_size);

	status_notifier->buf = dmabuf;
	status_notifier->va = va;
	status_notifier->offset = mem_offset;

	return 0;
}
EXPORT_SYMBOL_GPL(capture_common_setup_progress_status_notifier);
/**
 * @brief Release a progress status notifier: vunmap the shared buffer, drop
 * the dma-buf reference and reset the notifier context fields.
 *
 * @param[in,out] progress_status_notifier Notifier context to release
 *
 * @returns 0
 */
int capture_common_release_progress_status_notifier(
	struct capture_common_status_notifier *progress_status_notifier)
{
	struct dma_buf *dmabuf = progress_status_notifier->buf;
	void *va = progress_status_notifier->va;
/* dma_buf_map was renamed to iosys_map in v6.0 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(va);
#else
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(va);
#endif

	if (dmabuf != NULL) {
		if (va != NULL)
			dma_buf_vunmap(dmabuf, &map);

		dma_buf_put(dmabuf);
	}

	/* Reset unconditionally so the notifier is safe to set up again. */
	progress_status_notifier->buf = NULL;
	progress_status_notifier->va = NULL;
	progress_status_notifier->offset = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(capture_common_release_progress_status_notifier);
/**
 * @brief Write @a new_val into slot @a buffer_slot of the shared progress
 * status notifier array.
 *
 * @param[in] progress_status_notifier Previously set-up notifier context
 * @param[in] buffer_slot Slot index to write
 * @param[in] buffer_depth Number of slots in the notifier array
 * @param[in] new_val Status value to store
 *
 * @returns 0 (success), neg. errno (failure)
 */
int capture_common_set_progress_status(
	struct capture_common_status_notifier *progress_status_notifier,
	uint32_t buffer_slot,
	uint32_t buffer_depth,
	uint8_t new_val)
{
	uint32_t *status_notifier;

	/* Robustness fix: the original dereferenced va unconditionally,
	 * oopsing if the notifier was never set up. */
	if (progress_status_notifier->va == NULL) {
		pr_err("%s: progress status notifier not set up!", __func__);
		return -EINVAL;
	}

	if (buffer_slot >= buffer_depth) {
		pr_err("%s: Invalid offset!", __func__);
		return -EINVAL;
	}
	/* Clamp the index against speculative out-of-bounds access. */
	buffer_slot = array_index_nospec(buffer_slot, buffer_depth);

	status_notifier = (uint32_t *) (progress_status_notifier->va +
			progress_status_notifier->offset);

	/*
	 * Since UMD and KMD can both write to the shared progress status
	 * notifier buffer, insert memory barrier here to ensure that any
	 * other store operations to the buffer would be done before the
	 * write below.
	 */
	wmb();
	status_notifier[buffer_slot] = new_val;

	return 0;
}
EXPORT_SYMBOL_GPL(capture_common_set_progress_status);
/**
 * @brief Pin a dma-buf for CPU and device access: attach to @a dev, map the
 * scatterlist and vmap the buffer, recording everything in @a unpin_data.
 *
 * Acquired resources are stored into @a unpin_data progressively, so the
 * failure path can rely on capture_common_unpin_memory() for cleanup.
 * NOTE(review): assumes the caller passes a zero-initialized @a unpin_data —
 * confirm at call sites.
 *
 * @param[in] dev Device to attach the dma-buf to
 * @param[in] mem dma-buf fd
 * @param[out] unpin_data Pin record (buf/attach/sgt/iova/va)
 *
 * @returns 0 (success), neg. errno (failure)
 */
int capture_common_pin_memory(
	struct device *dev,
	uint32_t mem,
	struct capture_common_buf *unpin_data)
{
	struct dma_buf *buf;
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)
	struct dma_buf_map map;
#else
	struct iosys_map map;
#endif
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	int err = 0;

	buf = dma_buf_get(mem);
	if (IS_ERR(buf)) {
		err = PTR_ERR(buf);
		goto fail;
	}
	/* Fix: record each resource as soon as it is acquired — the original
	 * only filled unpin_data at the end, so the fail-path cleanup missed
	 * (and leaked) buf/attach/sgt acquired earlier in this call. */
	unpin_data->buf = buf;

	attach = dma_buf_attach(buf, dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto fail;
	}
	unpin_data->attach = attach;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		err = PTR_ERR(sgt);
		goto fail;
	}
	unpin_data->sgt = sgt;

	/* No DMA address means no IOMMU translation; use the phys addr. */
	if (sg_dma_address(sgt->sgl) == 0)
		sg_dma_address(sgt->sgl) = sg_phys(sgt->sgl);

	err = dma_buf_vmap(buf, &map);
	unpin_data->va = err ? NULL : map.vaddr;
	if (unpin_data->va == NULL) {
		pr_err("%s: failed to map pinned memory\n", __func__);
		/* Fix: guarantee a negative errno even if vmap returned 0
		 * with a NULL vaddr (the original could return "success"). */
		if (err == 0)
			err = -ENOMEM;
		goto fail;
	}

	unpin_data->iova = sg_dma_address(sgt->sgl);

	return 0;

fail:
	capture_common_unpin_memory(unpin_data);
	return err;
}
EXPORT_SYMBOL_GPL(capture_common_pin_memory);
/**
 * @brief Undo capture_common_pin_memory(): vunmap, unmap the attachment,
 * detach and drop the dma-buf reference, then reset all fields.
 *
 * Safe to call with partially-populated (NULL) fields; each teardown step
 * is guarded individually.
 *
 * @param[in,out] unpin_data Pin record to release and clear
 */
void capture_common_unpin_memory(
	struct capture_common_buf *unpin_data)
{
/* dma_buf_map was renamed to iosys_map in v6.0 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(unpin_data->va);
#else
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(unpin_data->va);
#endif

	if (unpin_data->va)
		dma_buf_vunmap(unpin_data->buf, &map);
	if (unpin_data->sgt != NULL)
		dma_buf_unmap_attachment(unpin_data->attach, unpin_data->sgt,
			DMA_BIDIRECTIONAL);
	if (unpin_data->attach != NULL)
		dma_buf_detach(unpin_data->buf, unpin_data->attach);
	if (unpin_data->buf != NULL)
		dma_buf_put(unpin_data->buf);

	unpin_data->sgt = NULL;
	unpin_data->attach = NULL;
	unpin_data->buf = NULL;
	unpin_data->iova = 0;
	unpin_data->va = NULL;
}
EXPORT_SYMBOL_GPL(capture_common_unpin_memory);

View File

@@ -0,0 +1,627 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2022 NVIDIA Corporation. All rights reserved.
/**
* @file drivers/media/platform/tegra/camera/fusa-capture/capture-isp-channel.c
*
* @brief ISP channel character device driver for the T186/T194 Camera RTCPU
* platform.
*/
#include <asm/ioctls.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/of_platform.h>
#include <linux/nvhost.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <media/fusa-capture/capture-isp.h>
#include <media/fusa-capture/capture-isp-channel.h>
/**
 * @todo This parameter is platform-dependent and should be retrieved from the
 * Device Tree.
 */
#define MAX_ISP_CHANNELS 64

/**
 * @brief ISP channel character device driver context.
 */
struct isp_channel_drv {
	struct device *dev; /**< ISP kernel @em device */
	u8 num_channels; /**< No. of ISP channel character devices */
	struct mutex lock; /**< ISP channel driver context lock. */
	struct platform_device *ndev; /**< ISP kernel @em platform_device */
	const struct isp_channel_drv_ops *ops;
	/**< ISP fops for Host1x syncpt/gos allocations */
	struct tegra_isp_channel *channels[];
	/**< Allocated ISP channel contexts (flexible array of
	 * num_channels entries) */
};
/**
* @defgroup ISP_CHANNEL_IOCTLS
*
* @brief ISP channel character device IOCTL API
*
* Clients in the UMD may open sysfs character devices representing ISP
* channels, and perform configuration, and enqueue buffers in capture and
* program requests to the low-level RCE subsystem via these IOCTLs.
*
* @{
*/
/**
* @brief Set up ISP channel resources and request FW channel allocation in RCE.
*
* Initialize the ISP channel context and synchronization primitives, pin memory
* for the capture and program process descriptor queues, set up the buffer
* management table, initialize the capture/capture-control IVC channels and
* request ISP FW channel allocation in RCE.
*
* @param[in] ptr Pointer to a struct @ref isp_capture_setup
*
* @returns 0 (success), neg. errno (failure)
*/
#define ISP_CAPTURE_SETUP \
_IOW('I', 1, struct isp_capture_setup)
/**
* @brief Release the ISP FW channel allocation in RCE, and all resources and
* contexts in the KMD.
*
* @param[in] rel uint32_t bitmask of @ref CAPTURE_CHANNEL_RESET_FLAGS
* @returns 0 (success), neg. errno (failure)
*/
#define ISP_CAPTURE_RELEASE \
_IOW('I', 2, __u32)
/**
* @brief Reset the ISP channel in RCE synchronously w/ the KMD; all pending
* capture/program descriptors in the queue are discarded and syncpoint values
* fast-forwarded to unblock waiting clients.
*
* @param[in] rst uint32_t bitmask of @ref CAPTURE_CHANNEL_RESET_FLAGS
*
* @returns 0 (success), neg. errno (failure)
*/
#define ISP_CAPTURE_RESET \
_IOW('I', 3, __u32)
/**
* @brief Retrieve the ids and current values of the progress, stats progress
* syncpoints, and ISP FW channel allocated by RCE.
*
* If successful, the queried values are written back to the input struct.
*
* @param[in,out] ptr Pointer to a struct @ref isp_capture_info
*
* @returns 0 (success), neg. errno (failure)
*/
#define ISP_CAPTURE_GET_INFO \
_IOR('I', 4, struct isp_capture_info)
/**
* @brief Enqueue a process capture request to RCE, input and prefences are
* allocated, and the addresses to surface buffers in the descriptor (referenced
* by the buffer_index) are pinned and patched.
*
* @param[in] ptr Pointer to a struct @ref isp_capture_req
*
* @returns 0 (success), neg. errno (failure)
*/
#define ISP_CAPTURE_REQUEST \
_IOW('I', 5, struct isp_capture_req)
/**
* @brief Wait on the next completion of an enqueued frame, signalled by RCE.
* The status in the frame's capture descriptor is safe to read when this
* completes w/o a -ETIMEDOUT or other error.
*
* @note This call completes for the frame at the head of the FIFO queue, and is
* not necessarily for the most recently enqueued process capture request.
*
* @param[in] status uint32_t timeout [ms], 0 for indefinite
*
* @returns 0 (success), neg. errno (failure)
*/
#define ISP_CAPTURE_STATUS \
_IOW('I', 6, __u32)
/**
* @brief Enqueue a program request to RCE, the addresses to the push buffer in
* the descriptor (referenced by the buffer_index) are pinned and patched.
*
* @param[in] ptr Pointer to a struct @ref isp_program_req
*
* @returns 0 (success), neg. errno (failure)
*/
#define ISP_CAPTURE_PROGRAM_REQUEST \
_IOW('I', 7, struct isp_program_req)
/**
* @brief Wait on the next completion of an enqueued program, signalled by RCE.
* The program execution is finished and is safe to free when this call
* completes.
*
* @note This call completes for the program at the head of the FIFO queue, and
* is not necessarily for the most recently enqueued program request.
* @returns 0 (success), neg. errno (failure)
*/
#define ISP_CAPTURE_PROGRAM_STATUS \
_IOW('I', 8, __u32)
/**
* @brief Enqueue a joint capture and program request to RCE; this is equivalent
* to calling @ref ISP_CAPTURE_PROGRAM_REQUEST and @ref ISP_CAPTURE_REQUEST
* sequentially, but the number of KMD-RCE IVC transmissions is reduced to one
* in each direction for every frame.
*
* @param[in] ptr Pointer to a struct @ref isp_capture_req_ex
*
* @returns 0 (success), neg. errno (failure)
*/
#define ISP_CAPTURE_REQUEST_EX \
_IOW('I', 9, struct isp_capture_req_ex)
/**
* @brief Set up the combined capture and program process progress status
* notifier array, which is a replacement for the blocking
* @ref ISP_CAPTURE_STATUS and @ref ISP_CAPTURE_PROGRAM_STATUS calls; allowing
* for out-of-order frame process completion notifications.
*
* The values written by the KMD are any of the
* @ref CAPTURE_PROGRESS_NOTIFIER_STATES.
*
* @param[in] ptr Pointer to a struct @ref isp_capture_progress_status_req
*
* @returns 0 (success), neg. errno (failure)
*/
#define ISP_CAPTURE_SET_PROGRESS_STATUS_NOTIFIER \
_IOW('I', 10, struct isp_capture_progress_status_req)
/**
* @brief Perform an operation on the surface buffer by setting the bitwise
* @a flag field with @ref CAPTURE_BUFFER_OPS flags.
*
* @param[in] ptr Pointer to a struct @ref isp_buffer_req.
*
* @returns 0 (success), neg. errno (failure)
*/
#define ISP_CAPTURE_BUFFER_REQUEST \
_IOW('I', 11, struct isp_buffer_req)
/** @} */
/**
 * @brief Power on ISP via Host1x. The ISP channel is registered as an NvHost
 * ISP client and the reference count is incremented by one.
 *
 * @param[in] chan ISP channel context
 * @returns 0 (success), neg. errno (failure)
 */
static int isp_channel_power_on(
	struct tegra_isp_channel *chan)
{
	int ret = 0;

	dev_dbg(chan->isp_dev, "isp_channel_power_on\n");

	ret = nvhost_module_add_client(chan->ndev, chan->priv);
	if (ret < 0) {
		dev_err(chan->isp_dev,
			"%s: failed to add isp client\n", __func__);
		return ret;
	}

	ret = nvhost_module_busy(chan->ndev);
	if (ret < 0) {
		dev_err(chan->isp_dev,
			"%s: failed to power on isp\n", __func__);
		/* Fix: undo the client registration so the add/remove
		 * pairing stays balanced on this error path. */
		nvhost_module_remove_client(chan->ndev, chan->priv);
		return ret;
	}

	return 0;
}
/**
 * @brief Power off ISP via Host1x. The NvHost module reference count is
 * decreased by one and the ISP channel is unregistered as a client.
 *
 * Mirrors isp_channel_power_on() in reverse order.
 *
 * @param[in] chan ISP channel context
 */
static void isp_channel_power_off(
	struct tegra_isp_channel *chan)
{
	dev_dbg(chan->isp_dev, "isp_channel_power_off\n");

	nvhost_module_idle(chan->ndev);
	nvhost_module_remove_client(chan->ndev, chan->priv);
}
/* Singleton driver context; guarded by chdrv_lock. */
static struct isp_channel_drv *chdrv_;
static DEFINE_MUTEX(chdrv_lock);

/**
 * @brief Open an ISP channel character device node, power on the camera
 * subsystem and initialize the channel driver context.
 *
 * The act of opening an ISP channel character device node does not entail the
 * reservation of an ISP channel, ISP_CAPTURE_SETUP must be called afterwards
 * to request an allocation by RCE.
 *
 * This is the @a open file operation handler for an ISP channel node.
 *
 * @param[in] inode ISP channel character device inode struct
 * @param[in] file ISP channel character device file struct
 *
 * @returns 0 (success), neg. errno (failure)
 */
static int isp_channel_open(
	struct inode *inode,
	struct file *file)
{
	struct tegra_isp_channel *chan;
	unsigned int channel = iminor(inode);
	struct isp_channel_drv *chan_drv;
	int err;

	if (mutex_lock_interruptible(&chdrv_lock))
		return -ERESTARTSYS;

	chan_drv = chdrv_;

	if (chan_drv == NULL || channel >= chan_drv->num_channels) {
		mutex_unlock(&chdrv_lock);
		return -ENODEV;
	}
	mutex_unlock(&chdrv_lock);
	/* NOTE(review): chan_drv is used below after chdrv_lock is dropped;
	 * presumably driver teardown cannot race an open — confirm. */

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (unlikely(chan == NULL))
		return -ENOMEM;

	chan->drv = chan_drv;
	chan->isp_dev = chan_drv->dev;
	chan->ndev = chan_drv->ndev;
	chan->ops = chan_drv->ops;
	chan->priv = file;

	err = isp_channel_power_on(chan);
	if (err < 0)
		goto error;

	err = isp_capture_init(chan);
	if (err < 0)
		goto init_err;

	/* Claim the per-minor slot; fail if it is already open. */
	mutex_lock(&chan_drv->lock);
	if (chan_drv->channels[channel] != NULL) {
		mutex_unlock(&chan_drv->lock);
		err = -EBUSY;
		goto chan_err;
	}

	chan_drv->channels[channel] = chan;
	mutex_unlock(&chan_drv->lock);

	file->private_data = chan;

	return nonseekable_open(inode, file);

chan_err:
	isp_capture_shutdown(chan);
init_err:
	isp_channel_power_off(chan);
error:
	kfree(chan);
	return err;
}
/**
 * @brief Release an ISP channel character device node, power off the camera
 * subsystem and free the ISP channel driver context.
 *
 * Under normal operation, ISP_CAPTURE_RESET followed by ISP_CAPTURE_RELEASE
 * should be called before releasing the file handle on the device node.
 *
 * This is the @a release file operation handler for an ISP channel node.
 *
 * @param[in] inode ISP channel character device inode struct
 * @param[in] file ISP channel character device file struct
 *
 * @returns 0
 */
static int isp_channel_release(
	struct inode *inode,
	struct file *file)
{
	struct tegra_isp_channel *chan = file->private_data;
	unsigned int channel = iminor(inode);
	struct isp_channel_drv *chan_drv = chan->drv;

	/* Tear down in reverse order of isp_channel_open(). */
	isp_capture_shutdown(chan);
	isp_channel_power_off(chan);

	mutex_lock(&chan_drv->lock);

	WARN_ON(chan_drv->channels[channel] != chan);
	chan_drv->channels[channel] = NULL;

	mutex_unlock(&chan_drv->lock);
	kfree(chan);

	return 0;
}
/**
 * @brief Process an IOCTL call on an ISP channel character device.
 *
 * Depending on the specific IOCTL, the argument (@a arg) may be a pointer to a
 * defined struct payload that is copied from or back to user-space. This memory
 * is allocated and mapped from user-space and must be kept available until
 * after the IOCTL call completes.
 *
 * This is the @a ioctl file operation handler for an ISP channel node.
 *
 * @param[in]		file	ISP channel character device file struct
 * @param[in]		cmd	ISP channel IOCTL command
 * @param[in,out]	arg	IOCTL argument; numerical value or pointer
 *
 * @returns	0 (success), neg. errno (failure)
 */
static long isp_channel_ioctl(
	struct file *file,
	unsigned int cmd,
	unsigned long arg)
{
	struct tegra_isp_channel *chan = file->private_data;
	void __user *ptr = (void __user *)arg;
	long err = -EFAULT;	/* default result if a user copy fails */

	if (unlikely(chan == NULL)) {
		pr_err("%s: invalid channel\n", __func__);
		return -EINVAL;
	}

	/* Dispatch on the IOCTL number only; size/direction are fixed by the
	 * command definitions.
	 */
	switch (_IOC_NR(cmd)) {
	/* Request an ISP FW channel allocation and set up its resources */
	case _IOC_NR(ISP_CAPTURE_SETUP): {
		struct isp_capture_setup setup;

		if (copy_from_user(&setup, ptr, sizeof(setup)))
			break;
		err = isp_capture_setup(chan, &setup);
		if (err)
			dev_err(chan->isp_dev, "isp capture setup failed\n");
		break;
	}
	/* Reset the channel; arg is a bitmask of reset flags */
	case _IOC_NR(ISP_CAPTURE_RESET): {
		uint32_t rst;

		if (copy_from_user(&rst, ptr, sizeof(rst)))
			break;
		err = isp_capture_reset(chan, rst);
		if (err)
			dev_err(chan->isp_dev, "isp capture reset failed\n");
		break;
	}
	/* Release the FW channel allocation in RCE */
	case _IOC_NR(ISP_CAPTURE_RELEASE): {
		uint32_t rel;

		if (copy_from_user(&rel, ptr, sizeof(rel)))
			break;
		err = isp_capture_release(chan, rel);
		if (err)
			dev_err(chan->isp_dev, "isp capture release failed\n");
		break;
	}
	/* Query channel info; result is copied back to user-space */
	case _IOC_NR(ISP_CAPTURE_GET_INFO): {
		struct isp_capture_info info;

		(void)memset(&info, 0, sizeof(info));
		err = isp_capture_get_info(chan, &info);
		if (err) {
			dev_err(chan->isp_dev, "isp capture get info failed\n");
			break;
		}
		if (copy_to_user(ptr, &info, sizeof(info)))
			err = -EFAULT;
		break;
	}
	/* Enqueue a process capture request */
	case _IOC_NR(ISP_CAPTURE_REQUEST): {
		struct isp_capture_req req;

		if (copy_from_user(&req, ptr, sizeof(req)))
			break;
		err = isp_capture_request(chan, &req);
		if (err)
			dev_err(chan->isp_dev,
				"isp process capture request submit failed\n");
		break;
	}
	/* Block until the next enqueued frame completes (arg: timeout) */
	case _IOC_NR(ISP_CAPTURE_STATUS): {
		uint32_t status;

		if (copy_from_user(&status, ptr, sizeof(status)))
			break;
		err = isp_capture_status(chan, status);
		if (err)
			dev_err(chan->isp_dev,
				"isp process get status failed\n");
		break;
	}
	/* Enqueue an ISP program request */
	case _IOC_NR(ISP_CAPTURE_PROGRAM_REQUEST): {
		struct isp_program_req program_req;

		if (copy_from_user(&program_req, ptr, sizeof(program_req)))
			break;
		err = isp_capture_program_request(chan, &program_req);
		if (err)
			dev_err(chan->isp_dev,
				"isp process program request submit failed\n");
		break;
	}
	/* Block until the next program request completes; no payload */
	case _IOC_NR(ISP_CAPTURE_PROGRAM_STATUS): {
		err = isp_capture_program_status(chan);
		if (err)
			dev_err(chan->isp_dev,
				"isp process program get status failed\n");
		break;
	}
	/* Enqueue a joint capture+program (extended) request */
	case _IOC_NR(ISP_CAPTURE_REQUEST_EX): {
		struct isp_capture_req_ex req;

		if (copy_from_user(&req, ptr, sizeof(req)))
			break;
		err = isp_capture_request_ex(chan, &req);
		if (err)
			dev_err(chan->isp_dev,
				"isp process request extended submit failed\n");
		break;
	}
	/* Install the out-of-order progress status notifier buffer */
	case _IOC_NR(ISP_CAPTURE_SET_PROGRESS_STATUS_NOTIFIER): {
		struct isp_capture_progress_status_req req;

		if (copy_from_user(&req, ptr, sizeof(req)))
			break;
		err = isp_capture_set_progress_status_notifier(chan, &req);
		if (err)
			dev_err(chan->isp_dev,
				"isp capture set progress status buffers failed\n");
		break;
	}
	/* Pin/unpin a surface buffer per the flags in the request */
	case _IOC_NR(ISP_CAPTURE_BUFFER_REQUEST): {
		struct isp_buffer_req req;

		if (copy_from_user(&req, ptr, sizeof(req)) != 0U)
			break;
		err = isp_capture_buffer_request(chan, &req);
		if (err < 0)
			dev_err(chan->isp_dev, "isp buffer req failed\n");
		break;
	}
	default: {
		dev_err(chan->isp_dev, "%s:Unknown ioctl\n", __func__);
		return -ENOIOCTLCMD;
	}
	}

	return err;
}
/* File operations for an ISP channel character device node. */
static const struct file_operations isp_channel_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.unlocked_ioctl = isp_channel_ioctl,
#ifdef CONFIG_COMPAT
	/* The native handler is reused for 32-bit callers; presumably the
	 * IOCTL payload structs are layout-compatible in compat mode —
	 * confirm when adding new IOCTL structs.
	 */
	.compat_ioctl = isp_channel_ioctl,
#endif
	.open = isp_channel_open,
	.release = isp_channel_release,
};
/* Character device */
static struct class *isp_channel_class;	/* device class for the channel nodes */
static int isp_channel_major;		/* dynamically allocated major number */
/**
 * @brief Register the ISP channel driver context and create the per-channel
 * character device nodes.
 *
 * Only one ISP channel driver context may be registered at a time.
 *
 * @param[in]	ndev	ISP platform device struct
 * @param[in]	ops	ISP channel driver operations table
 *
 * @returns	0 (success), neg. errno (failure)
 */
int isp_channel_drv_register(
	struct platform_device *ndev,
	const struct isp_channel_drv_ops *ops)
{
	struct isp_channel_drv *chan_drv;
	unsigned int i;

	/* Allocation sized for the trailing channels[] array */
	chan_drv = kzalloc(offsetof(struct isp_channel_drv,
			channels[MAX_ISP_CHANNELS]), GFP_KERNEL);
	if (unlikely(chan_drv == NULL))
		return -ENOMEM;
	chan_drv->dev = &ndev->dev;
	chan_drv->ndev = ndev;
	chan_drv->ops = ops;
	chan_drv->num_channels = MAX_ISP_CHANNELS;
	mutex_init(&chan_drv->lock);

	mutex_lock(&chdrv_lock);
	if (WARN_ON(chdrv_ != NULL)) {
		mutex_unlock(&chdrv_lock);
		kfree(chan_drv);
		return -EBUSY;
	}
	chdrv_ = chan_drv;
	mutex_unlock(&chdrv_lock);

	for (i = 0; i < chan_drv->num_channels; i++) {
		dev_t devt = MKDEV(isp_channel_major, i);
		struct device *node;

		node = device_create(isp_channel_class, chan_drv->dev, devt,
				NULL, "capture-isp-channel%u", i);
		/* Log and continue: the remaining channel nodes may still be
		 * created and used.
		 */
		if (IS_ERR(node))
			dev_err(chan_drv->dev,
				"failed to create capture-isp-channel%u: %ld\n",
				i, PTR_ERR(node));
	}
	return 0;
}
EXPORT_SYMBOL(isp_channel_drv_register);
/**
 * @brief Unregister the ISP channel driver context and destroy the
 * per-channel character device nodes.
 *
 * Safe to call even if registration never happened (no-op in that case).
 *
 * @param[in]	dev	device struct the context was registered with
 */
void isp_channel_drv_unregister(
	struct device *dev)
{
	struct isp_channel_drv *chan_drv;
	unsigned int i;

	mutex_lock(&chdrv_lock);
	chan_drv = chdrv_;
	chdrv_ = NULL;
	mutex_unlock(&chdrv_lock);

	/* Guard against unregister without a successful register */
	if (chan_drv == NULL)
		return;

	WARN_ON(chan_drv->dev != dev);

	for (i = 0; i < chan_drv->num_channels; i++) {
		dev_t devt = MKDEV(isp_channel_major, i);

		device_destroy(isp_channel_class, devt);
	}
	kfree(chan_drv);
}
EXPORT_SYMBOL(isp_channel_drv_unregister);
/**
 * @brief Initialize the ISP channel driver device (major).
 *
 * Creates the device class, then registers a dynamically allocated char
 * device major for the channel nodes.
 *
 * @returns	0 (success), PTR_ERR or neg. ISP channel major no. (failure)
 */
int isp_channel_drv_init(void)
{
	int err;

	isp_channel_class = class_create(THIS_MODULE, "capture-isp-channel");
	if (IS_ERR(isp_channel_class))
		return PTR_ERR(isp_channel_class);

	/* 0 requests a dynamically allocated major number */
	err = register_chrdev(0, "capture-isp-channel", &isp_channel_fops);
	isp_channel_major = err;
	if (err < 0) {
		class_destroy(isp_channel_class);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(isp_channel_drv_init);
/**
 * @brief De-initialize the ISP channel driver device (major).
 *
 * Unregisters the char device major and destroys the device class created by
 * @ref isp_channel_drv_init.
 */
void isp_channel_drv_exit(void)
{
	unregister_chrdev(isp_channel_major, "capture-isp-channel");
	class_destroy(isp_channel_class);
}
EXPORT_SYMBOL(isp_channel_drv_exit);

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,767 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2022 NVIDIA Corporation. All rights reserved.
/**
* @file drivers/media/platform/tegra/camera/fusa-capture/capture-vi-channel.c
*
* @brief VI channel character device driver for the T186/T194 Camera RTCPU
* platform.
*/
#include <asm/ioctls.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/of_platform.h>
#include <linux/nvhost.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <media/fusa-capture/capture-vi.h>
#include <media/fusa-capture/capture-vi-channel.h>
#include <linux/arm64-barrier.h>
/**
 * @defgroup VI_CHANNEL_IOCTLS
 *
 * @brief VI channel character device IOCTL API
 *
 * Clients in the UMD may open sysfs character devices representing VI channels,
 * and perform configuration and enqueue buffers in capture requests to the
 * low-level RCE subsystem via these IOCTLs.
 *
 * @{
 */

/**
 * @brief Set up VI channel resources and request FW channel allocation in RCE.
 *
 * Initialize the VI channel context and synchronization primitives, pin memory
 * for the capture descriptor queue, set up the buffer management table,
 * initialize the capture/capture-control IVC channels and request VI FW channel
 * allocation in RCE.
 *
 * @param[in]	ptr	Pointer to a struct @ref vi_capture_setup
 *
 * @returns	0 (success), neg. errno (failure)
 */
#define VI_CAPTURE_SETUP \
	_IOW('I', 1, struct vi_capture_setup)

/**
 * @brief Release the VI FW channel allocation in RCE, and all resources and
 * contexts in the KMD.
 *
 * @param[in]	reset_flags	uint32_t bitmask of
 *				@ref CAPTURE_CHANNEL_RESET_FLAGS
 * @returns	0 (success), neg. errno (failure)
 */
#define VI_CAPTURE_RELEASE \
	_IOW('I', 2, __u32)

/**
 * @brief Execute a blocking capture-control IVC request to RCE.
 *
 * @param[in]	ptr	Pointer to a struct @ref vi_capture_control_msg
 *
 * @returns	0 (success), neg. errno (failure)
 */
#define VI_CAPTURE_SET_CONFIG \
	_IOW('I', 3, struct vi_capture_control_msg)

/**
 * @brief Reset the VI channel in RCE synchronously w/ the KMD; all pending
 * capture descriptors in the queue are discarded and syncpoint values
 * fast-forwarded to unblock waiting clients.
 *
 * @param[in]	reset_flags	uint32_t bitmask of
 *				@ref CAPTURE_CHANNEL_RESET_FLAGS
 * @returns	0 (success), neg. errno (failure)
 */
#define VI_CAPTURE_RESET \
	_IOW('I', 4, __u32)

/**
 * @brief Retrieve the ids and current values of the progress, embedded data and
 * line timer syncpoints, and VI HW channel(s) allocated by RCE.
 *
 * If successful, the queried values are written back to the input struct.
 *
 * @param[in,out]	ptr	Pointer to a struct @ref vi_capture_info
 *
 * @returns	0 (success), neg. errno (failure)
 */
#define VI_CAPTURE_GET_INFO \
	_IOR('I', 5, struct vi_capture_info)

/**
 * @brief Enqueue a capture request to RCE, the addresses to surface buffers in
 * the descriptor (referenced by the buffer_index) are pinned and patched.
 *
 * @param[in]	ptr	Pointer to a struct @ref vi_capture_req
 *
 * @returns	0 (success), neg. errno (failure)
 */
#define VI_CAPTURE_REQUEST \
	_IOW('I', 6, struct vi_capture_req)

/**
 * @brief Wait on the next completion of an enqueued frame, signalled by RCE.
 * The status in the frame's capture descriptor is safe to read when this
 * completes w/o a -ETIMEDOUT or other error.
 *
 * @note This call completes for the frame at the head of the FIFO queue, and is
 * not necessarily for the most recently enqueued capture request.
 *
 * @param[in]	timeout_ms	uint32_t timeout [ms], 0 for indefinite
 *
 * @returns	0 (success), neg. errno (failure)
 */
#define VI_CAPTURE_STATUS \
	_IOW('I', 7, __u32)

/**
 * @brief Set up the capture progress status notifier array, which is a
 * replacement for the blocking @ref VI_CAPTURE_STATUS call; allowing for
 * out-of-order frame completion notifications.
 *
 * The values written by the KMD are any of the
 * @ref CAPTURE_PROGRESS_NOTIFIER_STATES.
 *
 * @param[in]	ptr	Pointer to a struct @ref vi_capture_progress_status_req
 *
 * @returns	0 (success), neg. errno (failure)
 */
#define VI_CAPTURE_SET_PROGRESS_STATUS_NOTIFIER \
	_IOW('I', 9, struct vi_capture_progress_status_req)

/**
 * @brief Perform an operation on the surface buffer by setting the bitwise
 * @a flag field with @ref CAPTURE_BUFFER_OPS flags.
 *
 * @param[in]	ptr	Pointer to a struct @ref vi_buffer_req
 * @returns	0 (success), neg. errno (failure)
 */
#define VI_CAPTURE_BUFFER_REQUEST \
	_IOW('I', 10, struct vi_buffer_req)

/** @} */
/**
 * @brief Drop all buffer mappings recorded for a capture descriptor slot.
 *
 * Releases every pinned mapping tracked in the unpins entry for
 * @a buffer_index and clears the entry, making the slot reusable.
 *
 * @param[in]	chan		VI channel context
 * @param[in]	buffer_index	capture descriptor queue slot to unpin
 */
void vi_capture_request_unpin(
	struct tegra_vi_channel *chan,
	uint32_t buffer_index)
{
	struct vi_capture *capture = chan->capture_data;
	struct capture_common_unpins *entry;
	int idx;

	mutex_lock(&capture->unpins_list_lock);
	entry = &capture->unpins_list[buffer_index];
	for (idx = 0; idx < entry->num_unpins; idx++) {
		if (capture->buf_ctx != NULL && entry->data[idx] != NULL)
			put_mapping(capture->buf_ctx, entry->data[idx]);
	}
	/* Mark the slot free for the next request */
	if (entry->num_unpins != 0)
		(void)memset(entry, 0U, sizeof(*entry));
	mutex_unlock(&capture->unpins_list_lock);
}
EXPORT_SYMBOL(vi_capture_request_unpin);
/* Singleton VI channel driver context, guarded by chdrv_lock */
static struct vi_channel_drv *chdrv_;
static DEFINE_MUTEX(chdrv_lock);
/**
 * @brief Allocate and publish a VI channel context for the given channel no.
 *
 * @param[in]	channel		VI channel number (device minor)
 * @param[in]	is_mem_pinned	whether capture request memory will be pinned
 *
 * @returns	channel context (success), ERR_PTR (failure)
 */
struct tegra_vi_channel *vi_channel_open_ex(
	unsigned int channel,
	bool is_mem_pinned)
{
	struct tegra_vi_channel *chan;
	struct vi_channel_drv *chan_drv;
	int err;

	/* Snapshot the global driver context; interruptible so a signal can
	 * abort an open blocked on the lock.
	 */
	if (mutex_lock_interruptible(&chdrv_lock))
		return ERR_PTR(-ERESTARTSYS);
	chan_drv = chdrv_;
	if (chan_drv == NULL || channel >= chan_drv->num_channels) {
		mutex_unlock(&chdrv_lock);
		return ERR_PTR(-ENODEV);
	}
	mutex_unlock(&chdrv_lock);

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (unlikely(chan == NULL))
		return ERR_PTR(-ENOMEM);

	chan->drv = chan_drv;
	/* Legacy path carries the nvhost device; new path carries the
	 * tegra-capture-vi platform device instead.
	 */
	if (chan_drv->use_legacy_path) {
		chan->dev = chan_drv->dev;
		chan->ndev = chan_drv->ndev;
	} else
		chan->vi_capture_pdev = chan_drv->vi_capture_pdev;
	chan->ops = chan_drv->ops;

	err = vi_capture_init(chan, is_mem_pinned);
	if (err < 0)
		goto error;

	/* Publish under the driver lock; readers access the table via RCU */
	mutex_lock(&chan_drv->lock);
	if (rcu_access_pointer(chan_drv->channels[channel]) != NULL) {
		mutex_unlock(&chan_drv->lock);
		err = -EBUSY;
		goto rcu_err;
	}
	rcu_assign_pointer(chan_drv->channels[channel], chan);
	mutex_unlock(&chan_drv->lock);

	return chan;

rcu_err:
	vi_capture_shutdown(chan);
error:
	kfree(chan);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(vi_channel_open_ex);
/**
 * @brief Tear down a VI channel context and remove it from the driver table.
 *
 * The context memory is reclaimed after an RCU grace period so concurrent
 * RCU readers of the channel table remain safe.
 *
 * @param[in]	channel	VI channel number (device minor)
 * @param[in]	chan	VI channel context to close
 *
 * @returns	0
 */
int vi_channel_close_ex(
	unsigned int channel,
	struct tegra_vi_channel *chan)
{
	struct vi_channel_drv *drv = chan->drv;

	vi_capture_shutdown(chan);

	mutex_lock(&drv->lock);
	WARN_ON(rcu_access_pointer(drv->channels[channel]) != chan);
	RCU_INIT_POINTER(drv->channels[channel], NULL);
	mutex_unlock(&drv->lock);

	/* Deferred free: wait for RCU readers to finish with chan */
	kfree_rcu(chan, rcu);

	return 0;
}
EXPORT_SYMBOL(vi_channel_close_ex);
/**
 * @brief Open a VI channel character device node.
 *
 * Resolves the channel number from the device minor and delegates the
 * channel setup to @ref vi_channel_open_ex with memory pinning enabled.
 *
 * This is the @a open file operation handler for a VI channel node.
 *
 * @param[in]	inode	VI channel character device inode struct
 * @param[in]	file	VI channel character device file struct
 *
 * @returns	0 (success), neg. errno (failure)
 */
static int vi_channel_open(
	struct inode *inode,
	struct file *file)
{
	struct tegra_vi_channel *chan;

	chan = vi_channel_open_ex(iminor(inode), true);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	file->private_data = chan;

	return nonseekable_open(inode, file);
}
/**
 * @brief Release a VI channel character device node.
 *
 * Delegates the teardown to @ref vi_channel_close_ex for the channel
 * identified by the device minor.
 *
 * This is the @a release file operation handler for a VI channel node.
 *
 * @param[in]	inode	VI channel character device inode struct
 * @param[in]	file	VI channel character device file struct
 *
 * @returns	0
 */
static int vi_channel_release(
	struct inode *inode,
	struct file *file)
{
	/* vi_channel_close_ex() always returns 0 */
	return vi_channel_close_ex(iminor(inode), file->private_data);
}
/**
 * Pin/map buffers and save iova boundaries into corresponding
 * memoryinfo struct.
 *
 * Called with capture->unpins_list_lock held; pins are recorded in
 * @a request_unpins so a later vi_capture_request_unpin() can release them.
 *
 * @returns 0 (success), neg. errno (failure; partial pins remain recorded)
 */
static int pin_vi_capture_request_buffers_locked(struct tegra_vi_channel *chan,
		struct vi_capture_req *req,
		struct capture_common_unpins *request_unpins)
{
	struct vi_capture *capture = chan->capture_data;
	/* Locate the capture descriptor for this queue slot */
	struct capture_descriptor* desc = (struct capture_descriptor*)
		(capture->requests.va +
			req->buffer_index * capture->request_size);
	struct capture_descriptor_memoryinfo* desc_mem =
			&capture->requests_memoryinfo[req->buffer_index];
	int i;
	int err = 0;

	/* Buffer count: ATOMP surfaces + engine_surface */
	/* NOTE(review): rejects the case where the pin count exactly equals
	 * MAX_PIN_BUFFER_PER_REQUEST (>= rather than >) — presumably
	 * intentional headroom; confirm against the unpins table size.
	 */
	BUG_ON(VI_NUM_ATOMP_SURFACES + 1U >= MAX_PIN_BUFFER_PER_REQUEST);

	/* Pin each output (ATOMP) surface and record its iova range */
	for (i = 0; i < VI_NUM_ATOMP_SURFACES; i++) {
		err = capture_common_pin_and_get_iova(capture->buf_ctx,
			desc->ch_cfg.atomp.surface[i].offset_hi,
			desc->ch_cfg.atomp.surface[i].offset,
			&desc_mem->surface[i].base_address, &desc_mem->surface[i].size,
			request_unpins);

		if (err) {
			dev_err(chan->dev, "%s: get atomp iova failed\n", __func__);
			goto fail;
		}
	}

	/* Pin the engine status surface */
	err = capture_common_pin_and_get_iova(capture->buf_ctx,
		desc->engine_status.offset_hi,
		desc->engine_status.offset,
		&desc_mem->engine_status_surface_base_address,
		&desc_mem->engine_status_surface_size,
		request_unpins);

	if (err) {
		dev_err(chan->dev, "%s: get engine surf iova failed\n", __func__);
		goto fail;
	}

fail:
	/* Unpin cleanup is done in vi_capture_request_unpin() */
	return err;
}
/**
 * @brief Process an IOCTL call on a VI channel character device.
 *
 * Depending on the specific IOCTL, the argument (@a arg) may be a pointer to a
 * defined struct payload that is copied from or back to user-space. This memory
 * is allocated and mapped from user-space and must be kept available until
 * after the IOCTL call completes.
 *
 * This is the @a ioctl file operation handler for a VI channel node.
 *
 * @param[in]		file	VI channel character device file struct
 * @param[in]		cmd	VI channel IOCTL command
 * @param[in,out]	arg	IOCTL argument; numerical value or pointer
 *
 * @returns	0 (success), neg. errno (failure)
 */
static long vi_channel_ioctl(
	struct file *file,
	unsigned int cmd,
	unsigned long arg)
{
	struct tegra_vi_channel *chan = file->private_data;
	struct vi_capture *capture = chan->capture_data;
	void __user *ptr = (void __user *)arg;
	int err = -EFAULT;	/* default result if a user copy fails */

	switch (_IOC_NR(cmd)) {
	/* Pin the descriptor queue, set up buffers and request a FW channel */
	case _IOC_NR(VI_CAPTURE_SETUP): {
		struct vi_capture_setup setup;

		if (copy_from_user(&setup, ptr, sizeof(setup)))
			break;
		/* On the new probe path the channel device is resolved from
		 * the setup parameters rather than at open time.
		 */
		if (chan->drv->use_legacy_path == false) {
			vi_get_nvhost_device(chan, &setup);
			if (chan->dev == NULL) {
				dev_err(&chan->vi_capture_pdev->dev,
					"%s: channel device is NULL",
					__func__);
				return -EINVAL;
			}
		}
		if (setup.request_size < sizeof(struct capture_descriptor)) {
			dev_err(chan->dev,
				"request size is too small to fit capture descriptor\n");
			return -EINVAL;
		}
		if (capture->buf_ctx) {
			dev_err(chan->dev, "vi buffer setup already done");
			return -EFAULT;
		}
		capture->buf_ctx = create_buffer_table(chan->dev);
		if (capture->buf_ctx == NULL) {
			dev_err(chan->dev, "vi buffer setup failed");
			break;
		}
		/* pin the capture descriptor ring buffer */
		err = capture_common_pin_memory(capture->rtcpu_dev,
				setup.mem, &capture->requests);
		if (err < 0) {
			dev_err(chan->dev,
				"%s: memory setup failed\n", __func__);
			destroy_buffer_table(capture->buf_ctx);
			capture->buf_ctx = NULL;
			return -EFAULT;
		}
		/* Check that buffer size matches queue depth */
		if ((capture->requests.buf->size / setup.request_size) <
				setup.queue_depth) {
			dev_err(chan->dev,
				"%s: descriptor buffer is too small for given queue depth\n",
				__func__);
			capture_common_unpin_memory(&capture->requests);
			destroy_buffer_table(capture->buf_ctx);
			capture->buf_ctx = NULL;
			return -ENOMEM;
		}
		setup.iova = capture->requests.iova;
		err = vi_capture_setup(chan, &setup);
		if (err < 0) {
			dev_err(chan->dev, "vi capture setup failed\n");
			capture_common_unpin_memory(&capture->requests);
			destroy_buffer_table(capture->buf_ctx);
			capture->buf_ctx = NULL;
			return err;
		}
		break;
	}
	/* Reset the channel; on success all queue slots are unpinned */
	case _IOC_NR(VI_CAPTURE_RESET): {
		uint32_t reset_flags;
		int i;

		if (copy_from_user(&reset_flags, ptr, sizeof(reset_flags)))
			break;
		err = vi_capture_reset(chan, reset_flags);
		if (err < 0)
			dev_err(chan->dev, "vi capture reset failed\n");
		else {
			for (i = 0; i < capture->queue_depth; i++)
				vi_capture_request_unpin(chan, i);
		}
		break;
	}
	/* Release the FW channel and free all capture resources */
	case _IOC_NR(VI_CAPTURE_RELEASE): {
		uint32_t reset_flags;
		int i;

		if (copy_from_user(&reset_flags, ptr, sizeof(reset_flags)))
			break;
		err = vi_capture_release(chan, reset_flags);
		if (err < 0)
			dev_err(chan->dev, "vi capture release failed\n");
		else {
			for (i = 0; i < capture->queue_depth; i++)
				vi_capture_request_unpin(chan, i);
			capture_common_unpin_memory(&capture->requests);
			destroy_buffer_table(capture->buf_ctx);
			capture->buf_ctx = NULL;
			vfree(capture->unpins_list);
			capture->unpins_list = NULL;
		}
		break;
	}
	/* Query syncpoint/HW channel info; result copied back to user */
	case _IOC_NR(VI_CAPTURE_GET_INFO): {
		struct vi_capture_info info;

		(void)memset(&info, 0, sizeof(info));
		err = vi_capture_get_info(chan, &info);
		if (err < 0) {
			dev_err(chan->dev, "vi capture get info failed\n");
			break;
		}
		if (copy_to_user(ptr, &info, sizeof(info)))
			err = -EFAULT;
		break;
	}
	/* Forward a blocking capture-control message to RCE */
	case _IOC_NR(VI_CAPTURE_SET_CONFIG): {
		struct vi_capture_control_msg msg;

		if (copy_from_user(&msg, ptr, sizeof(msg)))
			break;
		err = vi_capture_control_message_from_user(chan, &msg);
		if (err < 0)
			dev_err(chan->dev, "vi capture set config failed\n");
		break;
	}
	/* Pin the request's buffers, then submit the capture request */
	case _IOC_NR(VI_CAPTURE_REQUEST): {
		struct vi_capture_req req;
		struct capture_common_unpins *request_unpins;

		if (copy_from_user(&req, ptr, sizeof(req)))
			break;

		if (req.num_relocs == 0) {
			dev_err(chan->dev, "request must have non-zero relocs\n");
			return -EINVAL;
		}

		if (req.buffer_index >= capture->queue_depth) {
			dev_err(chan->dev, "buffer index is out of bound\n");
			return -EINVAL;
		}

		/* Don't let to speculate with invalid buffer_index value */
		spec_bar();

		if (capture->unpins_list == NULL) {
			dev_err(chan->dev, "Channel setup incomplete\n");
			return -EINVAL;
		}

		mutex_lock(&capture->unpins_list_lock);

		request_unpins = &capture->unpins_list[req.buffer_index];

		/* A non-empty unpins entry means the slot is still in flight */
		if (request_unpins->num_unpins != 0U) {
			dev_err(chan->dev, "Descriptor is still in use by rtcpu\n");
			mutex_unlock(&capture->unpins_list_lock);
			return -EBUSY;
		}
		err = pin_vi_capture_request_buffers_locked(chan, &req,
				request_unpins);

		mutex_unlock(&capture->unpins_list_lock);

		/* On any failure, drop whatever was pinned for this slot */
		if (err < 0) {
			dev_err(chan->dev,
				"pin request failed\n");
			vi_capture_request_unpin(chan, req.buffer_index);
			break;
		}

		err = vi_capture_request(chan, &req);
		if (err < 0) {
			dev_err(chan->dev,
				"vi capture request submit failed\n");
			vi_capture_request_unpin(chan, req.buffer_index);
		}
		break;
	}
	/* Block until the head-of-queue frame completes (arg: timeout ms) */
	case _IOC_NR(VI_CAPTURE_STATUS): {
		uint32_t timeout_ms;

		if (copy_from_user(&timeout_ms, ptr, sizeof(timeout_ms)))
			break;
		err = vi_capture_status(chan, timeout_ms);
		if (err < 0)
			dev_err(chan->dev,
				"vi capture get status failed\n");
		break;
	}
	/* Install the out-of-order progress status notifier buffer */
	case _IOC_NR(VI_CAPTURE_SET_PROGRESS_STATUS_NOTIFIER): {
		struct vi_capture_progress_status_req req;

		if (copy_from_user(&req, ptr, sizeof(req)))
			break;

		err = vi_capture_set_progress_status_notifier(chan, &req);
		if (err < 0)
			dev_err(chan->dev,
				"setting progress status buffer failed\n");
		break;
	}
	/* Pin/unpin a surface buffer per the flags in the request */
	case _IOC_NR(VI_CAPTURE_BUFFER_REQUEST): {
		struct vi_buffer_req req;

		if (copy_from_user(&req, ptr, sizeof(req)) != 0U)
			break;

		err = capture_buffer_request(
			capture->buf_ctx, req.mem, req.flag);
		if (err < 0)
			dev_err(chan->dev, "vi buffer request failed\n");
		break;
	}
	default: {
		dev_err(chan->dev, "%s:Unknown ioctl\n", __func__);
		return -ENOIOCTLCMD;
	}
	}

	return err;
}
/* File operations for a VI channel character device node. */
static const struct file_operations vi_channel_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.unlocked_ioctl = vi_channel_ioctl,
#ifdef CONFIG_COMPAT
	/* The native handler is reused for 32-bit callers; presumably the
	 * IOCTL payload structs are layout-compatible in compat mode —
	 * confirm when adding new IOCTL structs.
	 */
	.compat_ioctl = vi_channel_ioctl,
#endif
	.open = vi_channel_open,
	.release = vi_channel_release,
};
/* Character device */
static struct class *vi_channel_class;	/* device class for the channel nodes */
static int vi_channel_major;		/* dynamically allocated major number */
/**
 * @brief Register the VI channel driver context and create the per-channel
 * character device nodes.
 *
 * Only one VI channel driver context may be registered at a time. The
 * context is devm-allocated on @a ndev, so it is also reclaimed
 * automatically when that device goes away.
 *
 * @param[in]	ndev		VI platform device (legacy VI or
 *				tegra-capture-vi)
 * @param[in]	max_vi_channels	number of channel nodes to create
 *
 * @returns	0 (success), neg. errno (failure)
 */
int vi_channel_drv_register(
	struct platform_device *ndev,
	unsigned int max_vi_channels)
{
	struct vi_channel_drv *chan_drv;
	unsigned int i;

	/* Allocation includes the trailing channels[] pointer array */
	chan_drv = devm_kzalloc(&ndev->dev, sizeof(*chan_drv) +
			max_vi_channels * sizeof(struct tegra_vi_channel *),
			GFP_KERNEL);
	if (unlikely(chan_drv == NULL))
		return -ENOMEM;

	/* The tegra-capture-vi device uses the new probe path; any other
	 * device name keeps the legacy nvhost path.
	 */
	if (strstr(ndev->name, "tegra-capture-vi") == NULL) {
		chan_drv->use_legacy_path = true;
		chan_drv->dev = &ndev->dev;
		chan_drv->ndev = ndev;
	} else {
		chan_drv->use_legacy_path = false;
		chan_drv->dev = NULL;
		chan_drv->ndev = NULL;
		chan_drv->vi_capture_pdev = ndev;
	}
	chan_drv->num_channels = max_vi_channels;
	mutex_init(&chan_drv->lock);

	mutex_lock(&chdrv_lock);
	if (chdrv_ != NULL) {
		mutex_unlock(&chdrv_lock);
		WARN_ON(1);
		/* devm allocation is reclaimed with the device */
		return -EBUSY;
	}
	chdrv_ = chan_drv;
	mutex_unlock(&chdrv_lock);

	for (i = 0; i < chan_drv->num_channels; i++) {
		dev_t devt = MKDEV(vi_channel_major, i);
		struct device *parent = chan_drv->use_legacy_path ?
			chan_drv->dev : &chan_drv->vi_capture_pdev->dev;
		struct device *node;

		node = device_create(vi_channel_class, parent, devt, NULL,
			"capture-vi-channel%u", i);
		/* Log and continue: the remaining channel nodes may still be
		 * created and used.
		 */
		if (IS_ERR(node))
			dev_err(parent,
				"failed to create capture-vi-channel%u: %ld\n",
				i, PTR_ERR(node));
	}

	return 0;
}
EXPORT_SYMBOL(vi_channel_drv_register);
/**
 * @brief Register the channel driver operations table.
 *
 * Returns -EPROBE_DEFER while the driver context has not been registered
 * yet, so the caller can retry later. The table is only installed once;
 * subsequent calls log a warning and keep the original table.
 *
 * @param[in]	ops	VI channel driver operations table
 *
 * @returns	0 (success), neg. errno (failure)
 */
int vi_channel_drv_fops_register(
	const struct vi_channel_drv_ops *ops)
{
	struct vi_channel_drv *chan_drv;

	/* Read and update chdrv_ under the lock: the original code read it
	 * before locking, racing with register/unregister.
	 */
	mutex_lock(&chdrv_lock);
	chan_drv = chdrv_;
	if (chan_drv == NULL) {
		mutex_unlock(&chdrv_lock);
		return -EPROBE_DEFER;
	}
	if (chan_drv->ops == NULL)
		chan_drv->ops = ops;
	else
		dev_warn(chan_drv->dev, "fops function table already registered\n");
	mutex_unlock(&chdrv_lock);

	return 0;
}
EXPORT_SYMBOL(vi_channel_drv_fops_register);
/**
 * @brief Unregister the VI channel driver context and destroy the
 * per-channel character device nodes.
 *
 * Safe to call even if registration never happened (no-op in that case).
 *
 * @param[in]	dev	device struct the context was registered with
 */
void vi_channel_drv_unregister(
	struct device *dev)
{
	struct vi_channel_drv *chan_drv;
	struct device *alloc_dev;
	unsigned int i;

	mutex_lock(&chdrv_lock);
	chan_drv = chdrv_;
	chdrv_ = NULL;
	mutex_unlock(&chdrv_lock);

	/* Guard against unregister without a successful register */
	if (chan_drv == NULL)
		return;

	/* On the tegra-capture-vi path chan_drv->dev is NULL; the context was
	 * devm-allocated on the platform device in both cases, so resolve the
	 * owning device accordingly (the original code passed the possibly
	 * NULL chan_drv->dev to devm_kfree()).
	 */
	alloc_dev = chan_drv->use_legacy_path ?
		chan_drv->dev : &chan_drv->vi_capture_pdev->dev;

	WARN_ON(chan_drv->use_legacy_path && chan_drv->dev != dev);

	for (i = 0; i < chan_drv->num_channels; i++) {
		dev_t devt = MKDEV(vi_channel_major, i);

		device_destroy(vi_channel_class, devt);
	}
	devm_kfree(alloc_dev, chan_drv);
}
EXPORT_SYMBOL(vi_channel_drv_unregister);
/**
 * @brief Initialize the VI channel driver device (major).
 *
 * Creates the device class, then registers a dynamically allocated char
 * device major for the channel nodes.
 *
 * @returns	0 (success), PTR_ERR or neg. VI channel major no. (failure)
 */
int vi_channel_drv_init(void)
{
	int err;

	vi_channel_class = class_create(THIS_MODULE, "capture-vi-channel");
	if (IS_ERR(vi_channel_class))
		return PTR_ERR(vi_channel_class);

	/* 0 requests a dynamically allocated major number */
	err = register_chrdev(0, "capture-vi-channel", &vi_channel_fops);
	vi_channel_major = err;
	if (err < 0) {
		class_destroy(vi_channel_class);
		return err;
	}

	return 0;
}
/**
 * @brief De-initialize the VI channel driver device (major).
 *
 * Unregisters the char device major and destroys the device class created by
 * @ref vi_channel_drv_init.
 */
void vi_channel_drv_exit(void)
{
	unregister_chrdev(vi_channel_major, "capture-vi-channel");
	class_destroy(vi_channel_class);
}

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,120 @@
// SPDX-License-Identifier: GPL-2.0
/*
* nvcamera_log.c - general tracing function for vi and isp API calls
*
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include "nvcamera_log.h"
#include <linux/nvhost.h>
#include <linux/platform_device.h>
#include <uapi/linux/nvhost_events.h>
/*
* Set to 1 to enable additional kernel API traces
*/
#define NVCAM_ENABLE_EXTRA_TRACES 0
#if defined(CONFIG_EVENTLIB)
#include <linux/keventlib.h>

/*
 * Camera "task submission" event enabled by default.
 *
 * Emits an NVHOST_TASK_SUBMIT eventlib record for the given device, tagged
 * with the submitting thread/process ids.
 */
void nv_camera_log_submit(struct platform_device *pdev,
	u32 syncpt_id,
	u32 syncpt_thresh,
	u32 channel_id,
	u64 timestamp)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
	struct nvhost_task_submit task_submit;

	/* Bail out if the device has no driver data yet, or eventlib tracing
	 * is not set up for it (the original code dereferenced pdata without
	 * a NULL check).
	 */
	if (!pdata || !pdata->eventlib_id)
		return;

	/*
	 * Write task submit event
	 */
	task_submit.syncpt_id = syncpt_id;
	task_submit.syncpt_thresh = syncpt_thresh;
	task_submit.channel_id = channel_id;
	task_submit.class_id = pdata->class;

	/*
	 * Eventlib events are meant to be matched with their userspace
	 * analogues. Instead of the PID as (this) thread's ID use the
	 * inherited thread group ID. For the reported TID use this thread's
	 * ID (i.e. PID).
	 */
	task_submit.tid = current->pid;
	task_submit.pid = current->tgid;

	keventlib_write(pdata->eventlib_id,
		&task_submit,
		sizeof(task_submit),
		NVHOST_TASK_SUBMIT,
		timestamp);
}
#else
/* Stub used when eventlib support is compiled out */
void nv_camera_log_submit(struct platform_device *pdev,
	u32 syncpt_id,
	u32 syncpt_thresh,
	u32 channel_id,
	u64 timestamp)
{
}
#endif
EXPORT_SYMBOL_GPL(nv_camera_log_submit);
#if defined(CONFIG_EVENTLIB) && NVCAM_ENABLE_EXTRA_TRACES
#include <linux/keventlib.h>

/*
 * Additional camera traces disabled by default.
 *
 * Emits a generic eventlib record of the given type for the device, tagged
 * with the calling thread/process ids.
 */
void nv_camera_log(struct platform_device *pdev,
	u64 timestamp,
	u32 type)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
	struct nv_camera_task_log task_log;

	/* Bail out if the device has no driver data yet, or eventlib tracing
	 * is not set up for it (the original code dereferenced pdata without
	 * a NULL check).
	 */
	if (!pdata || !pdata->eventlib_id)
		return;

	/*
	 * Write task log event
	 */
	task_log.class_id = pdata->class;

	/*
	 * Eventlib events are meant to be matched with their userspace
	 * analogues. Instead of the PID as (this) thread's ID use the
	 * inherited thread group ID. For the reported TID use this thread's
	 * ID (i.e. PID).
	 */
	task_log.tid = current->pid;
	task_log.pid = current->tgid;

	keventlib_write(pdata->eventlib_id,
		&task_log,
		sizeof(task_log),
		type,
		timestamp);
}
#else
/* Stub used when extra traces or eventlib support are compiled out */
void nv_camera_log(struct platform_device *pdev,
	u64 timestamp,
	u32 type)
{
}
#endif
EXPORT_SYMBOL_GPL(nv_camera_log);

View File

@@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2018-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef __NVCAMERA_LOG_H
#define __NVCAMERA_LOG_H

#include <linux/types.h>

struct platform_device;

/* Emit a camera "task submission" trace event for pdev; no-op when eventlib
 * support is not available.
 */
void nv_camera_log_submit(struct platform_device *pdev,
	u32 syncpt_id,
	u32 syncpt_thresh,
	u32 channel_id,
	u64 timestamp);

/* Emit a general camera trace event of the given type for pdev; no-op unless
 * extra traces are compiled in.
 */
void nv_camera_log(struct platform_device *pdev,
	u64 timestamp,
	u32 type);

#endif

View File

@@ -0,0 +1,519 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Tegra CSI5 device common APIs
*
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/log2.h>
#include <media/csi.h>
#include <media/mc_common.h>
#include <media/csi5_registers.h>
#include "nvcsi/nvcsi.h"
#include "csi5_fops.h"
#include <linux/nospec.h>
#include <linux/nvhost.h>
#include <linux/tegra-capture-ivc.h>
#include "soc/tegra/camrtc-capture-messages.h"
#include <media/fusa-capture/capture-vi.h>
/* Referred from capture-scheduler.c defined in rtcpu-fw */
#define NUM_CAPTURE_CHANNELS 64
/* Temporary ids for the clients whose channel-id is not yet allocated */
#define NUM_CAPTURE_TRANSACTION_IDS 64
#define TOTAL_CHANNELS (NUM_CAPTURE_CHANNELS + NUM_CAPTURE_TRANSACTION_IDS)
/* Map a CSI port number to its NVCSI stream id: ports below E map 1:1,
 * ports E and above are paired two-to-a-stream.
 */
static inline u32 csi5_port_to_stream(u32 csi_port)
{
	if (csi_port < NVCSI_PORT_E)
		return csi_port;

	return ((csi_port - NVCSI_PORT_E) >> 1U) + NVCSI_PORT_E;
}
/* Take an nvhost module reference to power up the CSI block. */
static int csi5_power_on(struct tegra_csi_device *csi)
{
	int err;

	dev_dbg(csi->dev, "%s\n", __func__);

	err = nvhost_module_busy(csi->pdev);
	if (err)
		dev_err(csi->dev, "%s:cannot enable csi\n", __func__);

	return err;
}
/* Drop the nvhost module reference taken by csi5_power_on(). */
static int csi5_power_off(struct tegra_csi_device *csi)
{
	dev_dbg(csi->dev, "%s\n", __func__);

	nvhost_module_idle(csi->pdev);

	return 0;
}
/* Translate a CAPTURE_* control response code from RCE into a negative
 * errno; unknown codes map to -EINVAL.
 */
static int verify_capture_control_response(const uint32_t result)
{
	switch (result) {
	case CAPTURE_OK:
		return 0;
	case CAPTURE_ERROR_INVALID_PARAMETER:
		return -EINVAL;
	case CAPTURE_ERROR_NO_MEMORY:
		return -ENOMEM;
	case CAPTURE_ERROR_BUSY:
		return -EBUSY;
	case CAPTURE_ERROR_NOT_SUPPORTED:
	case CAPTURE_ERROR_NOT_INITIALIZED:
		return -EPERM;
	case CAPTURE_ERROR_OVERFLOW:
		return -EOVERFLOW;
	case CAPTURE_ERROR_NO_RESOURCES:
		return -ENODEV;
	default:
		return -EINVAL;
	}
}
/**
 * Send a capture-control message to RCE over the VI channel and translate
 * the response code into an errno.
 *
 * The same message buffer is used for both request and response, so
 * @a result must point into @a msg; it is read after the blocking call
 * returns.
 */
static int csi5_send_control_message(
	struct tegra_vi_channel *chan,
	struct CAPTURE_CONTROL_MSG *msg,
	uint32_t *result)
{
	int err = 0;
	struct vi_capture_control_msg vi_msg;

	(void) memset(&vi_msg, 0, sizeof(vi_msg));
	/* Kernel pointers are passed through the 64-bit ptr fields */
	vi_msg.ptr = (uint64_t)msg;
	vi_msg.size = sizeof(*msg);
	vi_msg.response = (uint64_t)msg;

	err = vi_capture_control_message(chan, &vi_msg);
	if (err < 0)
		return err;

	return verify_capture_control_response(*result);
}
/**
 * Open an NVCSI stream via a CAPTURE_PHY_STREAM_OPEN_REQ control message.
 *
 * Returns 0 on success, neg. errno on failure.
 */
static int csi5_stream_open(struct tegra_csi_channel *chan, u32 stream_id,
	u32 csi_port)
{
	struct tegra_csi_device *csi = chan->csi;
	struct tegra_channel *tegra_chan =
		v4l2_get_subdev_hostdata(&chan->subdev);
	struct CAPTURE_CONTROL_MSG msg;
	int vi_port = 0;

	/* If the tegra_vi_channel is NULL it means that is PCL TPG usecase where fusa UMD opens the
	 * VI channel and sends channel messages but for CSI messages it uses this V4L2 path.
	 * In such a case query fusacapture KMD for the tegra_vi_channel associated with the
	 * current stream id/vc id combination.
	 * If still NULL, we are in erroroneous state, exit with error.
	 */
	if (tegra_chan->tegra_vi_channel[0] == NULL) {
		tegra_chan->tegra_vi_channel[0] = get_tegra_vi_channel(stream_id,
			tegra_chan->virtual_channel);
		if (tegra_chan->tegra_vi_channel[0] == NULL) {
			dev_err(csi->dev, "%s: VI channel not found for stream- %d vc- %d\n",
				__func__,stream_id,tegra_chan->virtual_channel);
			return -EINVAL;
		}
	}

	/* Open NVCSI stream */
	memset(&msg, 0, sizeof(msg));
	msg.header.msg_id = CAPTURE_PHY_STREAM_OPEN_REQ;
	msg.phy_stream_open_req.stream_id = stream_id;
	msg.phy_stream_open_req.csi_port = csi_port;

	/* With multiple valid ports, streams above 0 go through VI port 1 */
	if (tegra_chan->valid_ports > 1)
		vi_port = (stream_id > 0) ? 1 : 0;
	else
		vi_port = 0;

	return csi5_send_control_message(tegra_chan->tegra_vi_channel[vi_port], &msg,
		&msg.phy_stream_open_resp.result);
}
/**
 * Close an NVCSI stream via a CAPTURE_PHY_STREAM_CLOSE_REQ control message.
 *
 * Failures are logged but not propagated (void return).
 */
static void csi5_stream_close(struct tegra_csi_channel *chan, u32 stream_id,
	u32 csi_port)
{
	struct tegra_csi_device *csi = chan->csi;
	struct tegra_channel *tegra_chan =
		v4l2_get_subdev_hostdata(&chan->subdev);
	int err = 0;
	int vi_port = 0;
	struct CAPTURE_CONTROL_MSG msg;

	/* Close NVCSI stream */
	memset(&msg, 0, sizeof(msg));
	msg.header.msg_id = CAPTURE_PHY_STREAM_CLOSE_REQ;
	msg.phy_stream_close_req.stream_id = stream_id;
	msg.phy_stream_close_req.csi_port = csi_port;

	/* With multiple valid ports, streams above 0 go through VI port 1 */
	if (tegra_chan->valid_ports > 1)
		vi_port = (stream_id > 0) ? 1 : 0;
	else
		vi_port = 0;

	/* NOTE(review): the result is read from phy_stream_open_resp rather
	 * than phy_stream_close_resp — presumably the response fields alias
	 * in the message union; confirm against camrtc-capture-messages.h.
	 */
	err = csi5_send_control_message(tegra_chan->tegra_vi_channel[vi_port], &msg,
		&msg.phy_stream_open_resp.result);
	if (err < 0) {
		dev_err(csi->dev, "%s: Error in closing stream_id=%u, csi_port=%u\n",
			__func__, stream_id, csi_port);
	}

	return;
}
/*
 * Configure the NVCSI brick (PHY type) and CIL (lane count, settle time,
 * clock rate) for a stream, then push the configuration to the RTCPU via
 * CAPTURE_CSI_STREAM_SET_CONFIG_REQ.
 *
 * The CIL settle time is taken from the sensor mode properties when
 * camera_common_data is available, otherwise from the channel's of_node;
 * it defaults to 0 (auto-calibrate) when neither provides it.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int csi5_stream_set_config(struct tegra_csi_channel *chan, u32 stream_id,
	u32 csi_port, int csi_lanes)
{
	struct tegra_csi_device *csi = chan->csi;
	struct tegra_channel *tegra_chan =
		v4l2_get_subdev_hostdata(&chan->subdev);
	struct camera_common_data *s_data = chan->s_data;
	const struct sensor_mode_properties *mode = NULL;
	unsigned int cil_settletime = 0;
	int vi_port = 0;
	struct CAPTURE_CONTROL_MSG msg;
	struct nvcsi_brick_config brick_config;
	struct nvcsi_cil_config cil_config;
	/* 3 data lanes is used here as the marker for C-PHY (trios) */
	bool is_cphy = (csi_lanes == 3);
	dev_dbg(csi->dev, "%s: stream_id=%u, csi_port=%u\n",
		__func__, stream_id, csi_port);
	/* Attempt to find the cil_settletime from the device tree */
	if (s_data) {
		int idx = s_data->mode_prop_idx;
		dev_dbg(csi->dev, "cil_settingtime is pulled from device");
		if (idx < s_data->sensor_props.num_modes) {
			mode = &s_data->sensor_props.sensor_modes[idx];
			cil_settletime = mode->signal_properties.cil_settletime;
		} else {
			dev_dbg(csi->dev, "mode not listed in DT, use default");
			cil_settletime = 0;
		}
	} else if (chan->of_node) {
		int err = 0;
		const char *str;
		dev_dbg(csi->dev,
			"cil_settletime is pulled from device of_node");
		err = of_property_read_string(chan->of_node, "cil_settletime",
			&str);
		if (!err) {
			err = kstrtou32(str, 10, &cil_settletime);
			if (err) {
				dev_dbg(csi->dev,
					"no cil_settletime in of_node");
				cil_settletime = 0;
			}
		}
	}
	/* Brick config */
	memset(&brick_config, 0, sizeof(brick_config));
	brick_config.phy_mode = (!is_cphy) ?
		NVCSI_PHY_TYPE_DPHY : NVCSI_PHY_TYPE_CPHY;
	/* CIL config */
	memset(&cil_config, 0, sizeof(cil_config));
	cil_config.num_lanes = csi_lanes;
	/* LP bypass only applies to D-PHY */
	cil_config.lp_bypass_mode = is_cphy ? 0 : 1;
	cil_config.t_hs_settle = cil_settletime;
	/* Sensor path reads MIPI clock from DT; TPG uses the CSI clock.
	 * Both are reported to the RTCPU in kHz (hence / 1000).
	 */
	if (s_data && !chan->pg_mode)
		cil_config.mipi_clock_rate = read_mipi_clk_from_dt(chan) / 1000;
	else
		cil_config.mipi_clock_rate = csi->clk_freq / 1000;
	/* Set NVCSI stream config */
	memset(&msg, 0, sizeof(msg));
	msg.header.msg_id = CAPTURE_CSI_STREAM_SET_CONFIG_REQ;
	msg.csi_stream_set_config_req.stream_id = stream_id;
	msg.csi_stream_set_config_req.csi_port = csi_port;
	msg.csi_stream_set_config_req.brick_config = brick_config;
	msg.csi_stream_set_config_req.cil_config = cil_config;
	/* Gang mode: stream 0 rides VI port 0, later streams VI port 1 */
	if (tegra_chan->valid_ports > 1)
		vi_port = (stream_id > 0) ? 1 : 0;
	else
		vi_port = 0;
	return csi5_send_control_message(tegra_chan->tegra_vi_channel[vi_port], &msg,
			&msg.csi_stream_set_config_resp.result);
}
/*
 * Configure and start the CSI test-pattern generator on a stream.
 *
 * Two control messages are sent: TPG_SET_CONFIG with the pattern
 * parameters, then TPG_START_RATE with the frame rate. If configuration
 * fails we return immediately instead of starting an unconfigured TPG
 * (the original code logged the error but started the generator anyway).
 *
 * Returns 0 on success, negative errno on failure.
 */
static int csi5_stream_tpg_start(struct tegra_csi_channel *chan, u32 stream_id,
	u32 virtual_channel_id)
{
	int err = 0;
	struct tegra_csi_device *csi = chan->csi;
	struct tegra_csi_port *port = &chan->ports[0];
	struct tegra_channel *tegra_chan =
		v4l2_get_subdev_hostdata(&chan->subdev);
	struct CAPTURE_CONTROL_MSG msg;
	union nvcsi_tpg_config *tpg_config = NULL;
	dev_dbg(csi->dev, "%s: stream_id=%u, virtual_channel_id=%d\n",
		__func__, stream_id, virtual_channel_id);
	/* Set TPG config for a virtual channel */
	memset(&msg, 0, sizeof(msg));
	msg.header.msg_id = CAPTURE_CSI_STREAM_TPG_SET_CONFIG_REQ;
	tpg_config = &(msg.csi_stream_tpg_set_config_req.tpg_config);
	csi->get_tpg_settings(port, tpg_config);
	err = csi5_send_control_message(tegra_chan->tegra_vi_channel[0], &msg,
			&msg.csi_stream_tpg_set_config_resp.result);
	if (err < 0) {
		dev_err(csi->dev, "%s: Error in TPG set config stream_id=%u, csi_port=%u\n",
			__func__, port->stream_id, port->csi_port);
		/* Do not start an unconfigured test-pattern generator */
		return err;
	}
	/* Enable TPG on a stream */
	memset(&msg, 0, sizeof(msg));
	msg.header.msg_id = CAPTURE_CSI_STREAM_TPG_START_RATE_REQ;
	msg.csi_stream_tpg_start_rate_req.stream_id = stream_id;
	msg.csi_stream_tpg_start_rate_req.virtual_channel_id = virtual_channel_id;
	msg.csi_stream_tpg_start_rate_req.frame_rate = port->framerate;
	err = csi5_send_control_message(tegra_chan->tegra_vi_channel[0], &msg,
			&msg.csi_stream_tpg_start_resp.result);
	if (err < 0) {
		dev_err(csi->dev, "%s: Error in TPG start stream_id=%u, csi_port=%u\n",
			__func__, port->stream_id, port->csi_port);
	}
	return err;
}
/*
 * Stop the CSI test-pattern generator on @stream_id/@virtual_channel_id.
 * Failures are logged only: the caller is tearing the stream down and
 * cannot act on the error.
 */
static void csi5_stream_tpg_stop(struct tegra_csi_channel *chan, u32 stream_id,
	u32 virtual_channel_id)
{
	struct tegra_csi_device *csi = chan->csi;
	struct tegra_channel *tegra_chan =
		v4l2_get_subdev_hostdata(&chan->subdev);
	struct CAPTURE_CONTROL_MSG req;
	int ret;

	dev_dbg(csi->dev, "%s: stream_id=%u, virtual_channel_id=%d\n",
		__func__, stream_id, virtual_channel_id);

	/* Disable TPG on a stream */
	memset(&req, 0, sizeof(req));
	req.header.msg_id = CAPTURE_CSI_STREAM_TPG_STOP_REQ;
	req.csi_stream_tpg_stop_req.stream_id = stream_id;
	req.csi_stream_tpg_stop_req.virtual_channel_id = virtual_channel_id;

	ret = csi5_send_control_message(tegra_chan->tegra_vi_channel[0], &req,
			&req.csi_stream_tpg_stop_resp.result);
	if (ret < 0)
		dev_err(csi->dev, "%s: Error in TPG stop stream_id=%u\n",
			__func__, stream_id);
}
/* Transform the user-mode gain setting into its TPG-recognizable
 * equivalent. The gain ratio supported by TPG is in the range 0.125 to 8.
 * Userspace multiplies the gain setting by 8 before the v4l2 ioctl call;
 * it is transformed back here before being sent in the IVC message.
 */
/*
 * Map a pre-scaled gain value (user gain * 8, i.e. 1..64) to the matching
 * CAPTURE_CSI_STREAM_TPG_GAIN_RATIO_* constant, using order_base_2() as
 * the table index (0..6 for inputs 1..64).
 *
 * NOTE(review): assumes gain_ratio_tpg >= 1; a value of 0 or negative
 * would make order_base_2() index outside the 7-entry table -- confirm
 * the v4l2 control range enforces this at the caller.
 */
static uint32_t get_tpg_gain_ratio_setting(int gain_ratio_tpg)
{
	const uint32_t tpg_gain_ratio_settings[] = {
		CAPTURE_CSI_STREAM_TPG_GAIN_RATIO_ONE_EIGHTH,
		CAPTURE_CSI_STREAM_TPG_GAIN_RATIO_ONE_FOURTH,
		CAPTURE_CSI_STREAM_TPG_GAIN_RATIO_HALF,
		CAPTURE_CSI_STREAM_TPG_GAIN_RATIO_NONE,
		CAPTURE_CSI_STREAM_TPG_GAIN_RATIO_TWO_TO_ONE,
		CAPTURE_CSI_STREAM_TPG_GAIN_RATIO_FOUR_TO_ONE,
		CAPTURE_CSI_STREAM_TPG_GAIN_RATIO_EIGHT_TO_ONE};
	return tpg_gain_ratio_settings[order_base_2(gain_ratio_tpg)];
}
int csi5_tpg_set_gain(struct tegra_csi_channel *chan, int gain_ratio_tpg)
{
struct tegra_csi_device *csi = chan->csi;
struct tegra_csi_port *port = &chan->ports[0];
struct tegra_channel *tegra_chan =
v4l2_get_subdev_hostdata(&chan->subdev);
int err = 0;
struct CAPTURE_CONTROL_MSG msg;
if (!chan->pg_mode) {
dev_err(csi->dev, "Gain to be set only in TPG mode\n");
return -EINVAL;
}
if (tegra_chan->tegra_vi_channel[0] == NULL) {
/* We come here during initial v4l2 ctrl setup during TPG LKM
* loading
*/
dev_dbg(csi->dev, "VI channel is not setup yet\n");
return 0;
}
(void)memset(&msg, 0, sizeof(msg));
msg.header.msg_id = CAPTURE_CSI_STREAM_TPG_APPLY_GAIN_REQ;
msg.csi_stream_tpg_apply_gain_req.stream_id = port->stream_id;
msg.csi_stream_tpg_apply_gain_req.virtual_channel_id =
port->virtual_channel_id;
msg.csi_stream_tpg_apply_gain_req.gain_ratio =
get_tpg_gain_ratio_setting(gain_ratio_tpg);
err = csi5_send_control_message(tegra_chan->tegra_vi_channel[0], &msg,
&msg.csi_stream_tpg_apply_gain_resp.result);
if (err < 0) {
dev_err(csi->dev, "%s: Error in setting TPG gain stream_id=%u, csi_port=%u\n",
__func__, port->stream_id, port->csi_port);
}
return err;
}
/*
 * Start streaming on the CSI channel's port @port_idx.
 *
 * Sensor path: configure the stream, then open it. TPG path: open the
 * stream (port unspecified), then start the test-pattern generator.
 *
 * Fix: the return values of csi5_stream_set_config() and
 * csi5_stream_open() were silently discarded; failures are now
 * propagated to the caller.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int csi5_start_streaming(struct tegra_csi_channel *chan, int port_idx)
{
	int err = 0, num_lanes;
	struct tegra_csi_device *csi = chan->csi;
	struct tegra_csi_port *port = &chan->ports[port_idx];
	u32 csi_pt, st_id, vc_id;
	if (chan->pg_mode) {
		csi_pt = NVCSI_PORT_UNSPECIFIED;
		st_id = port->stream_id;
	} else {
		csi_pt = port->csi_port;
		st_id = csi5_port_to_stream(port->csi_port);
	}
	vc_id = port->virtual_channel_id;
	num_lanes = port->lanes;
	dev_dbg(csi->dev, "%s: csi_pt=%u, st_id=%u, vc_id=%u, pg_mode=0x%x\n",
		__func__, csi_pt, st_id, vc_id, chan->pg_mode);
	if (!chan->pg_mode) {
		err = csi5_stream_set_config(chan, st_id, csi_pt, num_lanes);
		if (err < 0)
			return err;
	}
	err = csi5_stream_open(chan, st_id, csi_pt);
	if (err < 0)
		return err;
	if (chan->pg_mode)
		err = csi5_stream_tpg_start(chan, st_id, vc_id);
	return err;
}
static void csi5_stop_streaming(struct tegra_csi_channel *chan, int port_idx)
{
struct tegra_csi_device *csi = chan->csi;
struct tegra_csi_port *port = &chan->ports[port_idx];
u32 csi_pt, st_id, vc_id;
if (chan->pg_mode) {
csi_pt = NVCSI_PORT_UNSPECIFIED;
st_id = port->stream_id;
} else {
csi_pt = port->csi_port;
st_id = csi5_port_to_stream(port->csi_port);
}
vc_id = port->virtual_channel_id;
dev_dbg(csi->dev, "%s: csi_pt=%u, st_id=%u, vc_id=%u, pg_mode=0x%x\n",
__func__, csi_pt, st_id, vc_id, chan->pg_mode);
if (chan->pg_mode)
csi5_stream_tpg_stop(chan, st_id, vc_id);
csi5_stream_close(chan, st_id, csi_pt);
}
/*
 * Recover a CSI stream after an error by stopping and restarting it.
 *
 * Fix: the failure message previously derived the stream id from
 * ports[0] regardless of which port was being recovered; it now uses
 * the port actually restarted (@port_idx).
 *
 * Returns 0 on success, negative errno if the restart fails.
 */
static int csi5_error_recover(struct tegra_csi_channel *chan, int port_idx)
{
	int err = 0;
	struct tegra_csi_device *csi = chan->csi;
	struct tegra_csi_port *port = &chan->ports[port_idx];
	csi5_stop_streaming(chan, port_idx);
	err = csi5_start_streaming(chan, port_idx);
	if (err) {
		dev_err(csi->dev, "failed to restart csi stream %d\n",
			csi5_port_to_stream(port->csi_port));
	}
	return err;
}
/*
 * MIPI calibration hook for the tegra_csi_fops interface. On CSI5 the
 * camera RTCPU owns calibration, so the kernel side is a successful
 * no-op.
 */
static int csi5_mipi_cal(struct tegra_csi_channel *chan)
{
	/* Camera RTCPU handles MIPI calibration */
	return 0;
}
static int csi5_hw_init(struct tegra_csi_device *csi)
{
dev_dbg(csi->dev, "%s\n", __func__);
csi->iomem[0] = csi->iomem_base + CSI5_TEGRA_CSI_STREAM_0_BASE;
csi->iomem[1] = csi->iomem_base + CSI5_TEGRA_CSI_STREAM_2_BASE;
csi->iomem[2] = csi->iomem_base + CSI5_TEGRA_CSI_STREAM_4_BASE;
return 0;
}
/*
 * CSI5 implementation of the tegra_csi_fops interface, exported for the
 * Tegra CSI core to dispatch per-generation operations.
 */
struct tegra_csi_fops csi5_fops = {
	.csi_power_on = csi5_power_on,
	.csi_power_off = csi5_power_off,
	.csi_start_streaming = csi5_start_streaming,
	.csi_stop_streaming = csi5_stop_streaming,
	.csi_error_recover = csi5_error_recover,
	.mipical = csi5_mipi_cal,
	.hw_init = csi5_hw_init,
	.tpg_set_gain = csi5_tpg_set_gain,
};
EXPORT_SYMBOL(csi5_fops);

View File

@@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Tegra CSI5 device common APIs
 *
 * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
 */
#ifndef __CSI5_H__
#define __CSI5_H__
/* CSI5 generation ops table consumed by the Tegra CSI core */
extern struct tegra_csi_fops csi5_fops;
/* Apply a TPG gain ratio (user gain pre-scaled by 8); TPG mode only */
int csi5_tpg_set_gain(struct tegra_csi_channel *chan, int gain_ratio_tpg);
#endif

View File

@@ -0,0 +1,169 @@
// SPDX-License-Identifier: GPL-2.0
/*
* regmap_util.c - utilities for writing regmap tables
*
* Copyright (c) 2013-2022, NVIDIA Corporation. All Rights Reserved.
*/
#include <linux/regmap.h>
#include <linux/module.h>
#include <media/camera_common.h>
/*
 * regmap_util_write_table_8 - write a table of 8-bit register values,
 * coalescing runs of contiguous addresses into regmap_bulk_write() calls.
 *
 * @regmap:            target regmap
 * @table:             register/value pairs, terminated by @end_addr
 * @override_list:     optional values that replace table entries with
 *                     the same address
 * @num_override_regs: number of entries in @override_list
 * @wait_ms_addr:      sentinel address; its val is a delay in ms
 * @end_addr:          sentinel address terminating the table
 *
 * Returns 0 on success or the first regmap error.
 */
int
regmap_util_write_table_8(struct regmap *regmap,
			  const struct reg_8 table[],
			  const struct reg_8 override_list[],
			  int num_override_regs, u16 wait_ms_addr, u16 end_addr)
{
	int err = 0;
	const struct reg_8 *next;
	int i;
	u8 val;
	int range_start = -1;		/* -1 means no range open */
	unsigned int range_count = 0;
	/* bug 200048392 -
	 * the vi i2c cannot take a FIFO buffer bigger than 16 bytes
	 */
	u8 range_vals[16];
	int max_range_vals = ARRAY_SIZE(range_vals);
	for (next = table;; next++) {
		/* If we have a range open and */
		/* either the address doesn't match */
		/* or the temporary storage is full, flush */
		if ((next->addr != range_start + range_count) ||
		(next->addr == end_addr) ||
		(next->addr == wait_ms_addr) ||
		(range_count == max_range_vals)) {
			if (range_count == 1) {
				err =
				regmap_write(regmap, range_start,
					range_vals[0]);
			} else if (range_count > 1) {
				err =
				regmap_bulk_write(regmap, range_start,
					&range_vals[0],
					range_count);
			}
			if (err) {
				pr_err("%s:regmap_util_write_table:%d",
					__func__, err);
				return err;
			}
			range_start = -1;
			range_count = 0;
			/* Handle special address values */
			if (next->addr == end_addr)
				break;
			if (next->addr == wait_ms_addr) {
				msleep_range(next->val);
				continue;
			}
		}
		val = next->val;
		/* When an override list is passed in, replace the reg */
		/* value to write if the reg is in the list */
		if (override_list) {
			for (i = 0; i < num_override_regs; i++) {
				if (next->addr == override_list[i].addr) {
					val = override_list[i].val;
					break;
				}
			}
		}
		/* Open a new range at this address if none is pending */
		if (range_start == -1)
			range_start = next->addr;
		range_vals[range_count++] = val;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(regmap_util_write_table_8);
/*
 * regmap_util_write_table_16_as_8 - write a table of 16-bit register
 * values through an 8-bit regmap, splitting each value into big-endian
 * byte pairs and coalescing contiguous (byte-addressed) registers into
 * regmap_bulk_write() calls.
 *
 * @regmap:            target 8-bit regmap
 * @table:             register/value pairs, terminated by @end_addr
 * @override_list:     optional values that replace table entries with
 *                     the same address
 * @num_override_regs: number of entries in @override_list
 * @wait_ms_addr:      sentinel address; its val is a delay in ms
 * @end_addr:          sentinel address terminating the table
 *
 * Returns 0 on success or the first regmap error.
 */
int
regmap_util_write_table_16_as_8(struct regmap *regmap,
				const struct reg_16 table[],
				const struct reg_16 override_list[],
				int num_override_regs,
				u16 wait_ms_addr, u16 end_addr)
{
	int err = 0;
	const struct reg_16 *next;
	int i;
	u16 val;
	int range_start = -1;		/* -1 means no range open */
	unsigned int range_count = 0;	/* counted in bytes */
	u8 range_vals[256];
	int max_range_vals = ARRAY_SIZE(range_vals) - 1;
	for (next = table;; next++) {
		/* Flush the pending range when the next register is not
		 * contiguous, a sentinel address is hit, or the staging
		 * buffer cannot take two more bytes.
		 *
		 * Fix: the full-buffer test used '==', but range_count
		 * advances by 2 per register and max_range_vals is odd
		 * (255), so equality never held and a long contiguous
		 * table overflowed range_vals[]. '>=' flushes correctly.
		 */
		if ((next->addr != range_start + range_count) ||
		(next->addr == end_addr) ||
		(next->addr == wait_ms_addr) ||
		(range_count >= max_range_vals)) {
			/* two bytes per register, so a non-empty range
			 * always has range_count > 1
			 */
			if (range_count > 1) {
				err =
				regmap_bulk_write(regmap, range_start,
					&range_vals[0],
					range_count);
			}
			if (err) {
				pr_err("%s:regmap_util_write_table:%d",
					__func__, err);
				return err;
			}
			range_start = -1;
			range_count = 0;
			/* Handle special address values */
			if (next->addr == end_addr)
				break;
			if (next->addr == wait_ms_addr) {
				msleep_range(next->val);
				continue;
			}
		}
		val = next->val;
		/* When an override list is passed in, replace the reg */
		/* value to write if the reg is in the list */
		if (override_list) {
			for (i = 0; i < num_override_regs; i++) {
				if (next->addr == override_list[i].addr) {
					val = override_list[i].val;
					break;
				}
			}
		}
		if (range_start == -1)
			range_start = next->addr;
		/* big-endian split: high byte first */
		range_vals[range_count++] = (u8) (val >> 8);
		range_vals[range_count++] = (u8) (val & 0xFF);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(regmap_util_write_table_16_as_8);
MODULE_LICENSE("GPL");

View File

@@ -0,0 +1,864 @@
// SPDX-License-Identifier: GPL-2.0
/*
* sensor_common.c - utilities for tegra sensor drivers
*
* Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <media/sensor_common.h>
#include <linux/of_graph.h>
#include <linux/string.h>
#include <linux/version.h>
/*
 * Parse a device-tree string property as a base-10 u32.
 * Returns -ENODATA if the property is absent, -EFAULT if it does not
 * parse as a decimal number, 0 on success.
 */
static int read_property_u32(
	struct device_node *node, const char *name, u32 *value)
{
	const char *str;

	if (of_property_read_string(node, name, &str))
		return -ENODATA;
	if (kstrtou32(str, 10, value))
		return -EFAULT;

	return 0;
}
/*
 * Parse a device-tree string property as a base-10 u64.
 * Returns -ENODATA if the property is absent, -EFAULT if it does not
 * parse as a decimal number, 0 on success.
 */
static int read_property_u64(
	struct device_node *node, const char *name, u64 *value)
{
	const char *str;

	if (of_property_read_string(node, name, &str))
		return -ENODATA;
	if (kstrtou64(str, 10, value))
		return -EFAULT;

	return 0;
}
static int sensor_common_parse_signal_props(
struct device *dev, struct device_node *node,
struct sensor_signal_properties *signal)
{
const char *temp_str;
int err = 0;
u32 value = 0;
u64 val64 = 0;
u64 rate;
int depth;
err = of_property_read_string(node, "phy_mode", &temp_str);
if (err) {
dev_dbg(dev, "%s: use default phy mode DPHY\n", __func__);
signal->phy_mode = CSI_PHY_MODE_DPHY;
} else {
if (strcmp(temp_str, "CPHY") == 0)
signal->phy_mode = CSI_PHY_MODE_CPHY;
else if (strcmp(temp_str, "DPHY") == 0)
signal->phy_mode = CSI_PHY_MODE_DPHY;
else if (strcmp(temp_str, "SLVS") == 0)
signal->phy_mode = SLVS_EC;
else {
dev_err(dev, "%s: Invalid Phy mode\n", __func__);
return -EINVAL;
}
}
/* Do not report error for these properties yet */
err = read_property_u32(node, "readout_orientation", &value);
if (err)
signal->readout_orientation = 0;
else
signal->readout_orientation = value;
err = read_property_u32(node, "mclk_khz", &value);
if (err)
signal->mclk_freq = 0;
else
signal->mclk_freq = value;
err = read_property_u32(node, "num_lanes", &value);
if (err) {
dev_err(dev, "%s:num_lanes property missing\n", __func__);
return err;
}
signal->num_lanes = value;
err = read_property_u64(node, "pix_clk_hz", &val64);
if (err) {
dev_err(dev, "%s:pix_clk_hz property missing\n", __func__);
return err;
}
signal->pixel_clock.val = val64;
err = read_property_u64(node, "serdes_pix_clk_hz", &val64);
if (err)
signal->serdes_pixel_clock.val = 0;
else
signal->serdes_pixel_clock.val = val64;
if (signal->serdes_pixel_clock.val != 0ULL) {
if (signal->serdes_pixel_clock.val < signal->pixel_clock.val) {
dev_err(dev,
"%s: serdes_pix_clk_hz is lower than pix_clk_hz!\n",
__func__);
return -EINVAL;
}
rate = signal->serdes_pixel_clock.val;
} else {
rate = signal->pixel_clock.val;
}
err = read_property_u32(node, "csi_pixel_bit_depth", &depth);
if (err) {
dev_err(dev,
"%s:csi_pixel_bit_depth property missing.\n",
__func__);
return err;
}
/* Convert pixel rate to lane data rate */
rate = rate * depth / signal->num_lanes;
if (signal->phy_mode == CSI_PHY_MODE_DPHY) {
/* MIPI clock rate */
signal->mipi_clock.val = rate / 2;
} else if (signal->phy_mode == CSI_PHY_MODE_CPHY) {
/* Symbol rate */
signal->mipi_clock.val = rate * 7 / 16;
} else {
/* Data rate */
signal->mipi_clock.val = rate;
}
err = read_property_u32(node, "cil_settletime", &value);
if (err)
signal->cil_settletime = 0;
else
signal->cil_settletime = value;
/* initialize default if this prop not available */
err = of_property_read_string(node, "discontinuous_clk", &temp_str);
if (!err)
signal->discontinuous_clk =
!strncmp(temp_str, "yes", sizeof("yes"));
else
signal->discontinuous_clk = 1;
/* initialize default if this prop not available */
err = of_property_read_string(node, "dpcm_enable", &temp_str);
if (!err)
signal->dpcm_enable =
!strncmp(temp_str, "true", sizeof("true"));
else
signal->dpcm_enable = 0;
/* initialize default if this prop not available */
err = of_property_read_string(node,
"deskew_initial_enable", &temp_str);
if (!err)
signal->deskew_initial_enable =
!strncmp(temp_str, "true", sizeof("true"));
else
signal->deskew_initial_enable = 0;
err = of_property_read_string(node,
"deskew_periodic_enable", &temp_str);
if (!err)
signal->deskew_periodic_enable =
!strncmp(temp_str, "true", sizeof("true"));
else
signal->deskew_periodic_enable = 0;
err = of_property_read_string(node, "tegra_sinterface", &temp_str);
if (err) {
dev_err(dev,
"%s: tegra_sinterface property missing\n", __func__);
return err;
}
if (strcmp(temp_str, "serial_a") == 0)
signal->tegra_sinterface = 0;
else if (strcmp(temp_str, "serial_b") == 0)
signal->tegra_sinterface = 1;
else if (strcmp(temp_str, "serial_c") == 0)
signal->tegra_sinterface = 2;
else if (strcmp(temp_str, "serial_d") == 0)
signal->tegra_sinterface = 3;
else if (strcmp(temp_str, "serial_e") == 0)
signal->tegra_sinterface = 4;
else if (strcmp(temp_str, "serial_f") == 0)
signal->tegra_sinterface = 5;
else if (strcmp(temp_str, "serial_g") == 0)
signal->tegra_sinterface = 6;
else if (strcmp(temp_str, "serial_h") == 0)
signal->tegra_sinterface = 7;
else if (strcmp(temp_str, "host") == 0)
signal->tegra_sinterface = 0; /* for vivid driver */
else {
dev_err(dev,
"%s: tegra_sinterface property out of range\n",
__func__);
return -EINVAL;
}
return 0;
}
/*
 * Translate a device-tree pixel format string (pixel_t) into a V4L2
 * fourcc code. Matching preserves the original semantics: strncmp over
 * strnlen(pixel_t, OF_MAX_STR_LEN) characters, first table entry wins.
 * Returns 0 on a match, -EINVAL for unknown strings.
 */
static int extract_pixel_format(
	const char *pixel_t, u32 *format)
{
	static const struct {
		const char *name;
		u32 fourcc;
	} fmt_map[] = {
		{ "bayer_bggr10", V4L2_PIX_FMT_SBGGR10 },
		{ "bayer_rggb10", V4L2_PIX_FMT_SRGGB10 },
		{ "bayer_grbg10", V4L2_PIX_FMT_SGRBG10 },
		{ "bayer_gbrg10", V4L2_PIX_FMT_SGBRG10 },
		{ "bayer_bggr12", V4L2_PIX_FMT_SBGGR12 },
		{ "bayer_rggb12", V4L2_PIX_FMT_SRGGB12 },
		{ "bayer_gbrg12", V4L2_PIX_FMT_SGBRG12 },
		{ "bayer_grbg12", V4L2_PIX_FMT_SGRBG12 },
		{ "rgb_rgb88824", V4L2_PIX_FMT_RGB24 },
		{ "bayer_wdr_pwl_rggb12", V4L2_PIX_FMT_SRGGB12 },
		{ "bayer_wdr_pwl_gbrg12", V4L2_PIX_FMT_SGBRG12 },
		{ "bayer_wdr_pwl_grbg12", V4L2_PIX_FMT_SGRBG12 },
		{ "bayer_wdr_dol_rggb10", V4L2_PIX_FMT_SRGGB10 },
#if 0 /* disabled for the Canonical kernel */
		{ "bayer_xbggr10p", V4L2_PIX_FMT_XBGGR10P },
		{ "bayer_xrggb10p", V4L2_PIX_FMT_XRGGB10P },
#endif
		{ "yuv_yuyv16", V4L2_PIX_FMT_YUYV },
		{ "yuv_yvyu16", V4L2_PIX_FMT_YVYU },
		{ "yuv_uyvy16", V4L2_PIX_FMT_UYVY },
		{ "yuv_vyuy16", V4L2_PIX_FMT_VYUY },
	};
	size_t size = strnlen(pixel_t, OF_MAX_STR_LEN);
	size_t i;

	for (i = 0; i < ARRAY_SIZE(fmt_map); i++) {
		if (strncmp(pixel_t, fmt_map[i].name, size) == 0) {
			*format = fmt_map[i].fourcc;
			return 0;
		}
	}

	pr_err("%s: Need to extend format%s\n", __func__, pixel_t);
	return -EINVAL;
}
static int sensor_common_parse_image_props(
struct device *dev, struct device_node *node,
struct sensor_image_properties *image)
{
const char *temp_str;
int err = 0, ret = 0;
const char *phase_str, *mode_str;
int depth;
char pix_format[24];
u32 value = 0;
err = read_property_u32(node, "active_w",
&image->width);
if (err) {
dev_err(dev, "%s:active_w property missing\n", __func__);
goto fail;
}
err = read_property_u32(node, "active_h",
&image->height);
if (err) {
dev_err(dev, "%s:active_h property missing\n", __func__);
goto fail;
}
err = read_property_u32(node, "line_length",
&image->line_length);
if (err) {
dev_err(dev, "%s:Line length property missing\n", __func__);
goto fail;
}
/* embedded_metadata_height is optional */
err = read_property_u32(node, "embedded_metadata_height", &value);
if (err)
image->embedded_metadata_height = 0;
else
image->embedded_metadata_height = value;
err = of_property_read_string(node, "pixel_t", &temp_str);
if (err) {
/* pixel_t missing is only an error if alternate not provided */
/* check for alternative format string */
err = of_property_read_string(node, "pixel_phase", &phase_str);
if (err) {
dev_err(dev,
"%s:pixel_phase property missing.\n",
__func__);
dev_err(dev,
"%s:Either pixel_t or alternate must be present.\n",
__func__);
goto fail;
}
err = of_property_read_string(node, "mode_type", &mode_str);
if (err) {
dev_err(dev,
"%s:mode_type property missing.\n",
__func__);
dev_err(dev,
"%s:Either pixel_t or alternate must be present.\n",
__func__);
goto fail;
}
err = read_property_u32(node, "csi_pixel_bit_depth", &depth);
if (err) {
dev_err(dev,
"%s:csi_pixel_bit_depth property missing.\n",
__func__);
dev_err(dev,
"%s:Either pixel_t or alternate must be present.\n",
__func__);
goto fail;
}
ret = sprintf(pix_format, "%s_%s%d", mode_str, phase_str, depth);
if (ret < 0)
return -EINVAL;
temp_str = pix_format;
}
err = extract_pixel_format(temp_str, &image->pixel_format);
if (err) {
dev_err(dev, "Unsupported pixel format\n");
goto fail;
}
fail:
return err;
}
/*
 * sensor_common_parse_dv_timings - parse optional DV timing properties
 * (horizontal/vertical porches and sync widths) from a DT mode node.
 * Every property is optional and defaults to 0; always returns 0.
 */
static int sensor_common_parse_dv_timings(
	struct device *dev, struct device_node *node,
	struct sensor_dv_timings *timings)
{
	u32 value;

	/* Do not report error for these properties yet */
	if (read_property_u32(node, "horz_front_porch", &value))
		value = 0;
	timings->hfrontporch = value;

	if (read_property_u32(node, "horz_sync", &value))
		value = 0;
	timings->hsync = value;

	if (read_property_u32(node, "horz_back_porch", &value))
		value = 0;
	timings->hbackporch = value;

	if (read_property_u32(node, "vert_front_porch", &value))
		value = 0;
	timings->vfrontporch = value;

	if (read_property_u32(node, "vert_sync", &value))
		value = 0;
	timings->vsync = value;

	if (read_property_u32(node, "vert_back_porch", &value))
		value = 0;
	timings->vbackporch = value;

	return 0;
}
/*
 * sensor_common_parse_control_props - parse per-mode control properties
 * (gain, frame rate, exposure ranges and defaults) from a DT mode node.
 *
 * A missing "gain_factor" short-circuits with success after setting the
 * default -- preserved original behavior; presumably it treats such a
 * node as an old DT without control properties (TODO confirm).
 *
 * Fix: the min_exp_time error branch set the 0 default and then
 * unconditionally overwrote it with the (unparsed) val64 because the
 * assignment was missing its `else`.
 *
 * Returns 0; missing properties fall back to defaults with a log entry.
 */
static int sensor_common_parse_control_props(
	struct device *dev, struct device_node *node,
	struct sensor_control_properties *control)
{
	int err = 0;
	u32 value = 0;
	u64 val64 = 0;
	err = read_property_u32(node, "gain_factor", &value);
	if (err) {
		dev_dbg(dev, "%s:%s:property missing\n",
			__func__, "gain_factor");
		control->gain_factor = 1;
		return 0;
	} else
		control->gain_factor = value;
	err = read_property_u32(node, "framerate_factor", &value);
	if (err) {
		dev_err(dev, "%s:%s:property missing\n",
			__func__, "framerate_factor");
		control->framerate_factor = 1;
	} else
		control->framerate_factor = value;
	err = read_property_u32(node, "exposure_factor", &value);
	if (err) {
		dev_err(dev, "%s:%s:property missing\n",
			__func__, "exposure_factor");
		control->exposure_factor = 1;
	} else
		control->exposure_factor = value;
	/* ignore err for this prop */
	err = read_property_u32(node, "inherent_gain", &value);
	if (err)
		control->inherent_gain = 0;
	else
		control->inherent_gain = value;
	err = read_property_u32(node, "min_gain_val", &value);
	if (err) {
		dev_err(dev, "%s:%s:property missing\n",
			__func__, "min_gain_val");
		control->min_gain_val = 0;
	} else
		control->min_gain_val = value;
	err = read_property_u32(node, "max_gain_val", &value);
	if (err) {
		dev_err(dev, "%s:%s:property missing\n",
			__func__, "max_gain_val");
		control->max_gain_val = 0;
	} else
		control->max_gain_val = value;
	err = read_property_u32(node, "step_gain_val", &value);
	if (err) {
		dev_err(dev, "%s:%s:property missing\n",
			__func__, "step_gain_val");
		control->step_gain_val = 0;
	} else
		control->step_gain_val = value;
	/* ignore err for this prop */
	err = read_property_u32(node, "min_hdr_ratio", &value);
	if (err)
		control->min_hdr_ratio = 1;
	else
		control->min_hdr_ratio = value;
	err = read_property_u32(node, "max_hdr_ratio", &value);
	if (err)
		control->max_hdr_ratio = 1;
	else
		control->max_hdr_ratio = value;
	err = read_property_u32(node, "min_framerate", &value);
	if (err) {
		dev_err(dev, "%s:%s:property missing\n",
			__func__, "min_framerate");
		control->min_framerate = 0;
	} else
		control->min_framerate = value;
	err = read_property_u32(node, "max_framerate", &value);
	if (err) {
		dev_err(dev, "%s:%s:property missing\n",
			__func__, "max_framerate");
		control->max_framerate = 0;
	} else
		control->max_framerate = value;
	err = read_property_u32(node, "step_framerate", &value);
	if (err) {
		dev_err(dev, "%s:%s:property missing\n",
			__func__, "step_framerate");
		control->step_framerate = 0;
	} else
		control->step_framerate = value;
	err = read_property_u64(node, "min_exp_time", &val64);
	if (err) {
		dev_err(dev, "%s:%s:property missing\n",
			__func__, "min_exp_time");
		control->min_exp_time.val = 0;
	} else
		control->min_exp_time.val = val64;
	err = read_property_u64(node, "max_exp_time", &val64);
	if (err) {
		dev_err(dev, "%s:%s:property missing\n",
			__func__, "max_exp_time");
		control->max_exp_time.val = 0;
	} else
		control->max_exp_time.val = val64;
	err = read_property_u64(node, "step_exp_time", &val64);
	if (err) {
		dev_err(dev, "%s:%s:property missing\n",
			__func__, "step_exp_time");
		control->step_exp_time.val = 0;
	} else
		control->step_exp_time.val = val64;
	err = read_property_u32(node, "default_gain", &value);
	if (err) {
		dev_err(dev, "%s:%s:property missing\n",
			__func__, "default_gain");
		control->default_gain = 0;
	} else
		control->default_gain = value;
	err = read_property_u32(node, "default_framerate", &value);
	if (err) {
		dev_err(dev, "%s:%s:property missing\n",
			__func__, "default_framerate");
		control->default_framerate = 0;
	} else
		control->default_framerate = value;
	err = read_property_u64(node, "default_exp_time", &val64);
	if (err) {
		dev_err(dev, "%s:%s:property missing\n",
			__func__, "default_exp_time");
		control->default_exp_time.val = 0;
	} else
		control->default_exp_time.val = val64;
	err = read_property_u32(node, "is_interlaced", &value);
	if (err)
		control->is_interlaced = 0;
	else
		control->is_interlaced = value;
	err = read_property_u32(node, "interlaced_type", &value);
	if (err)
		control->interlace_type = 0;
	else
		control->interlace_type = value;
	return 0;
}
/*
 * Count the "mode<N>" child nodes of the device's of_node, up to
 * MAX_NUM_SENSOR_MODES. Returns 0 if the device has no of_node or a
 * mode name cannot be formatted.
 */
int sensor_common_parse_num_modes(const struct device *dev)
{
	struct device_node *np;
	char name[OF_MAX_STR_LEN];
	int count;

	if (!dev || !dev->of_node)
		return 0;
	np = dev->of_node;

	for (count = 0; count < MAX_NUM_SENSOR_MODES; count++) {
		struct device_node *mode;
		int len;

		len = snprintf(name, sizeof(name), "%s%d",
			OF_SENSORMODE_PREFIX, count);
		if (len < 0)
			return 0;
		/* only existence matters; drop the reference right away */
		mode = of_get_child_by_name(np, name);
		of_node_put(mode);
		if (mode == NULL)
			break;
	}
	return count;
}
/*
 * sensor_common_init_i2c_device_config - derive the sensor's I2C wiring
 * (device address, optional mux channel/address, bus register base and
 * clock rate) from its DT position and fill @cfg.
 *
 * Fixes OF reference-count leaks: every early error return leaked the
 * node held in @parent, and the gpio-mux probe node was never put. All
 * error paths now funnel through a single of_node_put().
 *
 * Returns 0 on success, negative errno on missing properties.
 */
static int sensor_common_init_i2c_device_config(
	struct device *dev, struct device_node *np,
	struct sensor_cfg *cfg)
{
	struct i2c_sensor_cfg *i2c_sensor = &cfg->u.i2c_sensor;
	struct device_node *node = NULL;
	struct device_node *parent = NULL;
	int err = 0;
	u32 value = 0;
	bool is_mux_valid = 0;

	cfg->type = CAMERA_DEVICE_I2C_SENSOR;

	err = of_property_read_u32(np, "reg", &value);
	if (err) {
		dev_err(dev, "sensor address unavailable\n");
		return err;
	}
	/* Reading more devices has to be supported */
	i2c_sensor->num_devs = 1;
	i2c_sensor->sd[0].addr = value;

	parent = of_get_parent(np);
	/* verify the parent is mux or i2c bus */
	is_mux_valid =
		of_property_read_bool(parent, "i2c-mux,deselect-on-exit");
	i2c_sensor->mux.is_mux_valid = is_mux_valid;
	if (is_mux_valid) {
		/* at mux port read the mux channel */
		err = of_property_read_u32(parent, "reg", &value);
		if (err) {
			dev_err(dev, "mux channel unavailable\n");
			goto put_parent;
		}
		i2c_sensor->mux.mux_channel = value;
		/* move to mux node */
		node = of_get_parent(parent);
		of_node_put(parent);
		parent = node;
		err = of_property_read_u32(parent, "reg", &value);
		if (err) {
			dev_err(dev, "mux address unavailable\n");
			goto put_parent;
		}
		i2c_sensor->mux.mux_addr = value;
		/* move to i2c bus node */
		node = of_get_parent(parent);
		of_node_put(parent);
		parent = node;
	} else {
		/* move to next parent to check
		 * if it is a gpio based i2c mux
		 */
		node = of_get_parent(parent);
		if (of_device_is_compatible(node, "i2c-mux-gpio")) {
			of_node_put(parent);
			/* move to i2c bus node */
			parent = of_parse_phandle(node, "i2c-parent", 0);
		}
		of_node_put(node); /* fix: reference was leaked before */
	}
	/* read parent which is i2c bus */
	err = of_property_read_u32_index(parent, "reg", 1, &value);
	if (err) {
		dev_err(dev, "i2c bus regbase unavailable\n");
		goto put_parent;
	}
	i2c_sensor->bus.reg_base = value;
	err = of_property_read_u32(parent, "clock-frequency", &value);
	if (err) {
		dev_err(dev, "bus clock frequency unavailable\n");
		goto put_parent;
	}
	i2c_sensor->bus.clk_rate = value;
	/*
	 * Read any additional flags to configure I2C for any
	 * special properties of the device like-high-speed mode,
	 * 10bit addressing etc.,
	 */
put_parent:
	of_node_put(parent);
	return err;
}
/*
 * sensor_common_init_spi_device_config - derive the sensor's SPI wiring
 * (chip select, bus register base, max clock rate) from its DT position
 * and fill @cfg.
 *
 * Fixes an OF reference-count leak: the early error returns left the
 * @parent reference held; errors now funnel through of_node_put().
 *
 * Returns 0 on success, negative errno on missing properties.
 */
static int sensor_common_init_spi_device_config(
	struct device *dev, struct device_node *np,
	struct sensor_cfg *cfg)
{
	struct spi_sensor_cfg *spi_sensor = &cfg->u.spi_sensor;
	struct device_node *parent = NULL;
	int err = 0;
	u32 value = 0;

	cfg->type = CAMERA_DEVICE_SPI_SENSOR;

	err = of_property_read_u32(np, "reg", &value);
	if (err) {
		dev_err(dev, "sensor address unavailable\n");
		return err;
	}
	/* Reading more devices has to be supported */
	spi_sensor->num_devs = 1;
	spi_sensor->sd[0].addr = value;

	parent = of_get_parent(np);
	/* TODO: Add logic for spi mux if available */
	/* read parent which is spi bus */
	err = of_property_read_u32_index(parent, "reg", 1, &value);
	if (err) {
		dev_err(dev, "spi bus regbase unavailable\n");
		goto put_parent;
	}
	spi_sensor->bus.reg_base = value;
	err = of_property_read_u32(parent, "spi-max-frequency", &value);
	if (err) {
		dev_err(dev, "bus clock frequency unavailable\n");
		goto put_parent;
	}
	spi_sensor->bus.clk_rate = value;
	/* Read any additional flags to configure SPI */
put_parent:
	of_node_put(parent);
	return err;
}
/*
 * Dispatch device-config parsing based on the bus the sensor node sits
 * on: an "i2c*" parent selects the I2C path, an "spi*" parent the SPI
 * path. Returns 0 when the parent is neither (cfg untouched), otherwise
 * the bus-specific parser's result.
 */
static int sensor_common_init_device_config(
	struct device *dev, struct device_node *np,
	struct sensor_cfg *cfg)
{
	struct device_node *parent;
	int err = 0;

	if (!np)
		return -EINVAL;
	parent = of_get_parent(np);
	if (!parent)
		return -EINVAL;

	if (strnstr(parent->name, "i2c", 4) != NULL)
		err = sensor_common_init_i2c_device_config(dev, np, cfg);
	if (!err && strnstr(parent->name, "spi", 4) != NULL)
		err = sensor_common_init_spi_device_config(dev, np, cfg);

	of_node_put(parent);
	return err;
}
/*
 * sensor_common_init_sensor_properties - parse the device config and all
 * "mode<N>" child nodes of @np into @sensor.
 *
 * Fixes: a stray ';' after an if-block; the snprintf-failure path inside
 * the parse loop returned without freeing sensor_modes like the other
 * error paths; the allocation-failure path double-put a node reference
 * that the counting loop had already released; allocation switched to
 * devm_kcalloc() (overflow-checked sizing).
 *
 * Returns 0 on success, negative errno on failure; on failure the
 * sensor_modes allocation is released.
 */
int sensor_common_init_sensor_properties(
	struct device *dev, struct device_node *np,
	struct sensor_properties *sensor)
{
	char temp_str[OF_MAX_STR_LEN];
	struct device_node *node = NULL;
	int num_modes = 0;
	int err, i;

	if (sensor == NULL)
		return -EINVAL;

	err = sensor_common_init_device_config(dev, np, &sensor->cfg);
	if (err)
		return err;

	/* get number of modes */
	for (i = 0; num_modes < MAX_NUM_SENSOR_MODES; i++) {
		err = snprintf(temp_str, sizeof(temp_str), "%s%d",
			OF_SENSORMODE_PREFIX, i);
		if (err < 0)
			return -EINVAL;
		/* only existence matters; drop the reference right away */
		node = of_get_child_by_name(np, temp_str);
		of_node_put(node);
		if (node == NULL)
			break;
		num_modes++;
	}
	sensor->num_modes = num_modes;

	sensor->sensor_modes = devm_kcalloc(dev, num_modes,
		sizeof(struct sensor_mode_properties), GFP_KERNEL);
	if (!sensor->sensor_modes) {
		dev_err(dev, "Failed to allocate memory for sensor modes\n");
		err = -ENOMEM;
		goto alloc_fail;
	}

	for (i = 0; i < num_modes; i++) {
		err = snprintf(temp_str, sizeof(temp_str), "%s%d",
			OF_SENSORMODE_PREFIX, i);
		if (err < 0) {
			err = -EINVAL;
			node = NULL;	/* no reference held here */
			goto fail;
		}
		node = of_get_child_by_name(np, temp_str);
		if (node == NULL) {
			dev_err(dev, "Failed to find %s\n", temp_str);
			err = -ENODATA;
			goto fail;
		}
		dev_dbg(dev, "parsing for %s props\n", temp_str);
		err = sensor_common_parse_signal_props(dev, node,
			&sensor->sensor_modes[i].signal_properties);
		if (err) {
			dev_err(dev, "Failed to read %s signal props\n",
				temp_str);
			goto fail;
		}
		err = sensor_common_parse_image_props(dev, node,
			&sensor->sensor_modes[i].image_properties);
		if (err) {
			dev_err(dev, "Failed to read %s image props\n",
				temp_str);
			goto fail;
		}
		err = sensor_common_parse_dv_timings(dev, node,
			&sensor->sensor_modes[i].dv_timings);
		if (err) {
			dev_err(dev, "Failed to read %s DV timings\n",
				temp_str);
			goto fail;
		}
		err = sensor_common_parse_control_props(dev, node,
			&sensor->sensor_modes[i].control_properties);
		if (err) {
			dev_err(dev, "Failed to read %s control props\n",
				temp_str);
			goto fail;
		}
		of_node_put(node);
	}
	return 0;

fail:
	of_node_put(node);
	devm_kfree(dev, sensor->sensor_modes);
	sensor->sensor_modes = NULL;
alloc_fail:
	return err;
}
EXPORT_SYMBOL(sensor_common_init_sensor_properties);

View File

@@ -0,0 +1,204 @@
// SPDX-License-Identifier: GPL-2.0
/*
* tegracam_core - tegra camera framework initialization
*
* Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/types.h>
#include <media/tegra-v4l2-camera.h>
#include <media/tegracam_core.h>
/* List node pairing one registered tegracam device with the global list. */
struct tegracam_device_entry {
	struct tegracam_device *tc_dev;
	struct list_head list;
};

/* Global registry of tegracam devices, guarded by tc_device_list_mutex. */
static struct list_head tc_device_list_head =
	LIST_HEAD_INIT(tc_device_list_head);
static DEFINE_MUTEX(tc_device_list_mutex);

/* use semantic versioning convention */
#define TEGRACAM_MAJOR_VERSION 2
#define TEGRACAM_MINOR_VERSION 0
#define TEGRACAM_PATCH_VERSION 6
/* Pack a semantic version as 0x00MMmmpp (major, minor, patch). */
u32 tegracam_version(u8 major, u8 minor, u8 patch)
{
	u32 packed = (u32)major << 16;

	packed |= (u32)minor << 8;
	packed |= patch;
	return packed;
}
EXPORT_SYMBOL_GPL(tegracam_version);
u32 tegracam_query_version(const char *of_dev_name)
{
struct tegracam_device_entry *entry = NULL;
struct device_node *node;
u32 version = 0;
if (of_dev_name == NULL)
return 0;
mutex_lock(&tc_device_list_mutex);
list_for_each_entry(entry, &tc_device_list_head, list) {
node = entry->tc_dev->dev->of_node;
if (strcmp(of_dev_name, node->name) == 0) {
version = entry->tc_dev->version;
break;
}
}
mutex_unlock(&tc_device_list_mutex);
return version;
}
EXPORT_SYMBOL_GPL(tegracam_query_version);
/*
 * to_tegracam_device - recover the tegracam_device from common data
 *
 * Goes through the control handler's back-pointer to the owning device.
 */
struct tegracam_device *to_tegracam_device(struct camera_common_data *data)
{
	/* fix this by moving subdev to base struct */
	return (struct tegracam_device *)data->tegracam_ctrl_hdl->tc_dev;
}
EXPORT_SYMBOL_GPL(to_tegracam_device);
/*
 * tegracam_set_privdata - store driver-private data on the device
 *
 * Mirrors the pointer into s_data->priv for legacy consumers.
 */
void tegracam_set_privdata(struct tegracam_device * tc_dev, void *priv)
{
	tc_dev->priv = priv;
	/* TODO: cleanup needed for priv once sensors adapt this driver */
	tc_dev->s_data->priv = priv;
}
EXPORT_SYMBOL_GPL(tegracam_set_privdata);
/* tegracam_get_privdata - return the pointer stored by tegracam_set_privdata */
void *tegracam_get_privdata(struct tegracam_device *tc_dev)
{
	return tc_dev->priv;
}
EXPORT_SYMBOL_GPL(tegracam_get_privdata);
int tegracam_device_register(struct tegracam_device *tc_dev)
{
struct device *dev = tc_dev->dev;
struct tegracam_ctrl_handler *ctrl_hdl = NULL;
struct tegracam_device_entry *tc_dev_entry = NULL;
struct camera_common_power_rail *pw_rail = NULL;
struct camera_common_data *s_data = NULL;
struct sensor_mode_properties *sensor_mode = NULL;
struct sensor_signal_properties *signal_props = NULL;
struct sensor_image_properties *image_props = NULL;
u32 mode_idx = 0;
int err = 0;
s_data = devm_kzalloc(dev,
sizeof(struct camera_common_data), GFP_KERNEL);
s_data->dev = dev;
ctrl_hdl = devm_kzalloc(dev,
sizeof(struct tegracam_ctrl_handler), GFP_KERNEL);
ctrl_hdl->tc_dev = tc_dev;
s_data->tegracam_ctrl_hdl = ctrl_hdl;
pw_rail = devm_kzalloc(dev,
sizeof(struct camera_common_power_rail), GFP_KERNEL);
s_data->power = pw_rail;
s_data->regmap = devm_regmap_init_i2c(tc_dev->client,
tc_dev->dev_regmap_config);
if (IS_ERR(s_data->regmap)) {
dev_err(dev,
"regmap init failed: %ld\n", PTR_ERR(s_data->regmap));
return -ENODEV;
}
if (!tc_dev->sensor_ops) {
dev_err(dev, "sensor ops not initialized\n");
return -EINVAL;
}
s_data->ops = tc_dev->sensor_ops;
s_data->pdata = tc_dev->sensor_ops->parse_dt(tc_dev);
if (PTR_ERR(s_data->pdata) == -EPROBE_DEFER)
return -EPROBE_DEFER;
if (!s_data->pdata) {
dev_err(dev, "unable to get platform data\n");
return -EFAULT;
}
tc_dev->s_data = s_data;
err = tc_dev->sensor_ops->power_get(tc_dev);
if (err) {
dev_err(dev, "unable to power get\n");
return -EFAULT;
}
err = camera_common_initialize(s_data, tc_dev->name);
if (err) {
dev_err(dev, "Failed to initialize %s\n", tc_dev->name);
return err;
}
/* TODO: updated default mode from DT ?? */
mode_idx = s_data->mode_prop_idx = 0;
/* init format context */
/*TODO: compile frmfmt array from DT */
s_data->frmfmt = tc_dev->sensor_ops->frmfmt_table;
s_data->numfmts = tc_dev->sensor_ops->numfrmfmts;
sensor_mode = &s_data->sensor_props.sensor_modes[mode_idx];
signal_props = &sensor_mode->signal_properties;
image_props = &sensor_mode->image_properties;
s_data->def_mode = s_data->frmfmt[mode_idx].mode;
s_data->colorfmt =
camera_common_find_pixelfmt(image_props->pixel_format);
s_data->def_width = s_data->fmt_width =
s_data->frmfmt[mode_idx].size.width;
s_data->def_height = s_data->fmt_height =
s_data->frmfmt[mode_idx].size.height;
s_data->def_clk_freq = signal_props->mclk_freq * 1000;
/* add version info to identify the right feature set */
tc_dev->version = tegracam_version(TEGRACAM_MAJOR_VERSION,
TEGRACAM_MINOR_VERSION, TEGRACAM_PATCH_VERSION);
s_data->version = tc_dev->version;
/* Add tc_dev to list of registered devices */
tc_dev_entry = devm_kzalloc(dev,
sizeof(*tc_dev_entry), GFP_KERNEL);
tc_dev_entry->tc_dev = tc_dev;
INIT_LIST_HEAD(&tc_dev_entry->list);
mutex_lock(&tc_device_list_mutex);
list_add(&tc_dev_entry->list, &tc_device_list_head);
mutex_unlock(&tc_device_list_mutex);
dev_info(dev, "tegracam sensor driver:%s_v%d.%d.%d\n",
tc_dev->name, TEGRACAM_MAJOR_VERSION,
TEGRACAM_MINOR_VERSION, TEGRACAM_PATCH_VERSION);
return 0;
}
EXPORT_SYMBOL_GPL(tegracam_device_register);
void tegracam_device_unregister(struct tegracam_device *tc_dev)
{
struct tegracam_device_entry *entry;
struct tegracam_device_entry *temp;
struct camera_common_data *s_data = tc_dev->s_data;
tc_dev->sensor_ops->power_put(tc_dev);
camera_common_cleanup(s_data);
/* Remove tc_dev from list of registered devices */
mutex_lock(&tc_device_list_mutex);
list_for_each_entry_safe(entry, temp, &tc_device_list_head, list) {
if (entry->tc_dev == tc_dev) {
list_del(&entry->list);
break;
}
}
mutex_unlock(&tc_device_list_mutex);
devm_kfree(tc_dev->dev, entry);
devm_kfree(tc_dev->dev, tc_dev->s_data->tegracam_ctrl_hdl);
devm_kfree(tc_dev->dev, tc_dev->s_data->power);
devm_kfree(tc_dev->dev, tc_dev->s_data);
tc_dev->s_data = NULL;
}
EXPORT_SYMBOL_GPL(tegracam_device_unregister);

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,249 @@
// SPDX-License-Identifier: GPL-2.0
/*
* tegracam_utils - tegra camera framework utilities
*
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/types.h>
#include <linux/regmap.h>
#include <media/tegracam_core.h>
#include <media/tegracam_utils.h>
/* True when the driver reports at least the 2.0.0 base tvcf version. */
bool is_tvcf_supported(u32 version)
{
	return version >= tegracam_version(2, 0, 0);
}
EXPORT_SYMBOL_GPL(is_tvcf_supported);
/*
 * Render a packed version as "major.minor.patch" into @buff.
 * Returns the snprintf result, or -EINVAL when @buff is NULL.
 */
int format_tvcf_version(u32 version, char *buff, size_t size)
{
	u8 major = (u8)(version >> 16);
	u8 minor = (u8)(version >> 8);
	u8 patch = (u8)version;

	if (!buff)
		return -EINVAL;

	return snprintf(buff, size, "%u.%u.%u", major, minor, patch);
}
EXPORT_SYMBOL_GPL(format_tvcf_version);
/* Split a u32 into 4 bytes, most-significant byte first. */
void conv_u32_u8arr(u32 input, u8 *output)
{
	int i;

	for (i = 0; i < 4; i++)
		output[i] = (input >> (8 * (3 - i))) & 0xFF;
}
EXPORT_SYMBOL_GPL(conv_u32_u8arr);
/* Split a u16 into 2 bytes, most-significant byte first. */
void conv_u16_u8arr(u16 input, u8 *output)
{
	output[0] = (u8)(input >> 8);
	output[1] = (u8)(input & 0xFF);
}
EXPORT_SYMBOL_GPL(conv_u16_u8arr);
/*
 * Validate that @blob can accept one more command with @size payload bytes.
 * Returns 0, -EINVAL for a NULL blob, or -ENOMEM when full.
 */
static inline int is_valid_blob(struct sensor_blob *blob, u32 size)
{
	if (blob == NULL)
		return -EINVAL;

	if (blob->num_cmds >= MAX_COMMANDS)
		return -ENOMEM;
	if (blob->buf_size + size >= MAX_BLOB_SIZE)
		return -ENOMEM;

	return 0;
}
/*
 * Append a register-write command: opcode carries the payload size in its
 * low 24 bits, and @size bytes from @buf are copied into the data buffer.
 */
int prepare_write_cmd(struct sensor_blob *blob,
		u32 size, u32 addr, u8 *buf)
{
	struct sensor_cmd *slot;
	int ret;

	ret = is_valid_blob(blob, size);
	if (ret)
		return ret;

	slot = &blob->cmds[blob->num_cmds++];
	slot->opcode = (SENSOR_OPCODE_WRITE << 24) | size;
	slot->addr = addr;

	memcpy(&blob->buf[blob->buf_size], buf, size);
	blob->buf_size += size;

	return 0;
}
EXPORT_SYMBOL_GPL(prepare_write_cmd);
/*
 * Append a register-read command and reserve @size bytes in the data
 * buffer for the readback payload.
 */
int prepare_read_cmd(struct sensor_blob *blob,
		u32 size, u32 addr)
{
	struct sensor_cmd *slot;
	int ret;

	ret = is_valid_blob(blob, size);
	if (ret)
		return ret;

	slot = &blob->cmds[blob->num_cmds++];
	slot->opcode = (SENSOR_OPCODE_READ << 24) | size;
	slot->addr = addr;
	blob->buf_size += size;

	return 0;
}
EXPORT_SYMBOL_GPL(prepare_read_cmd);
/* Append a sleep command; the delay (us) lives in the opcode's low 24 bits. */
int prepare_sleep_cmd(struct sensor_blob *blob, u32 time_in_us)
{
	struct sensor_cmd *slot;
	int ret;

	ret = is_valid_blob(blob, 0);
	if (ret)
		return ret;

	slot = &blob->cmds[blob->num_cmds++];
	slot->opcode = (SENSOR_OPCODE_SLEEP << 24) | time_in_us;

	return 0;
}
EXPORT_SYMBOL_GPL(prepare_sleep_cmd);
/* Append the terminating DONE command that ends blob execution. */
int prepare_done_cmd(struct sensor_blob *blob)
{
	struct sensor_cmd *slot;
	int ret;

	ret = is_valid_blob(blob, 0);
	if (ret)
		return ret;

	slot = &blob->cmds[blob->num_cmds++];
	slot->opcode = SENSOR_OPCODE_DONE;

	return 0;
}
EXPORT_SYMBOL_GPL(prepare_done_cmd);
/*
 * convert_table_to_blob - pack a register table into a command blob
 * @blob: destination command blob
 * @table: register/value table, terminated by an entry at @end_addr
 * @wait_ms_addr: pseudo-address encoding a delay (value in ms)
 * @end_addr: pseudo-address terminating the table
 *
 * Coalesces runs of up to 16 contiguous register addresses into single
 * bulk-write commands; delay rows become sleep commands. The terminating
 * DONE command must be added by the caller.
 *
 * Fix: the return values of prepare_write_cmd()/prepare_sleep_cmd() were
 * ignored, so a full blob silently dropped register writes; propagate the
 * error instead.
 *
 * Returns 0 on success or a negative errno from the blob builders.
 */
int convert_table_to_blob(struct sensor_blob *blob,
			const struct reg_8 table[],
			u16 wait_ms_addr, u16 end_addr)
{
	const struct reg_8 *next;
	u16 addr;
	u8 val;
	int range_start = -1;
	u32 range_count = 0;
	u8 buf[16];
	int err;

	for (next = table;; next++) {
		val = next->val;
		addr = next->addr;
		if (range_start == -1)
			range_start = next->addr;
		/* flush the pending run on overflow or address discontinuity */
		if (range_count == 16 ||
			(addr != (range_start + range_count))) {
			err = prepare_write_cmd(blob, range_count,
					range_start, &buf[0]);
			if (err)
				return err;
			range_start = addr;
			range_count = 0;
		}

		/* Done command must be added by client */
		if (addr == end_addr)
			break;

		if (addr == wait_ms_addr) {
			err = prepare_sleep_cmd(blob, (next->val * 1000));
			if (err)
				return err;
			range_start = -1;
			continue;
		}
		buf[range_count++] = val;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(convert_table_to_blob);
/*
 * Execute a packed command blob against @regmap: bulk writes consume
 * payload bytes from the data buffer, sleeps delay, DONE terminates.
 * Returns 0 on success, a regmap error, or -EINVAL on a bad opcode.
 */
int write_sensor_blob(struct regmap *regmap, struct sensor_blob *blob)
{
	u32 data_off = 0;
	int idx = 0;

	while (idx < blob->num_cmds) {
		struct sensor_cmd *pkt = &blob->cmds[idx++];
		u32 op = pkt->opcode >> 24;
		u32 arg = pkt->opcode & 0x00FFFFFF;

		if (op == SENSOR_OPCODE_DONE)
			break;

		if (op == SENSOR_OPCODE_SLEEP) {
			usleep_range(arg, arg + 10);
		} else if (op == SENSOR_OPCODE_WRITE) {
			int ret = regmap_bulk_write(regmap, pkt->addr,
					&blob->buf[data_off], arg);
			if (ret)
				return ret;
			data_off += arg;
		} else {
			pr_err("blob has been packaged with errors\n");
			return -EINVAL;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(write_sensor_blob);
/*
 * Flush the mode and control command blobs to the sensor when the driver
 * supports blob programming; a no-op otherwise.
 */
int tegracam_write_blobs(struct tegracam_ctrl_handler *hdl)
{
	struct camera_common_data *s_data = hdl->tc_dev->s_data;
	struct tegracam_sensor_data *sensor_data = &hdl->sensor_data;
	struct sensor_blob *mode_blob = &sensor_data->mode_blob;
	struct sensor_blob *ctrl_blob = &sensor_data->ctrls_blob;
	const struct tegracam_ctrl_ops *ops = hdl->ctrl_ops;
	int err;

	/* no blob control available */
	if (!ops || !ops->is_blob_supported)
		return 0;

	/*
	 * TODO: Extend this to multiple subdevices
	 * mode blob commands can be zero for auto control updates
	 * and stop streaming cases
	 */
	if (mode_blob->num_cmds != 0) {
		err = write_sensor_blob(s_data->regmap, mode_blob);
		if (err) {
			dev_err(s_data->dev, "Error writing mode blob\n");
			return err;
		}
	}

	err = write_sensor_blob(s_data->regmap, ctrl_blob);
	if (err)
		dev_err(s_data->dev, "Error writing control blob\n");

	return err;
}
EXPORT_SYMBOL_GPL(tegracam_write_blobs);

View File

@@ -0,0 +1,252 @@
// SPDX-License-Identifier: GPL-2.0
/*
* tegracam_v4l2 - tegra camera framework for v4l2 support
*
* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/types.h>
#include <media/tegra-v4l2-camera.h>
#include <media/tegracam_core.h>
#include <media/tegracam_utils.h>
/*
 * v4l2sd_stream - V4L2 s_stream handler: start or stop sensor streaming
 * @sd: sub-device
 * @enable: non-zero to start streaming, zero to stop
 *
 * Resets the mode/control command blobs, then on enable applies the sensor
 * mode, refreshes control ranges, applies overrides, and starts streaming;
 * on disable stops streaming. A module reference is held while streaming
 * so the driver cannot be unloaded mid-stream.
 */
static int v4l2sd_stream(struct v4l2_subdev *sd, int enable)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct camera_common_data *s_data = to_camera_common_data(&client->dev);
	struct camera_common_sensor_ops *sensor_ops;
	struct tegracam_device *tc_dev;
	struct tegracam_sensor_data *sensor_data;
	struct sensor_blob *ctrl_blob;
	struct sensor_blob *mode_blob;
	int err = 0;

	dev_dbg(&client->dev, "%s++ enable %d\n", __func__, enable);
	if (!s_data)
		return -EINVAL;

	sensor_ops = s_data->ops;
	tc_dev = to_tegracam_device(s_data);
	sensor_data = &s_data->tegracam_ctrl_hdl->sensor_data;
	ctrl_blob = &sensor_data->ctrls_blob;
	mode_blob = &sensor_data->mode_blob;

	/* reset control packet at start/stop streaming */
	memset(ctrl_blob, 0, sizeof(struct sensor_blob));
	memset(mode_blob, 0, sizeof(struct sensor_blob));

	if (enable) {
		/* increase ref count so module can't be unloaded */
		if (!try_module_get(s_data->owner))
			return -ENODEV;
		err = sensor_ops->set_mode(tc_dev);
		if (err) {
			dev_err(&client->dev, "Error writing mode\n");
			goto error;
		}
		/* update control ranges based on mode settings*/
		err = tegracam_init_ctrl_ranges_by_mode(
			s_data->tegracam_ctrl_hdl, (u32) s_data->mode);
		if (err) {
			dev_err(&client->dev, "Error updating control ranges\n");
			goto error;
		}
		if (s_data->override_enable) {
			err = tegracam_ctrl_set_overrides(
					s_data->tegracam_ctrl_hdl);
			if (err) {
				dev_err(&client->dev,
					"overrides cannot be set\n");
				goto error;
			}
		}
		err = sensor_ops->start_streaming(tc_dev);
		if (err) {
			dev_err(&client->dev, "Error turning on streaming\n");
			goto error;
		}
		/* add done command for blobs */
		prepare_done_cmd(mode_blob);
		prepare_done_cmd(ctrl_blob);
		tc_dev->is_streaming = true;
	} else {
		err = sensor_ops->stop_streaming(tc_dev);
		if (err) {
			dev_err(&client->dev, "Error turning off streaming\n");
			goto error;
		}
		/* add done command for blob */
		prepare_done_cmd(ctrl_blob);
		tc_dev->is_streaming = false;
		/* drop the reference taken when streaming was enabled */
		module_put(s_data->owner);
	}

	return 0;

error:
	/*
	 * NOTE(review): this label drops a module reference on the stop
	 * path too (where the matching get happened at stream-on) --
	 * confirm the get/put pairing stays balanced on stop failures.
	 */
	module_put(s_data->owner);
	return err;
}
/* Report 1 in @status when the sensor power rail is switched on. */
static int v4l2sd_g_input_status(struct v4l2_subdev *sd, u32 *status)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct camera_common_data *s_data = to_camera_common_data(&client->dev);

	if (s_data == NULL)
		return -EINVAL;

	*status = (s_data->power->state == SWITCH_ON);

	return 0;
}
/* V4L2 video ops: streaming on/off and power-rail status query. */
static struct v4l2_subdev_video_ops v4l2sd_video_ops = {
	.s_stream = v4l2sd_stream,
	.g_input_status = v4l2sd_g_input_status,
};

/* V4L2 core ops: power handling delegated to the common camera layer. */
static struct v4l2_subdev_core_ops v4l2sd_core_ops = {
	.s_power = camera_common_s_power,
};
/*
 * v4l2sd_get_fmt - return the current format via the common camera layer
 *
 * @state is unused; camera_common_g_fmt() fills @format->format.
 */
static int v4l2sd_get_fmt(struct v4l2_subdev *sd,
		struct v4l2_subdev_state *state,
		struct v4l2_subdev_format *format)
{
	return camera_common_g_fmt(sd, &format->format);
}
static int v4l2sd_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct camera_common_data *s_data = to_camera_common_data(&client->dev);
int ret;
if (!s_data)
return -EINVAL;
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
ret = camera_common_try_fmt(sd, &format->format);
else {
ret = camera_common_s_fmt(sd, &format->format);
if (ret == 0) {
/* update control ranges based on mode settings*/
ret = tegracam_init_ctrl_ranges_by_mode(
s_data->tegracam_ctrl_hdl, (u32) s_data->mode);
if (ret) {
dev_err(&client->dev, "Error updating control ranges %d\n", ret);
return ret;
}
}
}
/* TODO: Add set mode for blob collection */
return ret;
}
/* V4L2 pad ops: format get/set and enumeration via the common layer. */
static struct v4l2_subdev_pad_ops v4l2sd_pad_ops = {
	.set_fmt = v4l2sd_set_fmt,
	.get_fmt = v4l2sd_get_fmt,
	.enum_mbus_code = camera_common_enum_mbus_code,
	.enum_frame_size = camera_common_enum_framesizes,
	.enum_frame_interval = camera_common_enum_frameintervals,
	.get_mbus_config = camera_common_get_mbus_config,
};

/* Aggregate sub-device ops table registered with v4l2_i2c_subdev_init(). */
static struct v4l2_subdev_ops v4l2sd_ops = {
	.core = &v4l2sd_core_ops,
	.video = &v4l2sd_video_ops,
	.pad = &v4l2sd_pad_ops,
};

/* Media entity ops: standard subdev link validation. */
static const struct media_entity_operations media_ops = {
	.link_validate = v4l2_subdev_link_validate,
};
/*
 * tegracam_v4l2subdev_register - init and register the sensor's V4L2 subdev
 * @tc_dev: registered tegracam device
 * @is_sensor: passed through to tegra_media_entity_init()
 *
 * Initializes the I2C subdev, the control handler, the media entity, and
 * registers the subdev with the async framework. The module owner is
 * cleared on the subdev so the driver module can be unloaded; the saved
 * owner is ref-counted around streaming instead (see v4l2sd_stream).
 *
 * Fix: on media-entity-init or async-register failure, the control handler
 * initialized by tegracam_ctrl_handler_init() (and the media entity) were
 * leaked; both are now released on the failure paths.
 *
 * Returns 0 on success or a negative errno.
 */
int tegracam_v4l2subdev_register(struct tegracam_device *tc_dev,
				bool is_sensor)
{
	struct camera_common_data *s_data = tc_dev->s_data;
	struct tegracam_ctrl_handler *ctrl_hdl;
	struct v4l2_subdev *sd = NULL;
	struct device *dev = tc_dev->dev;
	int err = 0;

	if (!s_data)
		return -EINVAL;

	ctrl_hdl = s_data->tegracam_ctrl_hdl;

	/* init v4l2 subdevice for registration */
	sd = &s_data->subdev;
	if (!sd || !tc_dev->client) {
		dev_err(dev, "Invalid subdev context\n");
		return -ENODEV;
	}

	v4l2_i2c_subdev_init(sd, tc_dev->client, &v4l2sd_ops);
	ctrl_hdl->ctrl_ops = tc_dev->tcctrl_ops;
	err = tegracam_ctrl_handler_init(ctrl_hdl);
	if (err) {
		dev_err(dev, "Failed to init ctrls %s\n", tc_dev->name);
		return err;
	}
	if (ctrl_hdl->ctrl_ops != NULL)
		tc_dev->numctrls = ctrl_hdl->ctrl_ops->numctrls;
	else
		tc_dev->numctrls = 0;
	s_data->numctrls = tc_dev->numctrls;
	sd->ctrl_handler = s_data->ctrl_handler = &ctrl_hdl->ctrl_handler;
	s_data->ctrls = ctrl_hdl->ctrls;
	sd->internal_ops = tc_dev->v4l2sd_internal_ops;
	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
		V4L2_SUBDEV_FL_HAS_EVENTS;
	s_data->owner = sd->owner;
	/* Set owner to NULL so we can unload the driver module */
	sd->owner = NULL;

#if defined(CONFIG_MEDIA_CONTROLLER)
	tc_dev->pad.flags = MEDIA_PAD_FL_SOURCE;
	sd->entity.ops = &media_ops;
	err = tegra_media_entity_init(&sd->entity,
			1, &tc_dev->pad, true, is_sensor);
	if (err < 0) {
		dev_err(dev, "unable to init media entity\n");
		v4l2_ctrl_handler_free(&ctrl_hdl->ctrl_handler);
		return err;
	}
#endif

	err = v4l2_async_register_subdev(sd);
	if (err < 0) {
#if defined(CONFIG_MEDIA_CONTROLLER)
		media_entity_cleanup(&sd->entity);
#endif
		v4l2_ctrl_handler_free(&ctrl_hdl->ctrl_handler);
	}

	return err;
}
EXPORT_SYMBOL_GPL(tegracam_v4l2subdev_register);
/*
 * tegracam_v4l2subdev_unregister - undo tegracam_v4l2subdev_register
 *
 * Frees the control handler, unregisters the async subdev, and cleans up
 * the media entity.
 *
 * NOTE(review): the control handler is freed before the subdev is
 * unregistered; the reverse order (unregister first) is the usual pattern
 * to avoid a window where the registered subdev references freed controls
 * -- confirm this ordering is safe here.
 */
void tegracam_v4l2subdev_unregister(struct tegracam_device *tc_dev)
{
	struct camera_common_data *s_data = tc_dev->s_data;
	struct v4l2_subdev *sd;

	if (!s_data)
		return;

	sd = &s_data->subdev;
	v4l2_ctrl_handler_free(s_data->ctrl_handler);
	v4l2_async_unregister_subdev(sd);
#if defined(CONFIG_MEDIA_CONTROLLER)
	media_entity_cleanup(&sd->entity);
#endif
}
EXPORT_SYMBOL_GPL(tegracam_v4l2subdev_unregister);

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,184 @@
// SPDX-License-Identifier: GPL-2.0
/*
* NVIDIA Tegra Video Input Device Driver Core Helpers
*
* Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/nospec.h>
#include <linux/platform_device.h>
#include <linux/arm64-barrier.h>
#include <media/mc_common.h>
/* Default format table: a single 10-bit RGGB Bayer (RAW10) entry. */
static const struct tegra_video_format tegra_default_format[] = {
	{
		TEGRA_VF_DEF,
		10,
		MEDIA_BUS_FMT_SRGGB10_1X10,
		{2, 1},
		TEGRA_IMAGE_FORMAT_DEF,
		TEGRA_IMAGE_DT_RAW10,
		V4L2_PIX_FMT_SRGGB10,
		"RGRG.. GBGB..",
	},
};
/* -----------------------------------------------------------------------------
* Helper functions
*/
/**
* tegra_core_get_fourcc_by_idx - get fourcc of a tegra_video format
* @index: array index of the tegra_video_formats
*
* Return: fourcc code
*/
/*
 * tegra_core_get_fourcc_by_idx - fourcc for a format-table index
 * @chan: channel owning the format table
 * @index: array index into chan->video_formats
 *
 * Fix: the original guard `index > (num_video_formats - 1)` underflows
 * when num_video_formats is 0 and the field is unsigned, allowing an
 * out-of-bounds read; `index >= num_video_formats` covers that case too.
 *
 * Returns the fourcc, or V4L2_PIX_FMT_SGRBG10 when @index is out of range.
 */
u32 tegra_core_get_fourcc_by_idx(struct tegra_channel *chan,
				unsigned int index)
{
	/* return default fourcc format if the index is out of bounds */
	if (index >= chan->num_video_formats)
		return V4L2_PIX_FMT_SGRBG10;

	/* clamp under speculation to close the Spectre-v1 gadget */
	index = array_index_nospec(index, chan->num_video_formats);

	return chan->video_formats[index]->fourcc;
}
EXPORT_SYMBOL(tegra_core_get_fourcc_by_idx);
/**
* tegra_core_get_word_count - Calculate word count
* @frame_width: number of pixels per line
* @fmt: Tegra Video format struct which has BPP information
*
* Return: word count number
*/
u32 tegra_core_get_word_count(unsigned int frame_width,
			const struct tegra_video_format *fmt)
{
	/* total bits per line converted to bytes */
	u32 bits_per_line = frame_width * fmt->width;

	return bits_per_line / 8;
}
/**
* tegra_core_get_idx_by_code - Retrieve index for a media bus code
* @code: the format media bus code
*
* Return: a index to the format information structure corresponding to the
* given V4L2 media bus format @code, or -1 if no corresponding format can
* be found.
*/
/*
 * tegra_core_get_idx_by_code - index of a media bus code in the format table
 *
 * Fix: add the spec_bar() speculation barrier on the not-found path, for
 * consistency with tegra_core_get_code_by_fourcc() and
 * tegra_core_get_format_by_code() in this file.
 *
 * Returns the index at or after @offset, or -1 when not found.
 */
int tegra_core_get_idx_by_code(struct tegra_channel *chan,
	unsigned int code, unsigned offset)
{
	unsigned int i;

	for (i = offset; i < chan->num_video_formats; ++i) {
		if (chan->video_formats[i]->code == code)
			return i;
	}
	spec_bar();

	return -1;
}
EXPORT_SYMBOL(tegra_core_get_idx_by_code);
/**
* tegra_core_get_code_by_fourcc - Retrieve media bus code for fourcc
* @fourcc: the format 4CC
*
* Return: media bus code format information structure corresponding to the
* given V4L2 fourcc @fourcc, or -1 if no corresponding format found.
*/
int tegra_core_get_code_by_fourcc(struct tegra_channel *chan,
	unsigned int fourcc, unsigned offset)
{
	unsigned int idx = offset;

	/* linear scan of the channel's format table starting at offset */
	while (idx < chan->num_video_formats) {
		if (chan->video_formats[idx]->fourcc == fourcc)
			return chan->video_formats[idx]->code;
		idx++;
	}
	spec_bar();

	return -1;
}
EXPORT_SYMBOL(tegra_core_get_code_by_fourcc);
/**
* tegra_core_get_default_format - Get default format
*
* Return: pointer to the format where the default format needs
* to be filled in.
*/
const struct tegra_video_format *tegra_core_get_default_format(void)
{
	/* single-entry table: RAW10 RGGB Bayer (see tegra_default_format) */
	return &tegra_default_format[0];
}
EXPORT_SYMBOL(tegra_core_get_default_format);
/**
* tegra_core_get_format_by_code - Retrieve format information for a media
* bus code
* @code: the format media bus code
*
* Return: a pointer to the format information structure corresponding to the
* given V4L2 media bus format @code, or NULL if no corresponding format can
* be found.
*/
const struct tegra_video_format *
tegra_core_get_format_by_code(struct tegra_channel *chan,
			unsigned int code, unsigned offset)
{
	unsigned int idx = offset;

	/* linear scan of the channel's format table starting at offset */
	while (idx < chan->num_video_formats) {
		if (chan->video_formats[idx]->code == code)
			return chan->video_formats[idx];
		idx++;
	}
	spec_bar();

	return NULL;
}
EXPORT_SYMBOL(tegra_core_get_format_by_code);
/**
* tegra_core_get_format_by_fourcc - Retrieve format information for a 4CC
* @fourcc: the format 4CC
*
* Return: a pointer to the format information structure corresponding to the
* given V4L2 format @fourcc, or NULL if no corresponding format can be
* found.
*/
const struct tegra_video_format *
tegra_core_get_format_by_fourcc(struct tegra_channel *chan, u32 fourcc)
{
	unsigned int idx = 0;

	/* linear scan over the whole format table */
	while (idx < chan->num_video_formats) {
		if (chan->video_formats[idx]->fourcc == fourcc)
			return chan->video_formats[idx];
		idx++;
	}
	spec_bar();

	return NULL;
}
EXPORT_SYMBOL(tegra_core_get_format_by_fourcc);
/**
* tegra_core_bytes_per_line - Calculate bytes per line in one frame
* @width: frame width
* @align: number of alignment bytes
* @fmt: Tegra Video format
*
* Simply calcualte the bytes_per_line and if it's not aligned it
* will be padded to alignment boundary.
*/
u32 tegra_core_bytes_per_line(unsigned int width, unsigned int align,
			const struct tegra_video_format *fmt)
{
	u32 stride = (width * fmt->bpp.numerator) / fmt->bpp.denominator;

	/* pad the stride up to the requested alignment boundary */
	return roundup(stride, align);
}
EXPORT_SYMBOL(tegra_core_bytes_per_line);

View File

@@ -0,0 +1,657 @@
// SPDX-License-Identifier: GPL-2.0
/*
* NVIDIA Media controller graph management
*
* Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <soc/tegra/fuse.h>
#include <media/media-device.h>
#include <media/v4l2-async.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/tegra_v4l2_camera.h>
#include <media/mc_common.h>
#include <media/csi.h>
#include "nvcsi/nvcsi.h"
/* -----------------------------------------------------------------------------
* Graph Management
*/
static struct tegra_vi_graph_entity *
tegra_vi_graph_find_entity(struct tegra_channel *chan,
const struct device_node *node)
{
struct tegra_vi_graph_entity *entity;
list_for_each_entry(entity, &chan->entities, list) {
if (entity->node == node)
return entity;
}
return NULL;
}
/*
 * tegra_vi_graph_build_one - create media links for one graph entity
 * @chan: channel owning the entity list
 * @entity: entity whose OF endpoints are walked
 *
 * Iterates the entity's DT endpoints, parses each fwnode link, skips sink
 * pads and links back to the channel node, and creates an enabled media
 * link from each remaining source pad to its remote entity's pad.
 *
 * NOTE(review): when the loop exits early via `break` after an error, the
 * endpoint node reference held in `ep` does not appear to be released --
 * confirm whether an of_node_put(ep) is needed on those paths.
 */
static int tegra_vi_graph_build_one(struct tegra_channel *chan,
				struct tegra_vi_graph_entity *entity)
{
	u32 link_flags = MEDIA_LNK_FL_ENABLED;
	struct media_entity *local;
	struct media_entity *remote;
	struct media_pad *local_pad;
	struct media_pad *remote_pad;
	struct tegra_vi_graph_entity *ent;
	struct v4l2_fwnode_link link;
	struct device_node *ep = NULL;
	struct device_node *next;
	int ret = 0;

	if (!entity->subdev) {
		dev_err(chan->vi->dev, "%s:No subdev under entity, skip linking\n",
			__func__);
		return 0;
	}

	local = entity->entity;
	dev_dbg(chan->vi->dev, "creating links for entity %s\n", local->name);

	do {
		/* Get the next endpoint and parse its link. */
		next = of_graph_get_next_endpoint(entity->node, ep);
		if (next == NULL)
			break;

		ep = next;

		dev_dbg(chan->vi->dev, "processing endpoint %pOF\n",
			ep);

		ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link);
		if (ret < 0) {
			dev_err(chan->vi->dev,
				"failed to parse link for %pOF\n", ep);
			continue;
		}

		if (link.local_port >= local->num_pads) {
			dev_err(chan->vi->dev,
				"invalid port number %u for %pOF\n",
				link.local_port, to_of_node(link.local_node));
			v4l2_fwnode_put_link(&link);
			ret = -EINVAL;
			break;
		}

		local_pad = &local->pads[link.local_port];

		/* Skip sink ports, they will be processed from the other end of
		 * the link.
		 */
		if (local_pad->flags & MEDIA_PAD_FL_SINK) {
			dev_dbg(chan->vi->dev, "skipping sink port %pOF:%u\n",
				to_of_node(link.local_node), link.local_port);
			v4l2_fwnode_put_link(&link);
			continue;
		}

		/* Skip channel entity , they will be processed separately. */
		if (link.remote_node == of_fwnode_handle(chan->vi->dev->of_node)) {
			dev_dbg(chan->vi->dev, "skipping channel port %pOF:%u\n",
				to_of_node(link.local_node), link.local_port);
			v4l2_fwnode_put_link(&link);
			continue;
		}

		/* Find the remote entity. */
		ent = tegra_vi_graph_find_entity(chan, to_of_node(link.remote_node));
		if (ent == NULL) {
			dev_err(chan->vi->dev, "no entity found for %pOF\n",
				to_of_node(link.remote_node));
			v4l2_fwnode_put_link(&link);
			ret = -EINVAL;
			break;
		}

		remote = ent->entity;

		if (link.remote_port >= remote->num_pads) {
			dev_err(chan->vi->dev, "invalid port number %u on %pOF\n",
				link.remote_port, to_of_node(link.remote_node));
			v4l2_fwnode_put_link(&link);
			ret = -EINVAL;
			break;
		}

		remote_pad = &remote->pads[link.remote_port];

		v4l2_fwnode_put_link(&link);

		/* Create the media link. */
		dev_dbg(chan->vi->dev, "creating %s:%u -> %s:%u link\n",
			local->name, local_pad->index,
			remote->name, remote_pad->index);

		ret = tegra_media_create_link(local, local_pad->index, remote,
					remote_pad->index, link_flags);
		if (ret < 0) {
			dev_err(chan->vi->dev,
				"failed to create %s:%u -> %s:%u link\n",
				local->name, local_pad->index,
				remote->name, remote_pad->index);
			break;
		}
	} while (next);

	return ret;
}
/*
 * tegra_vi_graph_build_links - link the channel's video node to its source
 * @chan: channel whose stored endpoint node is parsed
 *
 * Parses the channel's DT endpoint, resolves the remote graph entity, and
 * creates the enabled media link from the remote's source pad to the
 * channel's sink pad, then initializes the channel sub-devices.
 *
 * Returns 0 on success or -EINVAL on any parse/link failure.
 */
static int tegra_vi_graph_build_links(struct tegra_channel *chan)
{
	u32 link_flags = MEDIA_LNK_FL_ENABLED;
	struct media_entity *source;
	struct media_entity *sink;
	struct media_pad *source_pad;
	struct media_pad *sink_pad;
	struct tegra_vi_graph_entity *ent;
	struct v4l2_fwnode_link link;
	struct device_node *ep = NULL;
	int ret = 0;

	dev_dbg(chan->vi->dev, "creating links for channels\n");

	/* Device not registered */
	if (!chan->init_done)
		return -EINVAL;

	ep = chan->endpoint_node;

	dev_dbg(chan->vi->dev, "processing endpoint %pOF\n", ep);

	ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link);
	if (ret < 0) {
		dev_err(chan->vi->dev, "failed to parse link for %pOF\n",
			ep);
		return -EINVAL;
	}

	if (link.local_port >= chan->vi->num_channels) {
		dev_err(chan->vi->dev, "wrong channel number for port %u\n",
			link.local_port);
		v4l2_fwnode_put_link(&link);
		return -EINVAL;
	}

	dev_dbg(chan->vi->dev, "creating link for channel %s\n",
		chan->video->name);

	/* Find the remote entity. */
	ent = tegra_vi_graph_find_entity(chan, to_of_node(link.remote_node));
	if (ent == NULL) {
		dev_err(chan->vi->dev, "no entity found for %pOF\n",
			to_of_node(link.remote_node));
		v4l2_fwnode_put_link(&link);
		return -EINVAL;
	}

	if (ent->entity == NULL) {
		dev_err(chan->vi->dev, "entity not bounded %pOF\n",
			to_of_node(link.remote_node));
		v4l2_fwnode_put_link(&link);
		return -EINVAL;
	}

	source = ent->entity;
	source_pad = &source->pads[link.remote_port];
	sink = &chan->video->entity;
	sink_pad = &chan->pad;

	v4l2_fwnode_put_link(&link);

	/* Create the media link. */
	dev_dbg(chan->vi->dev, "creating %s:%u -> %s:%u link\n",
		source->name, source_pad->index,
		sink->name, sink_pad->index);

	ret = tegra_media_create_link(source, source_pad->index,
				sink, sink_pad->index,
				link_flags);
	if (ret < 0) {
		dev_err(chan->vi->dev,
			"failed to create %s:%u -> %s:%u link\n",
			source->name, source_pad->index,
			sink->name, sink_pad->index);
		return -EINVAL;
	}

	ret = tegra_channel_init_subdevices(chan);
	if (ret < 0) {
		dev_err(chan->vi->dev, "Failed to initialize sub-devices\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * tegra_vi_graph_remove_links - tear down the channel's media links
 *
 * Removes the first entity's links and its subdev video node, then the
 * channel's own sub-devices.
 *
 * NOTE(review): list_first_entry() assumes chan->entities is non-empty and
 * that its first entry is the nvcsi entity (per the comment) -- confirm
 * both invariants hold on every unbind path.
 */
static void tegra_vi_graph_remove_links(struct tegra_channel *chan)
{
	struct tegra_vi_graph_entity *entity;

	/* remove entity links and subdev for nvcsi */
	entity = list_first_entry(&chan->entities,
			struct tegra_vi_graph_entity, list);
	if (entity->entity != NULL) {
		media_entity_remove_links(entity->entity);
		video_unregister_device(entity->subdev->devnode);
	}

	/* remove video node for vi */
	tegra_channel_remove_subdevices(chan);
}
/*
 * tegra_vi_graph_notify_complete - async notifier completion callback
 *
 * Runs once all sub-devices are bound: allocates and registers the
 * channel's video device, creates links for every bound entity and for
 * the channel itself, and registers the subdev device nodes.
 *
 * NOTE(review): the graph_error path calls video_unregister_device()
 * followed by video_device_release() -- confirm this does not drop the
 * video device reference twice when unregister already releases it.
 */
static int tegra_vi_graph_notify_complete(struct v4l2_async_notifier *notifier)
{
	struct tegra_channel *chan =
		container_of(notifier, struct tegra_channel, notifier);
	struct tegra_vi_graph_entity *entity;
	int ret;

	dev_dbg(chan->vi->dev, "notify complete, all subdevs registered\n");

	/* Allocate video_device */
	ret = tegra_channel_init_video(chan);
	if (ret < 0) {
		dev_err(chan->vi->dev, "failed to allocate video device %s\n",
			chan->video->name);
		return ret;
	}

	ret = video_register_device(chan->video, VFL_TYPE_VIDEO, -1);
	if (ret < 0) {
		dev_err(chan->vi->dev, "failed to register %s\n",
			chan->video->name);
		goto register_device_error;
	}

	/* Create links for every entity. */
	list_for_each_entry(entity, &chan->entities, list) {
		if (entity->entity != NULL) {
			ret = tegra_vi_graph_build_one(chan, entity);
			if (ret < 0)
				goto graph_error;
		}
	}

	/* Create links for channels */
	ret = tegra_vi_graph_build_links(chan);
	if (ret < 0)
		goto graph_error;

	ret = v4l2_device_register_subdev_nodes(&chan->vi->v4l2_dev);
	if (ret < 0) {
		dev_err(chan->vi->dev, "failed to register subdev nodes\n");
		goto graph_error;
	}

	/* mark the channel graph as fully linked (checked on unbind) */
	chan->link_status++;

	return 0;

graph_error:
	video_unregister_device(chan->video);
register_device_error:
	video_device_release(chan->video);

	return ret;
}
/*
 * tegra_vi_graph_notify_bound - async notifier bind callback
 *
 * Matches the bound subdev against the entities parsed from DT and
 * records its media entity and subdev pointers on the matching entry.
 * Returns 0 on a match, -EINVAL for a duplicate or unknown subdev.
 */
static int tegra_vi_graph_notify_bound(struct v4l2_async_notifier *notifier,
	struct v4l2_subdev *subdev,
	struct v4l2_async_subdev *asd)
{
	struct tegra_channel *chan =
		container_of(notifier, struct tegra_channel, notifier);
	struct tegra_vi_graph_entity *ent;

	list_for_each_entry(ent, &chan->entities, list) {
		bool matches = (ent->node == to_of_node(subdev->dev->fwnode)) ||
			       (ent->node == to_of_node(subdev->fwnode));

		if (!matches)
			continue;

		if (ent->subdev) {
			dev_err(chan->vi->dev, "duplicate subdev for node %pOF\n",
				ent->node);
			return -EINVAL;
		}

		dev_info(chan->vi->dev, "subdev %s bound\n", subdev->name);
		ent->entity = &subdev->entity;
		ent->subdev = subdev;
		chan->subdevs_bound++;
		return 0;
	}

	dev_err(chan->vi->dev, "no entity for subdev %s\n", subdev->name);

	return -EINVAL;
}
/*
 * v4l2-async "unbind" callback: a previously bound sub-device is going
 * away.  Undo the work of the "complete" callback (links + video node) if
 * it ran, then clear this entity's subdev bookkeeping set in "bound".
 */
static void tegra_vi_graph_notify_unbind(struct v4l2_async_notifier *notifier,
	struct v4l2_subdev *subdev,
	struct v4l2_async_subdev *asd)
{
	struct tegra_channel *chan =
		container_of(notifier, struct tegra_channel, notifier);
	struct tegra_vi_graph_entity *entity;

	/* cleanup for complete */
	if (chan->link_status) {
		tegra_vi_graph_remove_links(chan);
		tegra_channel_cleanup_video(chan);
		chan->link_status--;
	}

	/* cleanup for bound */
	list_for_each_entry(entity, &chan->entities, list) {
		if (entity->subdev == subdev) {
			/* remove subdev node */
			chan->subdevs_bound--;
			entity->subdev = NULL;
			entity->entity = NULL;
			dev_info(chan->vi->dev, "subdev %s unbind\n",
				subdev->name);
			break;
		}
	}
}
/*
 * tegra_vi_graph_cleanup() - undo tegra_vi_graph_init() for every channel.
 *
 * Unregisters each channel's async notifier and drops the device-node
 * references held by its graph entities.  The entity structs themselves
 * are devm-allocated, so only the of_node references need releasing here.
 */
void tegra_vi_graph_cleanup(struct tegra_mc_vi *vi)
{
	struct tegra_vi_graph_entity *entityp;
	struct tegra_vi_graph_entity *entity;
	struct tegra_channel *chan;

	list_for_each_entry(chan, &vi->vi_chans, list) {
		/* The notifier API was renamed in v6.0. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)
		v4l2_async_notifier_unregister(&chan->notifier);
#else
		v4l2_async_nf_unregister(&chan->notifier);
#endif
		list_for_each_entry_safe(entity, entityp,
			&chan->entities, list) {
			/* Drop the reference taken during graph parsing. */
			of_node_put(entity->node);
			list_del(&entity->list);
		}
	}
}
EXPORT_SYMBOL(tegra_vi_graph_cleanup);
/*
 * tegra_vi_graph_parse_one() - recursively collect the remote entities
 * reachable from @node's OF-graph endpoints into @chan->entities.
 *
 * Each entity added to the list keeps the reference that
 * of_graph_get_remote_port_parent() took on its device node; that
 * reference is dropped later in tegra_vi_graph_cleanup().  Remote nodes
 * that are skipped (the VI node itself, duplicates, disabled nodes) or
 * hit an allocation failure must be put here, which the original code
 * failed to do (of_node refcount leak).
 *
 * Returns 0 on success or a negative errno.
 */
static int tegra_vi_graph_parse_one(struct tegra_channel *chan,
	struct device_node *node)
{
	struct device_node *ep = NULL;
	struct device_node *next;
	struct device_node *remote = NULL;
	struct tegra_vi_graph_entity *entity;
	int ret = 0;

	dev_dbg(chan->vi->dev, "parsing node %s\n", node->full_name);

	/* Parse all the remote entities and put them into the list */
	do {
		next = of_graph_get_next_endpoint(node, ep);
		if (next == NULL || !of_device_is_available(next))
			break;
		ep = next;
		dev_dbg(chan->vi->dev, "handling endpoint %s\n", ep->full_name);

		remote = of_graph_get_remote_port_parent(ep);
		if (!remote) {
			ret = -EINVAL;
			break;
		}

		/* skip the vi of_node and duplicated entities */
		if (remote == chan->vi->dev->of_node ||
			tegra_vi_graph_find_entity(chan, remote) ||
			!of_device_is_available(remote)) {
			/* Skipped nodes are not tracked in chan->entities,
			 * so release the reference taken above.
			 */
			of_node_put(remote);
			continue;
		}

		entity = devm_kzalloc(chan->vi->dev, sizeof(*entity),
			GFP_KERNEL);
		if (entity == NULL) {
			of_node_put(remote);
			ret = -ENOMEM;
			break;
		}

		/* entity->node keeps the remote's reference until cleanup. */
		entity->node = remote;
		entity->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
		entity->asd.match.fwnode = of_fwnode_handle(remote);
		list_add_tail(&entity->list, &chan->entities);
		chan->num_subdevs++;

		/* Find remote entities, which are linked to this entity */
		ret = tegra_vi_graph_parse_one(chan, entity->node);
		if (ret < 0)
			break;
	} while (next);

	return ret;
}
/*
 * tegra_vi_tpg_graph_init() - pair each initialized TPG VI channel with a
 * CSI TPG channel: register the CSI subdev, create an enabled media link
 * from the CSI source pad to the VI video node's sink pad, and initialize
 * the VI channel's sub-devices.
 *
 * Iteration starts at mc_vi->tpg_start / csi->tpg_start so only the TPG
 * portion of each channel list is walked; the two lists are advanced in
 * lock-step (one CSI channel per VI channel).
 *
 * Returns 0 on success; on failure all CSI TPG subdevs registered so far
 * are unregistered and a negative errno is returned.
 */
int tegra_vi_tpg_graph_init(struct tegra_mc_vi *mc_vi)
{
	int err = 0;
	u32 link_flags = MEDIA_LNK_FL_ENABLED;
	struct tegra_csi_device *csi = mc_vi->csi;
	struct tegra_channel *vi_it;
	struct tegra_csi_channel *csi_it;

	if (!csi) {
		dev_err(mc_vi->dev, "CSI is NULL\n");
		return -EINVAL;
	}

	mc_vi->num_subdevs = mc_vi->num_channels;
	vi_it = mc_vi->tpg_start;
	csi_it = csi->tpg_start;

	list_for_each_entry_from(vi_it, &mc_vi->vi_chans, list) {
		/* Device not registered */
		if (!vi_it->init_done)
			continue;
		list_for_each_entry_from(csi_it, &csi->csi_chans, list) {
			struct media_entity *source = &csi_it->subdev.entity;
			struct media_entity *sink = &vi_it->video->entity;
			struct media_pad *source_pad = csi_it->pads;
			struct media_pad *sink_pad = &vi_it->pad;

			/* TPG channels always run in non-bypass mode. */
			vi_it->bypass = 0;
			err = v4l2_device_register_subdev(&mc_vi->v4l2_dev,
					&csi_it->subdev);
			if (err) {
				dev_err(mc_vi->dev,
					"%s:Fail to register subdev\n",
					__func__);
				goto register_fail;
			}
			dev_dbg(mc_vi->dev, "creating %s:%u -> %s:%u link\n",
				source->name, source_pad->index,
				sink->name, sink_pad->index);
			err = tegra_media_create_link(source, source_pad->index,
					sink, sink_pad->index, link_flags);
			if (err < 0) {
				dev_err(mc_vi->dev,
					"failed to create %s:%u -> %s:%u link\n",
					source->name, source_pad->index,
					sink->name, sink_pad->index);
				goto register_fail;
			}
			err = tegra_channel_init_subdevices(vi_it);
			if (err) {
				dev_err(mc_vi->dev,
					"%s:Init subdevice error\n", __func__);
				goto register_fail;
			}
			/* Advance to the next CSI channel for the next VI
			 * channel, then leave the inner loop.
			 */
			csi_it = list_next_entry(csi_it, list);
			break;
		}
	}
	return 0;

register_fail:
	/* Unregister every CSI TPG subdev; unregistering one that never
	 * registered is harmless.
	 */
	csi_it = csi->tpg_start;
	list_for_each_entry_from(csi_it, &csi->csi_chans, list)
		v4l2_device_unregister_subdev(&csi_it->subdev);
	return err;
}
EXPORT_SYMBOL(tegra_vi_tpg_graph_init);
/*
 * tegra_vi_graph_init() - parse the VI OF-graph and register one v4l2
 * async notifier per channel.
 *
 * Each endpoint of the VI node maps to one tegra_channel.  For every
 * endpoint the remote entity (and, recursively, its remote entities) are
 * collected into the channel's entity list; the channel's notifier then
 * waits for those sub-devices to probe.  Per the comment below, a parse
 * failure on one channel is not fatal: the code skips to the next channel.
 * Only -ENOMEM aborts the whole init.
 */
int tegra_vi_graph_init(struct tegra_mc_vi *vi)
{
	struct tegra_vi_graph_entity *entity;
	unsigned int num_subdevs = 0;
	int ret = 0, i;
	struct device_node *ep = NULL;
	struct device_node *next;
	struct device_node *remote = NULL;
	struct tegra_channel *chan;
	static const struct v4l2_async_notifier_operations vi_chan_notify_ops = {
		.bound = tegra_vi_graph_notify_bound,
		.complete = tegra_vi_graph_notify_complete,
		.unbind = tegra_vi_graph_notify_unbind,
	};

	/*
	 * Walk the links to parse the full graph. Each struct tegra_channel
	 * in vi->vi_chans points to each endpoint of the composite node.
	 * Thus parse the remote entity for each endpoint in turn.
	 * Each channel will register a v4l2 async notifier, this makes graph
	 * init independent between vi_chans. There we can skip the current
	 * channel in case of something wrong during graph parsing and try
	 * the next channel. Return error only if memory allocation is failed.
	 */
	chan = list_first_entry(&vi->vi_chans, struct tegra_channel, list);
	do {
		/* Get the next endpoint and parse its entities. */
		next = of_graph_get_next_endpoint(vi->dev->of_node, ep);
		if (next == NULL)
			break;
		ep = next;

		if (!of_device_is_available(ep)) {
			dev_info(vi->dev, "ep of_device is not enabled %s.\n",
				ep->full_name);
			if (list_is_last(&chan->list, &vi->vi_chans))
				break;
			/* Try the next channel */
			chan = list_next_entry(chan, list);
			continue;
		}

		chan->endpoint_node = ep;
		entity = devm_kzalloc(vi->dev, sizeof(*entity), GFP_KERNEL);
		if (entity == NULL) {
			ret = -ENOMEM;
			goto done;
		}

		dev_dbg(vi->dev, "handling endpoint %s\n", ep->full_name);
		remote = of_graph_get_remote_port_parent(ep);
		if (!remote) {
			dev_info(vi->dev, "cannot find remote port parent\n");
			if (list_is_last(&chan->list, &vi->vi_chans))
				break;
			/* Try the next channel */
			chan = list_next_entry(chan, list);
			continue;
		}
		if (!of_device_is_available(remote)) {
			dev_info(vi->dev, "remote of_device is not enabled %s.\n",
				ep->full_name);
			if (list_is_last(&chan->list, &vi->vi_chans))
				break;
			/* Try the next channel */
			chan = list_next_entry(chan, list);
			continue;
		}

		/* Add the remote entity of this endpoint */
		entity->node = remote;
		entity->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
		entity->asd.match.fwnode = of_fwnode_handle(remote);
		list_add_tail(&entity->list, &chan->entities);
		chan->num_subdevs++;
		/* Only set the ops once; later endpoints reuse them. */
		chan->notifier.ops = chan->notifier.ops ? chan->notifier.ops : &vi_chan_notify_ops;

		/* Parse and add entities on this enpoint/channel */
		ret = tegra_vi_graph_parse_one(chan, entity->node);
		if (ret < 0) {
			dev_info(vi->dev, "graph parse error: %s.\n",
				entity->node->full_name);
			if (list_is_last(&chan->list, &vi->vi_chans))
				break;
			/* Try the next channel */
			chan = list_next_entry(chan, list);
			continue;
		}
		num_subdevs = chan->num_subdevs;
		i = 0;

		/* The notifier API was renamed in v6.0. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)
		v4l2_async_notifier_init(&chan->notifier);
		list_for_each_entry(entity, &chan->entities, list)
			__v4l2_async_notifier_add_subdev(&chan->notifier, &entity->asd);
#else
		v4l2_async_nf_init(&chan->notifier);
		list_for_each_entry(entity, &chan->entities, list)
			__v4l2_async_nf_add_subdev(&chan->notifier, &entity->asd);
#endif
		chan->link_status = 0;
		chan->subdevs_bound = 0;

		/* Register the async notifier for this channel */
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)
		ret = v4l2_async_notifier_register(&vi->v4l2_dev,
				&chan->notifier);
#else
		ret = v4l2_async_nf_register(&vi->v4l2_dev,
				&chan->notifier);
#endif
		if (ret < 0) {
			dev_err(vi->dev, "notifier registration failed\n");
			goto done;
		}
		if (list_is_last(&chan->list, &vi->vi_chans))
			break;
		/* One endpoint for each vi channel, go with the next channel */
		chan = list_next_entry(chan, list);
	} while (next);

done:
	/* Only OOM is fatal; every other per-channel error was skipped. */
	if (ret == -ENOMEM) {
		dev_err(vi->dev, "graph init failed\n");
		tegra_vi_graph_cleanup(vi);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL(tegra_vi_graph_init);

View File

@@ -0,0 +1,362 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Tegra Video Input device common APIs
*
* Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <media/tegra_v4l2_camera.h>
#include <media/camera_common.h>
#include <media/v4l2-event.h>
#include <media/tegra_camera_platform.h>
#include <media/vi.h>
#include <media/vi2_registers.h>
#include <linux/nvhost.h>
/* Cached pointer to the single VI media-controller instance; set in
 * tegra_vi_media_controller_init_int() and cleared on cleanup.
 */
static struct tegra_mc_vi *tegra_mcvi;

/* Return the global VI media-controller instance (NULL before probe). */
struct tegra_mc_vi *tegra_get_mc_vi(void)
{
	return tegra_mcvi;
}
EXPORT_SYMBOL(tegra_get_mc_vi);
/* In TPG mode, VI only supports 2 formats (RAW10 Bayer and RGB888).
 * Mark just those bits in the channel's format bitmap.  The lookup
 * result is checked before use: a negative index from
 * tegra_core_get_idx_by_code() (format not found) would otherwise make
 * bitmap_set() write out of bounds.
 */
static void vi_tpg_fmts_bitmap_init(struct tegra_channel *chan)
{
	int index;

	bitmap_zero(chan->fmts_bitmap, MAX_FORMAT_NUM);

	index = tegra_core_get_idx_by_code(chan,
			MEDIA_BUS_FMT_SRGGB10_1X10, 0);
	if (index >= 0)
		bitmap_set(chan->fmts_bitmap, index, 1);

	index = tegra_core_get_idx_by_code(chan,
			MEDIA_BUS_FMT_RGB888_1X32_PADHI, 0);
	if (index >= 0)
		bitmap_set(chan->fmts_bitmap, index, 1);
}
/* -----------------------------------------------------------------------------
 * Media Controller and V4L2
 */

/* Menu entries for the V4L2_CID_TEST_PATTERN control; index doubles as
 * the TPG mode value stored in vi->pg_mode.
 */
static const char *const vi_pattern_strings[] = {
	"Disabled",
	"Black/White Direct Mode",
	"Color Patch Mode",
};
/*
 * V4L2 control handler for the VI media controller; only the TPG
 * test-pattern control is handled.  Returns 0 or -EINVAL for unknown ids.
 */
static int vi_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct tegra_mc_vi *vi = container_of(ctrl->handler, struct tegra_mc_vi,
					ctrl_handler);

	switch (ctrl->id) {
	case V4L2_CID_TEST_PATTERN:
		/*
		 * TPG control is only available to the TPG driver,
		 * it can't be changed to 0 to disable TPG mode.
		 */
		if (ctrl->val) {
			dev_info(&vi->ndev->dev, "Set TPG mode to %d\n",
				ctrl->val);
			vi->pg_mode = ctrl->val;
		}
		break;
	default:
		dev_err(vi->dev, "%s:Not valid ctrl\n", __func__);
		return -EINVAL;
	}
	return 0;
}
/* Control ops for the media-controller-level (TPG) controls. */
static const struct v4l2_ctrl_ops vi_ctrl_ops = {
	.s_ctrl = vi_s_ctrl,
};
/*
 * Undo tegra_vi_v4l2_init(): free the control handler and unregister the
 * v4l2 and media devices.
 *
 * NOTE(review): the media device is only unregistered in non-TPG mode,
 * and media_device_cleanup() is never called on this path — looks like a
 * potential leak of media_device_init() state; confirm against the
 * media framework requirements before changing.
 */
void tegra_vi_v4l2_cleanup(struct tegra_mc_vi *vi)
{
	v4l2_ctrl_handler_free(&vi->ctrl_handler);
	v4l2_device_unregister(&vi->v4l2_dev);
	if (!vi->pg_mode)
		media_device_unregister(&vi->media_dev);
}
EXPORT_SYMBOL(tegra_vi_v4l2_cleanup);
/*
 * v4l2_device notify hook: forward subdev events to the video node of
 * every channel the subdev belongs to.  On a source-change event while
 * streaming, put the queue into the error state so userspace restarts it.
 */
static void tegra_vi_notify(struct v4l2_subdev *sd,
	unsigned int notification, void *arg)
{
	struct tegra_mc_vi *vi = container_of(sd->v4l2_dev,
			struct tegra_mc_vi, v4l2_dev);
	const struct v4l2_event *ev = arg;
	unsigned i;
	struct tegra_channel *chan;

	if (notification != V4L2_DEVICE_NOTIFY_EVENT)
		return;

	/* A subdev may serve several channels; queue the event on each. */
	list_for_each_entry(chan, &vi->vi_chans, list) {
		for (i = 0; i < chan->num_subdevs; i++)
			if (sd == chan->subdev[i]) {
				v4l2_event_queue(chan->video, arg);
				if (ev->type == V4L2_EVENT_SOURCE_CHANGE &&
					vb2_is_streaming(&chan->queue))
					vb2_queue_error(&chan->queue);
			}
	}
}
/*
 * tegra_vi_v4l2_init() - register the media device and the v4l2 device
 * for the VI media controller.
 *
 * Returns 0 on success or a negative errno.  On v4l2 registration
 * failure the media device is unregistered before cleanup; the original
 * code called media_device_cleanup() before media_device_unregister(),
 * which is the reverse of the order the media framework requires.
 */
int tegra_vi_v4l2_init(struct tegra_mc_vi *vi)
{
	int ret;

	vi->media_dev.dev = vi->dev;
	strlcpy(vi->media_dev.model, "NVIDIA Tegra Video Input Device",
		sizeof(vi->media_dev.model));
	vi->media_dev.hw_revision = 3;

	media_device_init(&vi->media_dev);
	ret = media_device_register(&vi->media_dev);
	if (ret < 0) {
		dev_err(vi->dev,
			"media device registration failed (%d)\n",
			ret);
		return ret;
	}

	mutex_init(&vi->bw_update_lock);
	vi->v4l2_dev.mdev = &vi->media_dev;
	vi->v4l2_dev.notify = tegra_vi_notify;
	ret = v4l2_device_register(vi->dev, &vi->v4l2_dev);
	if (ret < 0) {
		dev_err(vi->dev, "V4L2 device registration failed (%d)\n",
			ret);
		goto register_error;
	}

	return 0;

register_error:
	/* Unregister first, then release init state (required order). */
	media_device_unregister(&vi->media_dev);
	media_device_cleanup(&vi->media_dev);
	return ret;
}
static int vi_parse_dt(struct tegra_mc_vi *vi, struct platform_device *dev)
{
int err = 0;
int num_channels = 0;
int i;
struct tegra_channel *item;
struct device_node *node = dev->dev.of_node;
err = of_property_read_u32(node, "num-channels", &num_channels);
if (err) {
dev_dbg(&dev->dev,
"Failed to find num of channels, set to 0\n");
num_channels = 0;
}
vi->num_channels = num_channels;
for (i = 0; i < num_channels; i++) {
item = devm_kzalloc(vi->dev, sizeof(*item), GFP_KERNEL);
if (!item)
return -ENOMEM;
item->id = i;
list_add_tail(&item->list, &vi->vi_chans);
}
return 0;
}
/* Record the memory-mapped VI register aperture on the media controller. */
static void set_vi_register_base(struct tegra_mc_vi *mc_vi,
	void __iomem *regbase)
{
	mc_vi->iomem = regbase;
}
int tpg_vi_media_controller_init(struct tegra_mc_vi *mc_vi, int pg_mode)
{
int err = 0, i;
struct tegra_channel *item;
const unsigned int num_pre_channels = mc_vi->num_channels;
/* Allocate TPG channel */
v4l2_ctrl_handler_init(&mc_vi->ctrl_handler, 1);
mc_vi->pattern = v4l2_ctrl_new_std_menu_items(&mc_vi->ctrl_handler,
&vi_ctrl_ops, V4L2_CID_TEST_PATTERN,
ARRAY_SIZE(vi_pattern_strings) - 1,
0, mc_vi->pg_mode, vi_pattern_strings);
if (mc_vi->ctrl_handler.error) {
dev_err(mc_vi->dev, "failed to add controls\n");
err = mc_vi->ctrl_handler.error;
goto ctrl_error;
}
mc_vi->tpg_start = NULL;
for (i = 0; i < mc_vi->csi->num_tpg_channels; i++) {
item = devm_kzalloc(mc_vi->dev, sizeof(*item), GFP_KERNEL);
if (!item)
goto channel_init_error;
item->id = num_pre_channels + i;
item->pg_mode = pg_mode;
item->vi = mc_vi;
err = tegra_channel_init(item);
if (err) {
devm_kfree(mc_vi->dev, item);
goto channel_init_error;
}
/* Allocate video_device */
err = tegra_channel_init_video(item);
if (err < 0) {
devm_kfree(mc_vi->dev, item);
dev_err(&item->video->dev, "failed to allocate video device %s\n",
item->video->name);
goto channel_init_error;
}
err = video_register_device(item->video, VFL_TYPE_VIDEO, -1);
if (err < 0) {
devm_kfree(mc_vi->dev, item);
video_device_release(item->video);
dev_err(&item->video->dev, "failed to register %s\n",
item->video->name);
goto channel_init_error;
}
vi_tpg_fmts_bitmap_init(item);
/* only inited tpg channels are added */
list_add_tail(&item->list, &mc_vi->vi_chans);
if (mc_vi->tpg_start == NULL)
mc_vi->tpg_start = item;
}
mc_vi->num_channels += mc_vi->csi->num_tpg_channels;
err = tegra_vi_tpg_graph_init(mc_vi);
if (err)
goto channel_init_error;
return err;
channel_init_error:
dev_err(mc_vi->dev, "%s: channel init failed\n", __func__);
if (!mc_vi->tpg_start)
tpg_vi_media_controller_cleanup(mc_vi);
return err;
ctrl_error:
v4l2_ctrl_handler_free(&mc_vi->ctrl_handler);
dev_err(mc_vi->dev, "%s: v2l4_ctl error\n", __func__);
return err;
}
EXPORT_SYMBOL(tpg_vi_media_controller_init);
/*
 * Remove every TPG channel (pg_mode != 0) created by
 * tpg_vi_media_controller_init(): unregister its video node, clean up the
 * channel, drop it from the list and free it.  Regular (non-TPG) channels
 * are left untouched.
 */
void tpg_vi_media_controller_cleanup(struct tegra_mc_vi *mc_vi)
{
	struct tegra_channel *item;
	struct tegra_channel *itemn;

	list_for_each_entry_safe(item, itemn, &mc_vi->vi_chans, list) {
		if (!item->pg_mode)
			continue;
		/* cdev non-NULL means the video node was registered. */
		if (item->video->cdev != NULL)
			video_unregister_device(item->video);
		tegra_channel_cleanup(item);
		list_del(&item->list);
		devm_kfree(mc_vi->dev, item);
		mc_vi->num_channels--;
	}
	mc_vi->tpg_start = NULL;
	v4l2_ctrl_handler_free(&mc_vi->ctrl_handler);
}
EXPORT_SYMBOL(tpg_vi_media_controller_cleanup);
/*
 * Common media-controller bring-up shared by the nvhost and capture-VI
 * entry points: parse DT channels, register v4l2/media devices, init the
 * channels and build the media graph.  Also publishes the instance via
 * the tegra_mcvi global.  Returns 0 or a negative errno with everything
 * set up so far torn down.
 */
static int tegra_vi_media_controller_init_int(struct tegra_mc_vi *mc_vi,
	struct platform_device *pdev)
{
	int err = 0;

	mc_vi->ndev = pdev;
	mc_vi->dev = &pdev->dev;
	INIT_LIST_HEAD(&mc_vi->vi_chans);
	mutex_init(&mc_vi->mipical_lock);

	err = vi_parse_dt(mc_vi, pdev);
	if (err)
		goto mc_init_fail;

	tegra_mcvi = mc_vi;

	err = tegra_vi_v4l2_init(mc_vi);
	if (err < 0)
		goto mc_init_fail;

	/*
	 * if there is no vi channels listed in DT,
	 * no need to init the channel and graph
	 */
	if (mc_vi->num_channels == 0)
		return 0;

	/* Init Tegra VI channels */
	err = tegra_vi_channels_init(mc_vi);
	if (err < 0) {
		dev_err(&pdev->dev, "Init channel failed\n");
		goto channels_error;
	}

	/* Setup media links between VI and external sensor subdev. */
	err = tegra_vi_graph_init(mc_vi);
	if (err < 0)
		goto graph_error;

	return 0;

graph_error:
	tegra_vi_channels_cleanup(mc_vi);
channels_error:
	tegra_vi_v4l2_cleanup(mc_vi);
mc_init_fail:
	dev_err(&pdev->dev, "%s: failed\n", __func__);
	return err;
}
int tegra_vi_media_controller_init(struct tegra_mc_vi *mc_vi,
struct platform_device *pdev)
{
struct nvhost_device_data *pdata = (struct nvhost_device_data *)
platform_get_drvdata(pdev);
if (!pdata)
return -EINVAL;
set_vi_register_base(mc_vi, pdata->aperture[0]);
return tegra_vi_media_controller_init_int(mc_vi, pdev);
}
EXPORT_SYMBOL(tegra_vi_media_controller_init);
/*
 * Capture-VI entry point: same as tegra_vi_media_controller_init() but
 * without nvhost platform data / register aperture setup.
 */
int tegra_capture_vi_media_controller_init(struct tegra_mc_vi *mc_vi,
	struct platform_device *pdev)
{
	return tegra_vi_media_controller_init_int(mc_vi, pdev);
}
EXPORT_SYMBOL(tegra_capture_vi_media_controller_init);
/*
 * Full teardown of the VI media controller in reverse init order:
 * channels unregistered, graph cleaned, channels cleaned, v4l2/media
 * devices released, and the global instance pointer cleared.
 */
void tegra_vi_media_controller_cleanup(struct tegra_mc_vi *mc_vi)
{
	tegra_vi_channels_unregister(mc_vi);
	tegra_vi_graph_cleanup(mc_vi);
	tegra_vi_channels_cleanup(mc_vi);
	tegra_vi_v4l2_cleanup(mc_vi);
	tegra_mcvi = NULL;
}
EXPORT_SYMBOL(tegra_vi_media_controller_cleanup);

View File

@@ -0,0 +1,994 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Tegra Video Input 5 device common APIs
*
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/nvhost.h>
#include <linux/semaphore.h>
#include <media/tegra_camera_platform.h>
#include <media/mc_common.h>
#include <media/tegra-v4l2-camera.h>
#include <media/fusa-capture/capture-vi-channel.h>
#include <media/fusa-capture/capture-vi.h>
#include <soc/tegra/camrtc-capture.h>
#include "vi5_formats.h"
#include "vi5_fops.h"
#include <trace/events/camera_common.h>
#define DEFAULT_FRAMERATE 30
#define BPP_MEM 2
#define VI_CSI_CLK_SCALE 110
#define PG_BITRATE 32
#define SLVSEC_STREAM_MAIN 0U
#define CAPTURE_TIMEOUT_MS 2500
/* Template for vi_capture_setup(); copied per channel and patched with
 * the channel's queue depth, DMA iova and CSI stream/port before use in
 * tegra_channel_capture_setup().
 */
static const struct vi_capture_setup default_setup = {
	.channel_flags = 0
		| CAPTURE_CHANNEL_FLAG_VIDEO
		| CAPTURE_CHANNEL_FLAG_RAW
		| CAPTURE_CHANNEL_FLAG_EMBDATA
		| CAPTURE_CHANNEL_FLAG_LINETIMER
		,
	.vi_channel_mask = ~0ULL,
	.vi2_channel_mask = ~0ULL,
	.queue_depth = CAPTURE_MIN_BUFFERS,
	.request_size = sizeof(struct capture_descriptor),
	.mem = 0, /* fill in later */
};
/* Template capture descriptor memcpy'd into each ring slot by
 * vi5_setup_surface(); stream/vc match fields and frame geometry are
 * overwritten per request.
 */
static const struct capture_descriptor capture_template = {
	.sequence = 0,
	.capture_flags = 0
		| CAPTURE_FLAG_STATUS_REPORT_ENABLE
		| CAPTURE_FLAG_ERROR_REPORT_ENABLE
		,
	.ch_cfg = {
		.pixfmt_enable = 0, /* no output */
		.match = {
			.stream = 0, /* one-hot bit encoding */
			.stream_mask = 0x3f,
			.vc = (1u << 0), /* one-hot bit encoding */
			.vc_mask = 0xffff,
		},
	},
};
/* Populate the channel's format table from the static vi5 format list. */
static void vi5_init_video_formats(struct tegra_channel *chan)
{
	const int count = ARRAY_SIZE(vi5_video_formats);
	int idx;

	chan->num_video_formats = count;
	for (idx = 0; idx < count; idx++)
		chan->video_formats[idx] = &vi5_video_formats[idx];
}
/*
 * Volatile-control read handler: copy the sensor configuration or one of
 * the precomputed I2C command blobs from the CSI-linked sensor's
 * tegracam handler into the control payload.  Returns -EINVAL when the
 * sensor has no camera_common_data / tegracam handler or the id is
 * unknown.
 */
static int tegra_vi5_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
	struct tegra_channel *chan = container_of(ctrl->handler,
				struct tegra_channel, ctrl_handler);
	struct v4l2_subdev *sd = chan->subdev_on_csi;
	struct camera_common_data *s_data =
			to_camera_common_data(sd->dev);
	struct tegracam_ctrl_handler *handler;
	struct tegracam_sensor_data *sensor_data;

	if (!s_data)
		return -EINVAL;

	handler = s_data->tegracam_ctrl_hdl;
	if (!handler)
		return -EINVAL;
	sensor_data = &handler->sensor_data;

	/* TODO: Support reading blobs for multiple devices */
	switch (ctrl->id) {
	case TEGRA_CAMERA_CID_SENSOR_CONFIG: {
		struct sensor_cfg *cfg = &s_data->sensor_props.cfg;

		memcpy(ctrl->p_new.p, cfg, sizeof(struct sensor_cfg));
		break;
	}
	case TEGRA_CAMERA_CID_SENSOR_MODE_BLOB: {
		struct sensor_blob *blob = &sensor_data->mode_blob;

		memcpy(ctrl->p_new.p, blob, sizeof(struct sensor_blob));
		break;
	}
	case TEGRA_CAMERA_CID_SENSOR_CONTROL_BLOB: {
		struct sensor_blob *blob = &sensor_data->ctrls_blob;

		memcpy(ctrl->p_new.p, blob, sizeof(struct sensor_blob));
		break;
	}
	default:
		pr_err("%s: unknown ctrl id.\n", __func__);
		return -EINVAL;
	}

	return 0;
}
/*
 * Set-control handler for the vi5 channel controls; only the
 * write-ISP-format control is handled.  Returns 0 or -EINVAL for
 * unknown ids.  (The original carried a dead `err` local that was
 * always 0 — removed.)
 */
static int tegra_vi5_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct tegra_channel *chan = container_of(ctrl->handler,
				struct tegra_channel, ctrl_handler);

	switch (ctrl->id) {
	case TEGRA_CAMERA_CID_WRITE_ISPFORMAT:
		chan->write_ispformat = ctrl->val;
		break;
	default:
		dev_err(&chan->video->dev, "%s:Not valid ctrl\n", __func__);
		return -EINVAL;
	}

	return 0;
}
/* Control ops shared by all vi5 custom controls below. */
static const struct v4l2_ctrl_ops vi5_ctrl_ops = {
	.s_ctrl = tegra_vi5_s_ctrl,
	.g_volatile_ctrl = tegra_vi5_g_volatile_ctrl,
};
/* vi5 channel custom controls: one writable ISP-format switch and three
 * read-only volatile payload controls (sensor config plus the mode and
 * control I2C blobs) served by tegra_vi5_g_volatile_ctrl().
 */
static const struct v4l2_ctrl_config vi5_custom_ctrls[] = {
	{
		.ops = &vi5_ctrl_ops,
		.id = TEGRA_CAMERA_CID_WRITE_ISPFORMAT,
		.name = "Write ISP format",
		.type = V4L2_CTRL_TYPE_INTEGER,
		.def = 1,
		.min = 1,
		.max = 1,
		.step = 1,
	},
	{
		.ops = &vi5_ctrl_ops,
		.id = TEGRA_CAMERA_CID_SENSOR_CONFIG,
		.name = "Sensor configuration",
		.type = V4L2_CTRL_TYPE_U32,
		.flags = V4L2_CTRL_FLAG_READ_ONLY |
			V4L2_CTRL_FLAG_HAS_PAYLOAD |
			V4L2_CTRL_FLAG_VOLATILE,
		.min = 0,
		.max = 0xFFFFFFFF,
		.def = 0,
		.step = 1,
		.dims = { SENSOR_CONFIG_SIZE },
	},
	{
		.ops = &vi5_ctrl_ops,
		.id = TEGRA_CAMERA_CID_SENSOR_MODE_BLOB,
		.name = "Sensor mode I2C packet",
		.type = V4L2_CTRL_TYPE_U32,
		.flags = V4L2_CTRL_FLAG_READ_ONLY |
			V4L2_CTRL_FLAG_HAS_PAYLOAD |
			V4L2_CTRL_FLAG_VOLATILE,
		.min = 0,
		.max = 0xFFFFFFFF,
		.def = 0,
		.step = 1,
		.dims = { SENSOR_MODE_BLOB_SIZE },
	},
	{
		.ops = &vi5_ctrl_ops,
		.id = TEGRA_CAMERA_CID_SENSOR_CONTROL_BLOB,
		.name = "Sensor control I2C packet",
		.type = V4L2_CTRL_TYPE_U32,
		.flags = V4L2_CTRL_FLAG_READ_ONLY |
			V4L2_CTRL_FLAG_HAS_PAYLOAD |
			V4L2_CTRL_FLAG_VOLATILE,
		.min = 0,
		.max = 0xFFFFFFFF,
		.def = 0,
		.step = 1,
		.dims = { SENSOR_CTRL_BLOB_SIZE },
	},
};
/* Register every vi5 custom control on the channel's control handler.
 * Returns 0, or the handler's sticky error code on the first failure.
 */
static int vi5_add_ctrls(struct tegra_channel *chan)
{
	unsigned int idx;

	/* Add vi5 custom controls */
	for (idx = 0; idx < ARRAY_SIZE(vi5_custom_ctrls); idx++) {
		v4l2_ctrl_new_custom(&chan->ctrl_handler,
			&vi5_custom_ctrls[idx], NULL);
		if (chan->ctrl_handler.error) {
			dev_err(chan->vi->dev,
				"Failed to add %s ctrl\n",
				vi5_custom_ctrls[idx].name);
			return chan->ctrl_handler.error;
		}
	}

	return 0;
}
/* vb2 queue-setup helper: clamp the requested buffer count to the
 * capture limits, allocate the buffer queue, and reset the in-flight
 * request counter.  Returns 0 or a negative errno from the allocator.
 */
static int vi5_channel_setup_queue(struct tegra_channel *chan,
	unsigned int *nbuffers)
{
	int ret;

	*nbuffers = clamp(*nbuffers, CAPTURE_MIN_BUFFERS, CAPTURE_MAX_BUFFERS);

	ret = tegra_channel_alloc_buffer_queue(chan, *nbuffers);
	if (ret < 0)
		return ret;

	chan->capture_reqs_enqueued = 0;
	return 0;
}
static struct tegra_csi_channel *find_linked_csi_channel(
struct tegra_channel *chan)
{
struct tegra_csi_channel *csi_it;
struct tegra_csi_channel *csi_chan = NULL;
int i;
struct tegra_csi_device *csi = tegra_get_mc_csi();
if (csi == NULL)
{
dev_err(chan->vi->dev, "csi mc not found");
return NULL;
}
/* Find connected csi_channel */
list_for_each_entry(csi_it, &csi->csi_chans, list) {
for (i = 0; i < chan->num_subdevs; i++) {
if (chan->subdev[i] == &csi_it->subdev) {
csi_chan = csi_it;
break;
}
}
}
return csi_chan;
}
static int tegra_channel_capture_setup(struct tegra_channel *chan, unsigned int vi_port)
{
struct vi_capture_setup setup = default_setup;
long err;
setup.queue_depth = chan->capture_queue_depth;
trace_tegra_channel_capture_setup(chan, 0);
chan->request[vi_port] = dma_alloc_coherent(chan->tegra_vi_channel[vi_port]->rtcpu_dev,
setup.queue_depth * setup.request_size,
&setup.iova, GFP_KERNEL);
if (chan->request[vi_port] == NULL) {
dev_err(chan->vi->dev, "dma_alloc_coherent failed\n");
return -ENOMEM;
}
if (chan->is_slvsec) {
setup.channel_flags |= CAPTURE_CHANNEL_FLAG_SLVSEC;
setup.slvsec_stream_main = SLVSEC_STREAM_MAIN;
setup.slvsec_stream_sub = SLVSEC_STREAM_DISABLED;
}
/* Set the NVCSI PixelParser index (Stream ID) and VC ID*/
setup.csi_stream_id = chan->port[vi_port];
setup.virtual_channel_id = chan->virtual_channel;
/* Set CSI port info */
if (chan->pg_mode) {
setup.csi_port = NVCSI_PORT_UNSPECIFIED;
} else {
struct tegra_csi_channel *csi_chan = find_linked_csi_channel(chan);
if (csi_chan == NULL)
{
dev_err(chan->vi->dev, "csi_chan not found");
return -EINVAL;
}
setup.csi_port = csi_chan->ports[vi_port].csi_port;
}
if (chan->fmtinfo->fourcc == V4L2_PIX_FMT_NV16)
setup.channel_flags |= CAPTURE_CHANNEL_FLAG_SEMI_PLANAR;
err = vi_capture_setup(chan->tegra_vi_channel[vi_port], &setup);
if (err) {
dev_err(chan->vi->dev, "vi capture setup failed\n");
dma_free_coherent(chan->tegra_vi_channel[vi_port]->rtcpu_dev,
setup.queue_depth * setup.request_size,
chan->request, setup.iova);
return err;
}
return 0;
}
/*
 * vi5_setup_surface() - fill the capture descriptor and its memoryinfo
 * entry at @descr_index for @vi_port from the channel format and the
 * buffer's DMA address: CSI stream/VC match, frame geometry, pixel
 * format, output surfaces (second plane for NV16) and, when configured,
 * the embedded-data surface.
 */
static void vi5_setup_surface(struct tegra_channel *chan,
	struct tegra_channel_buffer *buf, unsigned int descr_index, unsigned int vi_port)
{
	dma_addr_t offset = buf->addr + chan->buffer_offset[vi_port];
	u32 height = chan->format.height;
	u32 width = chan->format.width;
	u32 format = chan->fmtinfo->img_fmt;
	u32 bpl = chan->format.bytesperline;
	u32 data_type = chan->fmtinfo->img_dt;
	u32 nvcsi_stream = chan->port[vi_port];
	struct capture_descriptor_memoryinfo *desc_memoryinfo =
		&chan->tegra_vi_channel[vi_port]->
			capture_data->requests_memoryinfo[descr_index];
	struct capture_descriptor *desc = &chan->request[vi_port][descr_index];

	/* Gang mode: two ports share one frame, each capturing half. */
	if (chan->valid_ports > NVCSI_STREAM_1) {
		height = chan->gang_height;
		width = chan->gang_width;
		offset = buf->addr + chan->buffer_offset[1 - vi_port];
	}

	/* Start from the template, then patch per-request fields. */
	memcpy(desc, &capture_template, sizeof(capture_template));
	memset(desc_memoryinfo, 0, sizeof(*desc_memoryinfo));

	desc->sequence = chan->capture_descr_sequence;
	desc->ch_cfg.match.stream = (1u << nvcsi_stream); /* one-hot bit encoding */
	desc->ch_cfg.match.vc = (1u << chan->virtual_channel); /* one-hot bit encoding */
	desc->ch_cfg.frame.frame_x = width;
	desc->ch_cfg.frame.frame_y = height;
	desc->ch_cfg.match.datatype = data_type;
	desc->ch_cfg.match.datatype_mask = 0x3f;
	desc->ch_cfg.pixfmt_enable = 1;
	desc->ch_cfg.pixfmt.format = format;

	desc_memoryinfo->surface[0].base_address = offset;
	desc_memoryinfo->surface[0].size = chan->format.bytesperline * height;
	desc->ch_cfg.atomp.surface_stride[0] = bpl;

	/* NV16 is semi-planar: chroma plane in the second half of the image. */
	if (chan->fmtinfo->fourcc == V4L2_PIX_FMT_NV16) {
		desc_memoryinfo->surface[1].base_address = offset + chan->format.sizeimage / 2;
		desc_memoryinfo->surface[1].size = chan->format.bytesperline * height;
		desc->ch_cfg.atomp.surface_stride[1] = bpl;
	}

	if (chan->embedded_data_height > 0) {
		desc->ch_cfg.embdata_enable = 1;
		desc->ch_cfg.frame.embed_x = chan->embedded_data_width * BPP_MEM;
		desc->ch_cfg.frame.embed_y = chan->embedded_data_height;

		desc_memoryinfo->surface[VI_ATOMP_SURFACE_EMBEDDED].base_address
			= chan->emb_buf;
		desc_memoryinfo->surface[VI_ATOMP_SURFACE_EMBEDDED].size
			= desc->ch_cfg.frame.embed_x * desc->ch_cfg.frame.embed_y;
		desc->ch_cfg.atomp.surface_stride[VI_ATOMP_SURFACE_EMBEDDED]
			= chan->embedded_data_width * BPP_MEM;
	}

	chan->capture_descr_sequence += 1;
}
/* Hand a finished capture buffer back to videobuf2 with the channel's
 * running sequence number, progressive field flag and full payload size;
 * the buffer's final state (DONE/ERROR) was decided by the dequeue path.
 */
static void vi5_release_buffer(struct tegra_channel *chan,
	struct tegra_channel_buffer *buf)
{
	struct vb2_v4l2_buffer *vb = &buf->buf;

	vb->sequence = chan->sequence++;
	vb->field = V4L2_FIELD_NONE;
	vb2_set_plane_payload(&vb->vb2_buf, 0, chan->format.sizeimage);
	vb2_buffer_done(&vb->vb2_buf, buf->vb2_state);
}
/*
 * vi5_capture_enqueue() - program the next descriptor ring slot for each
 * valid port from @buf and dispatch the capture request(s) to the
 * fusa-capture driver, then move the buffer onto the dequeue list and
 * wake the dequeue kthread.  Any dispatch failure flips the channel into
 * CAPTURE_ERROR state.
 */
static void vi5_capture_enqueue(struct tegra_channel *chan,
	struct tegra_channel_buffer *buf)
{
	int err = 0;
	unsigned int vi_port;
	unsigned long flags;
	struct tegra_mc_vi *vi = chan->vi;
	struct vi_capture_req request[2] = {{
		.buffer_index = 0,
	}, {
		.buffer_index = 0,
	}};

	for (vi_port = 0; vi_port < chan->valid_ports; vi_port++) {
		vi5_setup_surface(chan, buf, chan->capture_descr_index, vi_port);
		request[vi_port].buffer_index = chan->capture_descr_index;

		err = vi_capture_request(chan->tegra_vi_channel[vi_port], &request[vi_port]);
		if (err) {
			dev_err(vi->dev, "uncorr_err: request dispatch err %d\n", err);
			goto uncorr_err;
		}
		/* capture_state_lock guards state + in-flight counter. */
		spin_lock_irqsave(&chan->capture_state_lock, flags);
		if (chan->capture_state != CAPTURE_ERROR) {
			chan->capture_state = CAPTURE_GOOD;
			chan->capture_reqs_enqueued += 1;
		}
		spin_unlock_irqrestore(&chan->capture_state_lock, flags);
		/* remember which ring slot this buffer occupies per port */
		buf->capture_descr_index[vi_port] = chan->capture_descr_index;
	}
	/* advance the ring write index (wraps at queue depth) */
	chan->capture_descr_index = ((chan->capture_descr_index + 1)
		% (chan->capture_queue_depth));

	spin_lock(&chan->dequeue_lock);
	list_add_tail(&buf->queue, &chan->dequeue);
	spin_unlock(&chan->dequeue_lock);
	wake_up_interruptible(&chan->dequeue_wait);
	return;

uncorr_err:
	spin_lock_irqsave(&chan->capture_state_lock, flags);
	chan->capture_state = CAPTURE_ERROR;
	spin_unlock_irqrestore(&chan->capture_state_lock, flags);
}
/*
 * vi5_capture_dequeue() - wait for @buf's capture request(s) to finish,
 * inspect the descriptor status for each port, and release the buffer to
 * videobuf2 as DONE or ERROR.
 *
 * Error classes: timeouts / channel-in-error flags are uncorrectable
 * (channel enters CAPTURE_ERROR); other status failures just discard the
 * single frame.  In gang mode the frame ids of both ports must match.
 */
static void vi5_capture_dequeue(struct tegra_channel *chan,
	struct tegra_channel_buffer *buf)
{
	int err = 0;
	unsigned int vi_port = 0;
	int gang_prev_frame_id = 0;
	unsigned long flags;
	struct tegra_mc_vi *vi = chan->vi;
	struct vb2_v4l2_buffer *vb = &buf->buf;
	struct timespec64 ts;
	struct capture_descriptor *descr = NULL;

	for (vi_port = 0; vi_port < chan->valid_ports; vi_port++) {
		descr = &chan->request[vi_port][buf->capture_descr_index[vi_port]];
		/* Buffer already failed (e.g. error recovery); skip waiting. */
		if (buf->vb2_state != VB2_BUF_STATE_ACTIVE)
			goto rel_buf;

		/* Dequeue a frame and check its capture status */
		err = vi_capture_status(chan->tegra_vi_channel[vi_port], CAPTURE_TIMEOUT_MS);
		if (err) {
			if (err == -ETIMEDOUT) {
				dev_err(vi->dev,
					"uncorr_err: request timed out after %d ms\n",
					CAPTURE_TIMEOUT_MS);
			} else {
				dev_err(vi->dev, "uncorr_err: request err %d\n", err);
			}
			goto uncorr_err;
		} else if (descr->status.status != CAPTURE_STATUS_SUCCESS) {
			if ((descr->status.flags
					& CAPTURE_STATUS_FLAG_CHANNEL_IN_ERROR) != 0) {
				chan->queue_error = true;
				dev_err(vi->dev, "uncorr_err: flags %d, err_data %d\n",
					descr->status.flags, descr->status.err_data);
			} else {
				dev_warn(vi->dev,
					"corr_err: discarding frame %d, flags: %d, "
					"err_data %d\n",
					descr->status.frame_id, descr->status.flags,
					descr->status.err_data);
				buf->vb2_state = VB2_BUF_STATE_ERROR;
				goto done;
			}
		} else if (!vi_port) {
			/* gang mode: remember port 0's frame id for comparison */
			gang_prev_frame_id = descr->status.frame_id;
		} else if (descr->status.frame_id != gang_prev_frame_id) {
			dev_err(vi->dev, "frame_id out of sync: ch2 %d vs ch1 %d\n",
				gang_prev_frame_id, descr->status.frame_id);
			goto uncorr_err;
		}

		spin_lock_irqsave(&chan->capture_state_lock, flags);
		if (chan->capture_state != CAPTURE_ERROR) {
			chan->capture_reqs_enqueued -= 1;
			chan->capture_state = CAPTURE_GOOD;
		}
		spin_unlock_irqrestore(&chan->capture_state_lock, flags);
	}
	/* a ring slot freed up: the enqueue kthread may proceed */
	wake_up_interruptible(&chan->start_wait);

	/* Read SOF from capture descriptor */
	ts = ns_to_timespec64((s64)descr->status.sof_timestamp);
	trace_tegra_channel_capture_frame("sof", &ts);
	vb->vb2_buf.timestamp = descr->status.sof_timestamp;
	buf->vb2_state = VB2_BUF_STATE_DONE;

	/* Read EOF from capture descriptor */
	ts = ns_to_timespec64((s64)descr->status.eof_timestamp);
	trace_tegra_channel_capture_frame("eof", &ts);

done:
	goto rel_buf;

uncorr_err:
	spin_lock_irqsave(&chan->capture_state_lock, flags);
	chan->capture_state = CAPTURE_ERROR;
	spin_unlock_irqrestore(&chan->capture_state_lock, flags);
	buf->vb2_state = VB2_BUF_STATE_ERROR;

rel_buf:
	vi5_release_buffer(chan, buf);
}
/*
 * vi5_channel_error_recover() - recover from an uncorrectable capture
 * error: release and close the VI channel(s), flush all pending buffers
 * to userspace as errors, reset the linked NVCSI stream, then reopen and
 * reconfigure the VI channel(s) and clear the error state.
 *
 * @queue_error: when true, also put the vb2 queue into the error state.
 * Returns 0 on success or a negative errno.
 *
 * Fix vs. original: the reopen failure path took the error code from
 * PTR_ERR(chan) — the channel struct, never an ERR_PTR — instead of the
 * ERR_PTR actually returned by vi_channel_open_ex(); the stale ERR_PTR
 * is also cleared so later teardown cannot dereference it.
 */
static int vi5_channel_error_recover(struct tegra_channel *chan,
	bool queue_error)
{
	int err = 0;
	unsigned int vi_port = 0;
	struct tegra_channel_buffer *buf;
	struct tegra_mc_vi *vi = chan->vi;
	struct v4l2_subdev *csi_subdev;

	/* stop vi channel */
	for (vi_port = 0; vi_port < chan->valid_ports; vi_port++) {
		err = vi_capture_release(chan->tegra_vi_channel[vi_port],
			CAPTURE_CHANNEL_RESET_FLAG_IMMEDIATE);
		if (err) {
			dev_err(&chan->video->dev, "vi capture release failed\n");
			goto done;
		}
		vi_channel_close_ex(chan->id + vi_port, chan->tegra_vi_channel[vi_port]);
		chan->tegra_vi_channel[vi_port] = NULL;
	}

	/* release all previously-enqueued capture buffers to v4l2 */
	while (!list_empty(&chan->capture)) {
		buf = dequeue_buffer(chan, false);
		if (!buf)
			break;
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	while (!list_empty(&chan->dequeue)) {
		buf = dequeue_dequeue_buffer(chan);
		if (!buf)
			break;
		buf->vb2_state = VB2_BUF_STATE_ERROR;
		vi5_capture_dequeue(chan, buf);
	}

	/* report queue error to application */
	if (queue_error)
		vb2_queue_error(&chan->queue);

	/* reset nvcsi stream */
	csi_subdev = tegra_channel_find_linked_csi_subdev(chan);
	if (!csi_subdev) {
		dev_err(vi->dev, "unable to find linked csi subdev\n");
		err = -1;
		goto done;
	}
#if 0 /* disable for Canonical kernel */
	v4l2_subdev_call(csi_subdev, core, sync,
		V4L2_SYNC_EVENT_SUBDEV_ERROR_RECOVER);
#endif

	/* restart vi channel */
	for (vi_port = 0; vi_port < chan->valid_ports; vi_port++) {
		chan->tegra_vi_channel[vi_port] = vi_channel_open_ex(chan->id + vi_port, false);
		if (IS_ERR(chan->tegra_vi_channel[vi_port])) {
			err = PTR_ERR(chan->tegra_vi_channel[vi_port]);
			/* don't leave an ERR_PTR behind for teardown paths */
			chan->tegra_vi_channel[vi_port] = NULL;
			goto done;
		}
		err = tegra_channel_capture_setup(chan, vi_port);
		if (err < 0)
			goto done;
	}
	chan->sequence = 0;
	tegra_channel_init_ring_buffer(chan);
	chan->capture_reqs_enqueued = 0;

	/* clear capture channel error state */
	chan->capture_state = CAPTURE_IDLE;

done:
	return err;
}
/*
 * tegra_channel_kthread_capture_enqueue - capture enqueue worker thread
 * @data: the struct tegra_channel this thread services
 *
 * Sleeps until userspace queues buffers on chan->capture, then feeds them
 * to the VI capture hardware via vi5_capture_enqueue(). Feeding stops when
 * the channel is in CAPTURE_ERROR state or when the number of in-flight
 * requests reaches capture_queue_depth * valid_ports. The thread exits
 * when kthread_stop() is called. Always returns 0.
 */
static int tegra_channel_kthread_capture_enqueue(void *data)
{
	struct tegra_channel *chan = data;
	struct tegra_channel_buffer *buf;
	unsigned long flags;

	set_freezable();

	while (1) {
		try_to_freeze();

		/* sleep until there is work or we are asked to stop */
		wait_event_interruptible(chan->start_wait,
			(kthread_should_stop() || !list_empty(&chan->capture)));

		while (!(kthread_should_stop() || list_empty(&chan->capture))) {
			/*
			 * Stop feeding on error, or when the hardware queue
			 * already holds the maximum number of requests.
			 */
			spin_lock_irqsave(&chan->capture_state_lock, flags);
			if ((chan->capture_state == CAPTURE_ERROR)
				|| !(chan->capture_reqs_enqueued
					< (chan->capture_queue_depth * chan->valid_ports))) {
				spin_unlock_irqrestore(
					&chan->capture_state_lock, flags);
				break;
			}
			spin_unlock_irqrestore(&chan->capture_state_lock,
				flags);

			buf = dequeue_buffer(chan, false);
			if (!buf)
				break;

			buf->vb2_state = VB2_BUF_STATE_ACTIVE;
			vi5_capture_enqueue(chan, buf);
		}

		if (kthread_should_stop())
			break;
	}

	return 0;
}
/*
 * tegra_channel_kthread_capture_dequeue - capture dequeue worker thread
 * @data: the struct tegra_channel this thread services
 *
 * Waits for completed capture requests on chan->dequeue and hands them back
 * to v4l2 via vi5_capture_dequeue(). When the channel enters CAPTURE_ERROR
 * state the thread triggers channel error recovery; if recovery itself
 * fails, the thread exits. Terminates when kthread_stop() is called.
 * Always returns 0.
 */
static int tegra_channel_kthread_capture_dequeue(void *data)
{
	int err = 0;
	unsigned long flags;
	struct tegra_channel *chan = data;
	struct tegra_channel_buffer *buf;

	set_freezable();

	while (1) {
		try_to_freeze();

		/* wake on completed buffers, a capture error, or stop */
		wait_event_interruptible(chan->dequeue_wait,
			(kthread_should_stop()
			|| !list_empty(&chan->dequeue)
			|| (chan->capture_state == CAPTURE_ERROR)));

		while (!(kthread_should_stop() || list_empty(&chan->dequeue)
			|| (chan->capture_state == CAPTURE_ERROR))) {
			buf = dequeue_dequeue_buffer(chan);
			if (!buf)
				break;

			vi5_capture_dequeue(chan, buf);
		}

		spin_lock_irqsave(&chan->capture_state_lock, flags);
		if (chan->capture_state == CAPTURE_ERROR) {
			spin_unlock_irqrestore(&chan->capture_state_lock,
				flags);
			/*
			 * NOTE(review): this calls tegra_channel_error_recover
			 * (defined outside this file), not
			 * vi5_channel_error_recover directly -- presumably a
			 * common-framework wrapper dispatching to the
			 * vi_error_recover fop; confirm.
			 */
			err = tegra_channel_error_recover(chan, false);
			if (err) {
				dev_err(chan->vi->dev,
					"fatal: error recovery failed\n");
				break;
			}
		} else
			spin_unlock_irqrestore(&chan->capture_state_lock,
				flags);

		if (kthread_should_stop())
			break;
	}

	return 0;
}
static int vi5_channel_start_kthreads(struct tegra_channel *chan)
{
int err = 0;
/* Start the kthread for capture enqueue */
if (chan->kthread_capture_start) {
dev_err(chan->vi->dev, "enqueue kthread already initialized\n");
err = -1;
goto done;
}
chan->kthread_capture_start = kthread_run(
tegra_channel_kthread_capture_enqueue, chan, chan->video->name);
if (IS_ERR(chan->kthread_capture_start)) {
dev_err(&chan->video->dev,
"failed to run kthread for capture enqueue\n");
err = PTR_ERR(chan->kthread_capture_start);
goto done;
}
/* Start the kthread for capture dequeue */
if (chan->kthread_capture_dequeue) {
dev_err(chan->vi->dev, "dequeue kthread already initialized\n");
err = -1;
goto done;
}
chan->kthread_capture_dequeue = kthread_run(
tegra_channel_kthread_capture_dequeue, chan, chan->video->name);
if (IS_ERR(chan->kthread_capture_dequeue)) {
dev_err(&chan->video->dev,
"failed to run kthread for capture dequeue\n");
err = PTR_ERR(chan->kthread_capture_dequeue);
goto done;
}
done:
return err;
}
/*
 * vi5_channel_stop_kthreads - stop the capture worker threads, if running
 * @chan: channel whose threads should be stopped
 *
 * Serialized by chan->stop_kthread_lock so concurrent teardown paths do not
 * double-stop a thread.
 */
static void vi5_channel_stop_kthreads(struct tegra_channel *chan)
{
	struct task_struct *thread;

	mutex_lock(&chan->stop_kthread_lock);

	/* Stop the kthread for capture enqueue */
	thread = chan->kthread_capture_start;
	if (thread) {
		chan->kthread_capture_start = NULL;
		kthread_stop(thread);
	}

	/* Stop the kthread for capture dequeue */
	thread = chan->kthread_capture_dequeue;
	if (thread) {
		chan->kthread_capture_dequeue = NULL;
		kthread_stop(thread);
	}

	mutex_unlock(&chan->stop_kthread_lock);
}
/*
 * vi5_unit_get_device_handle - resolve the nvhost device for a CSI stream
 * @pdev: VI platform device
 * @csi_stream_id: stream whose backing device is wanted
 * @dev: out parameter receiving the device handle; must be non-NULL
 */
static void vi5_unit_get_device_handle(struct platform_device *pdev,
	uint32_t csi_stream_id, struct device **dev)
{
	if (!dev) {
		dev_err(&pdev->dev, "dev pointer is NULL\n");
		return;
	}

	*dev = vi_csi_stream_to_nvhost_device(pdev, csi_stream_id);
}
/*
 * vi5_channel_start_streaming - vb2 start_streaming hook for VI5
 * @vq: videobuf2 queue being started
 * @count: number of buffers already queued (unused here)
 *
 * Unless the channel is in bypass mode: opens one VI channel per port, reads
 * the embedded-data geometry from the sensor DT properties, (re)allocates the
 * embedded-data DMA buffer when the required size grew, programs capture
 * setup and starts the worker threads. Finally turns on the CSI/sensor
 * stream and writes the control blobs.
 *
 * Returns 0 on success, negative errno on failure; on failure all queued
 * buffers are returned to v4l2 in the QUEUED state.
 */
static int vi5_channel_start_streaming(struct vb2_queue *vq, u32 count)
{
	struct tegra_channel *chan = vb2_get_drv_priv(vq);
	/* WAR: With newer version pipe init has some race condition */
	/* TODO: resolve this issue to block userspace not to cleanup media */
	int ret = 0;
	int vi_port = 0;
	unsigned long flags;
	struct v4l2_subdev *sd;
	struct device_node *node;
	struct sensor_mode_properties *sensor_mode;
	struct camera_common_data *s_data;
	unsigned int emb_buf_size = 0;

	/* Skip in bypass mode */
	if (!chan->bypass) {
		for (vi_port = 0; vi_port < chan->valid_ports; vi_port++) {
			chan->tegra_vi_channel[vi_port] =
				vi_channel_open_ex(chan->id + vi_port, false);
			if (IS_ERR(chan->tegra_vi_channel[vi_port])) {
				/*
				 * bug fix: propagate the error from the
				 * failed open, not PTR_ERR() of the valid
				 * chan pointer; clear the slot so the error
				 * path does not close an ERR_PTR.
				 */
				ret = PTR_ERR(chan->tegra_vi_channel[vi_port]);
				chan->tegra_vi_channel[vi_port] = NULL;
				goto err_open_ex;
			}

			spin_lock_irqsave(&chan->capture_state_lock, flags);
			chan->capture_state = CAPTURE_IDLE;
			spin_unlock_irqrestore(&chan->capture_state_lock, flags);

			if (!chan->pg_mode) {
				sd = chan->subdev_on_csi;
				node = sd->dev->of_node;
				s_data = to_camera_common_data(sd->dev);

				/* get sensor properties from DT */
				if (s_data != NULL && node != NULL) {
					int idx = s_data->mode_prop_idx;

					emb_buf_size = 0;
					if (idx < s_data->sensor_props.num_modes) {
						sensor_mode =
							&s_data->sensor_props.sensor_modes[idx];
						chan->embedded_data_width =
							sensor_mode->image_properties.width;
						chan->embedded_data_height =
							sensor_mode->image_properties.embedded_metadata_height;
						/* rounding up to page size */
						emb_buf_size =
							round_up(chan->embedded_data_width *
								chan->embedded_data_height *
								BPP_MEM, PAGE_SIZE);
					}
				}

				/* Allocate buffer for Embedded Data if need to */
				if (emb_buf_size > chan->emb_buf_size) {
					struct device *vi_unit_dev;

					vi5_unit_get_device_handle(chan->vi->ndev,
						chan->port[0], &vi_unit_dev);
					/*
					 * if old buffer is smaller than what we need,
					 * release the old buffer and re-allocate a
					 * bigger one below.
					 */
					if (chan->emb_buf_size > 0) {
						dma_free_coherent(vi_unit_dev,
							chan->emb_buf_size,
							chan->emb_buf_addr,
							chan->emb_buf);
						chan->emb_buf_size = 0;
					}

					chan->emb_buf_addr =
						dma_alloc_coherent(vi_unit_dev,
							emb_buf_size,
							&chan->emb_buf, GFP_KERNEL);
					if (!chan->emb_buf_addr) {
						dev_err(&chan->video->dev,
							"Can't allocate memory for embedded data\n");
						/*
						 * bug fix: without this the
						 * function returned 0 (ret
						 * untouched) on allocation
						 * failure.
						 */
						ret = -ENOMEM;
						goto err_setup;
					}
					chan->emb_buf_size = emb_buf_size;
				}
			}

			ret = tegra_channel_capture_setup(chan, vi_port);
			if (ret < 0)
				goto err_setup;
		}

		chan->sequence = 0;
		tegra_channel_init_ring_buffer(chan);

		ret = vi5_channel_start_kthreads(chan);
		if (ret != 0)
			goto err_start_kthreads;
	}

	/* csi stream/sensor devices should be streamon post vi channel setup */
	ret = tegra_channel_set_stream(chan, true);
	if (ret < 0)
		goto err_set_stream;

	ret = tegra_channel_write_blobs(chan);
	if (ret < 0)
		goto err_write_blobs;

	return 0;

err_write_blobs:
	tegra_channel_set_stream(chan, false);

err_set_stream:
	if (!chan->bypass)
		vi5_channel_stop_kthreads(chan);

err_start_kthreads:
	if (!chan->bypass)
		for (vi_port = 0; vi_port < chan->valid_ports; vi_port++)
			vi_capture_release(chan->tegra_vi_channel[vi_port],
				CAPTURE_CHANNEL_RESET_FLAG_IMMEDIATE);

err_setup:
	if (!chan->bypass)
		for (vi_port = 0; vi_port < chan->valid_ports; vi_port++) {
			vi_channel_close_ex(chan->id + vi_port,
				chan->tegra_vi_channel[vi_port]);
			chan->tegra_vi_channel[vi_port] = NULL;
		}

err_open_ex:
	vq->start_streaming_called = 0;
	tegra_channel_queued_buf_done(chan, VB2_BUF_STATE_QUEUED, false);

	return ret;
}
/*
 * vi5_channel_stop_streaming - vb2 stop_streaming hook for VI5
 * @vq: videobuf2 queue being stopped
 *
 * Stops the worker threads, turns off the CSI/sensor stream, then (when not
 * in bypass mode) releases and closes every per-port VI channel and returns
 * any remaining buffers to v4l2 in the ERROR state. Always returns 0.
 */
static int vi5_channel_stop_streaming(struct vb2_queue *vq)
{
	struct tegra_channel *chan = vb2_get_drv_priv(vq);
	unsigned int port;
	long err;

	if (!chan->bypass)
		vi5_channel_stop_kthreads(chan);

	/* csi stream/sensor(s) devices to be closed before vi channel */
	tegra_channel_set_stream(chan, false);

	if (chan->bypass)
		return 0;

	for (port = 0; port < chan->valid_ports; port++) {
		err = vi_capture_release(chan->tegra_vi_channel[port],
			CAPTURE_CHANNEL_RESET_FLAG_IMMEDIATE);
		if (err)
			dev_err(&chan->video->dev,
				"vi capture release failed\n");

		vi_channel_close_ex(chan->id + port,
			chan->tegra_vi_channel[port]);
		chan->tegra_vi_channel[port] = NULL;
	}

	/* release all remaining buffers to v4l2 */
	tegra_channel_queued_buf_done(chan, VB2_BUF_STATE_ERROR, false);

	return 0;
}
/*
 * tegra_vi5_enable - power-up companion to tegra_vi5_disable()
 * @vi: VI instance being brought up (not used by this implementation)
 *
 * Enables the EMC clock needed for capture memory traffic.
 *
 * Returns 0 on success or the error from tegra_camera_emc_clk_enable().
 */
int tegra_vi5_enable(struct tegra_mc_vi *vi)
{
	return tegra_camera_emc_clk_enable();
}
/*
 * tegra_vi5_disable - counterpart of tegra_vi5_enable()
 * @vi: VI instance being powered down
 *
 * Runs channel "ec" close (presumably error-channel cleanup -- confirm in
 * mc_common) and drops the EMC clock reference taken by tegra_vi5_enable().
 */
void tegra_vi5_disable(struct tegra_mc_vi *vi)
{
	tegra_channel_ec_close(vi);
	tegra_camera_emc_clk_disable();
}
/*
 * vi5_power_on - power up the VI unit and the channel's subdevices
 * @chan: channel being powered on
 *
 * Enables the VI EMC clock, then powers on all subdevices in the channel's
 * pipeline via tegra_channel_set_power().
 *
 * Returns 0 on success, negative errno on failure.
 *
 * Note: the original set-but-unused 'csi' local was removed; it served no
 * purpose and risks -Wunused-but-set-variable under the Makefile's -Werror.
 */
static int vi5_power_on(struct tegra_channel *chan)
{
	int ret;
	struct tegra_mc_vi *vi = chan->vi;

	ret = tegra_vi5_enable(vi);
	if (ret < 0)
		return ret;

	ret = tegra_channel_set_power(chan, 1);
	if (ret < 0) {
		dev_err(vi->dev, "Failed to power on subdevices\n");
		return ret;
	}

	return 0;
}
/*
 * vi5_power_off - power down the channel's subdevices and the VI unit
 * @chan: channel being powered off
 *
 * Powers off the pipeline subdevices (failure is logged but not fatal --
 * teardown must proceed), then disables the VI clocks.
 *
 * Note: the original set-but-unused 'csi' local was removed; it served no
 * purpose and risks -Wunused-but-set-variable under the Makefile's -Werror.
 */
static void vi5_power_off(struct tegra_channel *chan)
{
	int ret;
	struct tegra_mc_vi *vi = chan->vi;

	ret = tegra_channel_set_power(chan, 0);
	if (ret < 0)
		dev_err(vi->dev, "Failed to power off subdevices\n");

	tegra_vi5_disable(vi);
}
/*
 * VI5 implementation of the tegra_vi_fops channel-operation table.
 * Exported so the common VI framework (built into the same tegra-camera
 * module per the Makefile) can dispatch to this hardware generation.
 */
struct tegra_vi_fops vi5_fops = {
	.vi_power_on = vi5_power_on,
	.vi_power_off = vi5_power_off,
	.vi_start_streaming = vi5_channel_start_streaming,
	.vi_stop_streaming = vi5_channel_stop_streaming,
	.vi_setup_queue = vi5_channel_setup_queue,
	.vi_error_recover = vi5_channel_error_recover,
	.vi_add_ctrls = vi5_add_ctrls,
	.vi_init_video_formats = vi5_init_video_formats,
	.vi_unit_get_device_handle = vi5_unit_get_device_handle,
};
EXPORT_SYMBOL(vi5_fops);

View File

@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Tegra Video Input 5 device common APIs
*
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef __T186_VI5_H__
#define __T186_VI5_H__
extern struct tegra_vi_fops vi5_fops;
#endif

View File

@@ -0,0 +1,141 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NVIDIA Tegra Video Input Device Driver VI5 formats
*
* Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef __VI5_FORMATS_H_
#define __VI5_FORMATS_H_
#include <media/tegra_camera_core.h>
/*
* These go into the VI_CHn_PIXFMT_FORMAT register bits 7:0
* Output pixel memory format for the VI channel.
*/
enum tegra_image_format {
	/* 16-bit RGB */
	TEGRA_IMAGE_FORMAT_T_R5G6B5 = 1,
	TEGRA_IMAGE_FORMAT_T_B5G6R5,
	/* single 8-bit component (also used for RAW8 Bayer, see table below) */
	TEGRA_IMAGE_FORMAT_T_R8 = 5,
	/* 8-bit-per-component RGBA permutations */
	TEGRA_IMAGE_FORMAT_T_A8B8G8R8 = 8,
	TEGRA_IMAGE_FORMAT_T_A8R8G8B8,
	TEGRA_IMAGE_FORMAT_T_B8G8R8A8,
	TEGRA_IMAGE_FORMAT_T_R8G8B8A8,
	/* packed YUV 4:2:2 */
	TEGRA_IMAGE_FORMAT_T_Y8_U8__Y8_V8 = 16,
	TEGRA_IMAGE_FORMAT_T_Y8_V8__Y8_U8,
	TEGRA_IMAGE_FORMAT_T_V8_Y8__U8_Y8,
	TEGRA_IMAGE_FORMAT_T_U8_Y8__V8_Y8,
	/* semi-planar 8-bit YUV 4:2:0 */
	TEGRA_IMAGE_FORMAT_T_Y8__U8V8_N420 = 34,
	TEGRA_IMAGE_FORMAT_T_Y8__V8U8_N420,
	/* 5/5/5/1 RGB */
	TEGRA_IMAGE_FORMAT_T_B5G5R5A1 = 42,
	TEGRA_IMAGE_FORMAT_T_R5G5B5A1,
	/* semi-/fully-planar 8-bit YUV 4:2:2 and 4:2:0 */
	TEGRA_IMAGE_FORMAT_T_Y8__U8V8_N422,
	TEGRA_IMAGE_FORMAT_T_Y8__V8U8_N422,
	TEGRA_IMAGE_FORMAT_T_Y8__U8__V8_N422,
	TEGRA_IMAGE_FORMAT_T_Y8__U8__V8_N420,
	TEGRA_IMAGE_FORMAT_T_DPCM_RAW10 = 64,
	/* 10-bit-per-component RGBA */
	TEGRA_IMAGE_FORMAT_T_A2B10G10R10 = 68,
	TEGRA_IMAGE_FORMAT_T_A2R10G10B10,
	TEGRA_IMAGE_FORMAT_T_B10G10R10A2,
	TEGRA_IMAGE_FORMAT_T_R10G10B10A2,
	/* 4-bit-per-component RGBA and 1-bit-alpha variants */
	TEGRA_IMAGE_FORMAT_T_A4B4G4R4 = 80,
	TEGRA_IMAGE_FORMAT_T_A4R4G4B4,
	TEGRA_IMAGE_FORMAT_T_B4G4R4A4,
	TEGRA_IMAGE_FORMAT_T_R4G4B4A4,
	TEGRA_IMAGE_FORMAT_T_A1B5G5R5,
	TEGRA_IMAGE_FORMAT_T_A1R5G5B5,
	/* 10-bit planar/semi-planar YUV */
	TEGRA_IMAGE_FORMAT_T_Y10__V10U10_N420 = 98,
	TEGRA_IMAGE_FORMAT_T_Y10__U10V10_N420,
	TEGRA_IMAGE_FORMAT_T_Y10__U10__V10_N420,
	TEGRA_IMAGE_FORMAT_T_Y10__V10U10_N422,
	TEGRA_IMAGE_FORMAT_T_Y10__U10V10_N422,
	TEGRA_IMAGE_FORMAT_T_Y10__U10__V10_N422,
	TEGRA_IMAGE_FORMAT_T_DPCM_RAW12 = 128,
	/* 16-bit single-component variants (memory format for RAW10/12) */
	TEGRA_IMAGE_FORMAT_T_R16_ISP = 194,
	TEGRA_IMAGE_FORMAT_T_R16_F,
	TEGRA_IMAGE_FORMAT_T_R16,
	TEGRA_IMAGE_FORMAT_T_R16_I,
	/* 32-bit single component */
	TEGRA_IMAGE_FORMAT_T_R32 = 230,
	TEGRA_IMAGE_FORMAT_T_R32_F = 232,
	TEGRA_IMAGE_FORMAT_T_DPCM_RAW16 = 254,
	TEGRA_IMAGE_FORMAT_T_DPCM_RAW20,
};
/*
 * Media-bus-code to memory-format mapping table for VI5.
 * NOTE(review): column semantics are inferred from usage here -- confirm
 * against the TEGRA_VIDEO_FORMAT macro in <media/tegra_camera_core.h>.
 */
static const struct tegra_video_format vi5_video_formats[] = {
	/* RAW 6: TODO */
	/* RAW 7: TODO */
	/* RAW 8 */
	TEGRA_VIDEO_FORMAT(RAW8, 8, SRGGB8_1X8, 1, 1, T_R8,
				RAW8, SRGGB8, "RGRG.. GBGB.."),
	TEGRA_VIDEO_FORMAT(RAW8, 8, SGRBG8_1X8, 1, 1, T_R8,
				RAW8, SGRBG8, "GRGR.. BGBG.."),
	TEGRA_VIDEO_FORMAT(RAW8, 8, SGBRG8_1X8, 1, 1, T_R8,
				RAW8, SGBRG8, "GBGB.. RGRG.."),
	TEGRA_VIDEO_FORMAT(RAW8, 8, SBGGR8_1X8, 1, 1, T_R8,
				RAW8, SBGGR8, "BGBG.. GRGR.."),
	/* RAW 10 */
	TEGRA_VIDEO_FORMAT(RAW10, 10, SRGGB10_1X10, 2, 1, T_R16,
				RAW10, SRGGB10, "RGRG.. GBGB.."),
	TEGRA_VIDEO_FORMAT(RAW10, 10, SGRBG10_1X10, 2, 1, T_R16,
				RAW10, SGRBG10, "GRGR.. BGBG.."),
	TEGRA_VIDEO_FORMAT(RAW10, 10, SGBRG10_1X10, 2, 1, T_R16,
				RAW10, SGBRG10, "GBGB.. RGRG.."),
	TEGRA_VIDEO_FORMAT(RAW10, 10, SBGGR10_1X10, 2, 1, T_R16,
				RAW10, SBGGR10, "BGBG.. GRGR.."),
	/* RAW 12 */
	TEGRA_VIDEO_FORMAT(RAW12, 12, SRGGB12_1X12, 2, 1, T_R16,
				RAW12, SRGGB12, "RGRG.. GBGB.."),
	TEGRA_VIDEO_FORMAT(RAW12, 12, SGRBG12_1X12, 2, 1, T_R16,
				RAW12, SGRBG12, "GRGR.. BGBG.."),
	TEGRA_VIDEO_FORMAT(RAW12, 12, SGBRG12_1X12, 2, 1, T_R16,
				RAW12, SGBRG12, "GBGB.. RGRG.."),
	TEGRA_VIDEO_FORMAT(RAW12, 12, SBGGR12_1X12, 2, 1, T_R16,
				RAW12, SBGGR12, "BGBG.. GRGR.."),
	/* RGB888 */
	TEGRA_VIDEO_FORMAT(RGB888, 24, RGB888_1X24, 4, 1, T_A8R8G8B8,
				RGB888, ABGR32, "BGRA-8-8-8-8"),
	TEGRA_VIDEO_FORMAT(RGB888, 24, RGB888_1X32_PADHI, 4, 1, T_A8B8G8R8,
				RGB888, RGB32, "RGB-8-8-8-8"),
	/* YUV422 */
	TEGRA_VIDEO_FORMAT(YUV422, 16, UYVY8_1X16, 2, 1, T_U8_Y8__V8_Y8,
				YUV422_8, UYVY, "YUV 4:2:2"),
	TEGRA_VIDEO_FORMAT(YUV422, 16, VYUY8_1X16, 2, 1, T_V8_Y8__U8_Y8,
				YUV422_8, VYUY, "YUV 4:2:2"),
	TEGRA_VIDEO_FORMAT(YUV422, 16, YUYV8_1X16, 2, 1, T_Y8_U8__Y8_V8,
				YUV422_8, YUYV, "YUV 4:2:2"),
	TEGRA_VIDEO_FORMAT(YUV422, 16, YVYU8_1X16, 2, 1, T_Y8_V8__Y8_U8,
				YUV422_8, YVYU, "YUV 4:2:2"),
	/* semi-planar output (NV16) from a packed UYVY bus format */
	TEGRA_VIDEO_FORMAT(YUV422, 16, UYVY8_1X16, 1, 1, T_Y8__V8U8_N422,
				YUV422_8, NV16, "NV16"),
	TEGRA_VIDEO_FORMAT(YUV422, 16, UYVY8_2X8, 2, 1, T_U8_Y8__V8_Y8,
				YUV422_8, UYVY, "YUV 4:2:2 UYVY"),
	TEGRA_VIDEO_FORMAT(YUV422, 16, VYUY8_2X8, 2, 1, T_V8_Y8__U8_Y8,
				YUV422_8, VYUY, "YUV 4:2:2 VYUY"),
	TEGRA_VIDEO_FORMAT(YUV422, 16, YUYV8_2X8, 2, 1, T_Y8_U8__Y8_V8,
				YUV422_8, YUYV, "YUV 4:2:2 YUYV"),
	TEGRA_VIDEO_FORMAT(YUV422, 16, YVYU8_2X8, 2, 1, T_Y8_V8__Y8_U8,
				YUV422_8, YVYU, "YUV 4:2:2 YVYU"),
};
#endif

View File

@@ -0,0 +1,46 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2022, NVIDIA CORPORATION, All rights reserved.
*/
#ifndef MIPI_CAL_H
#define MIPI_CAL_H
#define DSID (1 << 31)
#define DSIC (1 << 30)
#define DSIB (1 << 29)
#define DSIA (1 << 28)
#define CSIH (1 << 27)
#define CSIG (1 << 26)
#define CSIF (1 << 25)
#define CSIE (1 << 24)
#define CSID (1 << 23)
#define CSIC (1 << 22)
#define CSIB (1 << 21)
#define CSIA (1 << 20)
#define CPHY_MASK 1
#ifdef CONFIG_TEGRA_MIPI_CAL
/* MIPI pad calibration API (prototypes only; implementations elsewhere) */
int tegra_mipi_bias_pad_enable(void);
int tegra_mipi_bias_pad_disable(void);
int tegra_mipi_calibration(int lanes);
int tegra_mipi_poweron(bool enable);
#else
/* Calibration support compiled out: every call succeeds as a no-op. */
static inline int tegra_mipi_bias_pad_enable(void)
{
	return 0;
}
static inline int tegra_mipi_bias_pad_disable(void)
{
	return 0;
}
static inline int tegra_mipi_calibration(int lanes)
{
	return 0;
}
static inline int tegra_mipi_poweron(bool enable)
{
	return 0;
}
#endif
#endif

View File

@@ -6,3 +6,4 @@ obj-m += host/nvdla/
obj-m += host/pva/
obj-m += tsec/
obj-m += dc/bridge/
obj-m += camera/

View File

@@ -0,0 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
LINUXINCLUDE += -I$(srctree.nvidia-oot)/drivers/video/tegra/host
LINUXINCLUDE += -DCONFIG_TEGRA_HOST1X
obj-m += tegra_camera_platform.o

View File

@@ -0,0 +1,260 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
#include <linux/slab.h>
#include <media/tegra_camera_dev_mfi.h>
static LIST_HEAD(cmfidev_list);
static DEFINE_MUTEX(cmfidev_mutex);
/*
 * Re-initialize the global MFI device list. The list is already statically
 * initialized via LIST_HEAD() above, so this is a defensive no-op; it is
 * only invoked from tegra_camera_dev_mfi_add_regmap() when the list is
 * empty. Always returns 0.
 */
static int tegra_camera_dev_mfi_init(void)
{
	INIT_LIST_HEAD(&cmfidev_list);
	return 0;
}
/*
 * tegra_camera_dev_mfi_cb - per-frame MFI callback
 * @stub: opaque argument, unused
 *
 * Walks every registered camera MFI device and flushes its queued writes.
 * Regmap-backed devices are deliberately written one frame late: the
 * previous frame's queued registers are programmed now and the current
 * queue is saved for the next callback. i2c-backed devices are flushed
 * immediately. Each device's queue is emptied afterwards.
 */
void tegra_camera_dev_mfi_cb(void *stub)
{
	u32 idx = 0;
	struct camera_mfi_dev *itr = NULL;
	int err = 0;

	mutex_lock(&cmfidev_mutex);
	list_for_each_entry(itr, &cmfidev_list, list) {
		if (itr->regmap) {
			/* MFI driver has to delay the focuser writes by one
			 * frame, which is required to get sync in focus
			 * position and sharpness.
			 * So write previous frame focuser settings in current
			 * frame's callback, and then save current frame focuser
			 * writes for next callback.
			 */
			for (idx = 0; idx < itr->prev_num_used; idx++) {
				err = regmap_write(itr->regmap,
						itr->prev_reg[idx].addr,
						itr->prev_reg[idx].val);
				if (err)
					pr_err("%s: [%s] regmap_write failed\n",
						__func__, itr->name);
			}

			/* Consume current settings, which would be programmed
			 * in next frame callback.
			 */
			for (idx = 0; idx < itr->num_used; idx++) {
				itr->prev_reg[idx].addr = itr->reg[idx].addr;
				itr->prev_reg[idx].val = itr->reg[idx].val;
			}
			itr->prev_num_used = itr->num_used;
		} else if (itr->i2c_client) {
			for (idx = 0; idx < itr->num_used; idx++) {
				err = i2c_transfer(itr->i2c_client->adapter,
						&itr->msg[idx].msg, 1);
				if (err != 1)
					pr_err("%s: [%s] i2c_transfer failed\n",
						__func__, itr->name);
			}
		} else {
			pr_err("%s [%s] Unknown device mechanism\n",
				__func__, itr->name);
		}

		itr->num_used = 0;
	}
	mutex_unlock(&cmfidev_mutex);
}
EXPORT_SYMBOL(tegra_camera_dev_mfi_cb);
/*
 * tegra_camera_dev_mfi_wr_add_i2c - queue raw i2c messages for a named device
 * @cmfidev: handle whose name selects the registered MFI device
 * @msg: array of i2c messages to queue (payloads are copied)
 * @num: number of messages in @msg
 *
 * The messages are replayed from tegra_camera_dev_mfi_cb().
 *
 * Returns 0 on success, -EINVAL for an empty name, -ENOSPC when the queue
 * cannot hold @num more messages, -ENODEV when no device matches.
 */
int tegra_camera_dev_mfi_wr_add_i2c(
	struct camera_mfi_dev *cmfidev,
	struct i2c_msg *msg, int num)
{
	int err = -ENODEV;
	int i = 0;
	struct camera_mfi_dev *itr = NULL;

	if (!strcmp(cmfidev->name, "")) {
		err = -EINVAL;
		goto cmfi_wr_add_i2c_end;
	}

	mutex_lock(&cmfidev_mutex);
	list_for_each_entry(itr, &cmfidev_list, list) {
		if (!strcmp(itr->name, cmfidev->name)) {
			/*
			 * bug fix: ensure ALL @num messages fit. The old
			 * check (num_used == CAMERA_REGCACHE_MAX) only
			 * rejected an already-full queue and could overrun
			 * itr->msg[] when num_used + num exceeded the cache.
			 */
			if (itr->num_used + num > CAMERA_REGCACHE_MAX)
				err = -ENOSPC;
			else {
				for (i = 0; i < num; i++) {
					itr->msg[itr->num_used].msg = msg[i];
					memcpy(itr->msg[itr->num_used].buf,
						msg[i].buf,
						msg[i].len);
					itr->msg[itr->num_used].msg.buf =
						itr->msg[itr->num_used].buf;
					itr->num_used++;
				}
				err = 0;
			}
		}
	}
	mutex_unlock(&cmfidev_mutex);

cmfi_wr_add_i2c_end:
	return err;
}
EXPORT_SYMBOL(tegra_camera_dev_mfi_wr_add_i2c);
/*
 * tegra_camera_dev_mfi_wr_add - queue one register write for a named device
 * @cmfidev: handle whose name selects the registered MFI device
 * @offset: register address
 * @val: value to write
 *
 * The write is issued later from tegra_camera_dev_mfi_cb().
 *
 * Returns 0 on success, -EINVAL for an empty name, -ENOSPC when the
 * register cache is full, -ENODEV when no device matches.
 */
int tegra_camera_dev_mfi_wr_add(
	struct camera_mfi_dev *cmfidev,
	u32 offset, u32 val)
{
	struct camera_mfi_dev *dev = NULL;
	int ret = -ENODEV;

	if (!strcmp(cmfidev->name, ""))
		return -EINVAL;

	mutex_lock(&cmfidev_mutex);
	list_for_each_entry(dev, &cmfidev_list, list) {
		if (strcmp(dev->name, cmfidev->name))
			continue;

		if (dev->num_used == CAMERA_REGCACHE_MAX) {
			ret = -ENOSPC;
			continue;
		}

		dev->reg[dev->num_used].addr = offset;
		dev->reg[dev->num_used].val = val;
		dev->num_used++;
		ret = 0;
	}
	mutex_unlock(&cmfidev_mutex);

	return ret;
}
/*
 * tegra_camera_dev_mfi_clear - drop all pending queued writes for a device
 * @cmfidev: handle whose name selects the registered MFI device
 *
 * Returns 0 on success, -EINVAL for a NULL handle or empty name, -ENODEV
 * when no device matches.
 */
int tegra_camera_dev_mfi_clear(struct camera_mfi_dev *cmfidev)
{
	struct camera_mfi_dev *dev = NULL;
	int ret = -ENODEV;

	if (cmfidev == NULL || !strcmp(cmfidev->name, ""))
		return -EINVAL;

	mutex_lock(&cmfidev_mutex);
	list_for_each_entry(dev, &cmfidev_list, list) {
		if (strcmp(dev->name, cmfidev->name))
			continue;

		if (dev->num_used > 0)
			pr_info("%s [%s] force clear Q pending writes\n",
				__func__, dev->name);
		dev->num_used = 0;
		ret = 0;
	}
	mutex_unlock(&cmfidev_mutex);

	return ret;
}
/*
 * tegra_camera_dev_mfi_add_i2cclient - register an i2c-backed MFI device
 * @cmfidev: out parameter receiving the new device handle on success
 * @name: unique device name (must be non-NULL and non-empty)
 * @i2c_client: i2c client used to replay queued messages
 *
 * Returns 0 on success, -EINVAL for a bad name, -EEXIST if the name is
 * already registered, -ENOMEM on allocation failure. The new device is
 * owned by the global list; there is no unregister path in this file.
 */
int tegra_camera_dev_mfi_add_i2cclient(
	struct camera_mfi_dev **cmfidev,
	u8 *name,
	struct i2c_client *i2c_client)
{
	int err = 0;
	struct camera_mfi_dev *itr = NULL;
	struct camera_mfi_dev *new_cmfidev = NULL;

	if (name == NULL || !strcmp(name, ""))
		return -EINVAL;

	mutex_lock(&cmfidev_mutex);
	list_for_each_entry(itr, &cmfidev_list, list) {
		if (!strcmp(itr->name, name)) {
			err = -EEXIST;
			goto cmfidev_add_i2c_unlock;
		}
	}

	/*
	 * cleanup: the old code memset() a kzalloc()ed buffer (redundant)
	 * and wrapped this in an always-true 'if (!err)'.
	 */
	new_cmfidev = kzalloc(sizeof(struct camera_mfi_dev), GFP_KERNEL);
	if (!new_cmfidev) {
		pr_err("%s memory low!\n", __func__);
		err = -ENOMEM;
		goto cmfidev_add_i2c_unlock;
	}

	strncpy(new_cmfidev->name, name, sizeof(new_cmfidev->name) - 1);
	INIT_LIST_HEAD(&new_cmfidev->list);
	new_cmfidev->i2c_client = i2c_client;
	new_cmfidev->num_used = 0;
	list_add(&new_cmfidev->list, &cmfidev_list);

	*cmfidev = new_cmfidev;

cmfidev_add_i2c_unlock:
	mutex_unlock(&cmfidev_mutex);
	return err;
}
/*
 * tegra_camera_dev_mfi_add_regmap - register a regmap-backed MFI device
 * @cmfidev: out parameter receiving the new device handle on success
 * @name: unique device name (must be non-NULL and non-empty)
 * @regmap: regmap used to replay queued register writes
 *
 * Returns 0 on success, -EINVAL for a bad name, -EEXIST if the name is
 * already registered, -ENOMEM on allocation failure.
 */
int tegra_camera_dev_mfi_add_regmap(
	struct camera_mfi_dev **cmfidev,
	u8 *name,
	struct regmap *regmap)
{
	int err = 0;
	struct camera_mfi_dev *itr = NULL;
	struct camera_mfi_dev *new_cmfidev = NULL;

	if (name == NULL || !strcmp(name, ""))
		return -EINVAL;

	mutex_lock(&cmfidev_mutex);
	list_for_each_entry(itr, &cmfidev_list, list) {
		if (!strcmp(itr->name, name)) {
			err = -EEXIST;
			goto cmfidev_add_regmap_unlock;
		}
	}

	/*
	 * cleanup: the old code memset() a kzalloc()ed buffer (redundant)
	 * and wrapped this in an always-true 'if (!err)'.
	 */
	new_cmfidev = kzalloc(sizeof(struct camera_mfi_dev), GFP_KERNEL);
	if (!new_cmfidev) {
		pr_err("%s memory low!\n", __func__);
		err = -ENOMEM;
		goto cmfidev_add_regmap_unlock;
	}

	strncpy(new_cmfidev->name, name, sizeof(new_cmfidev->name) - 1);
	INIT_LIST_HEAD(&new_cmfidev->list);
	new_cmfidev->regmap = regmap;
	new_cmfidev->num_used = 0;
	new_cmfidev->prev_num_used = 0;

	/*
	 * Defensive re-init of the (already statically initialized) global
	 * list; kept for behavioral parity with the original code.
	 */
	if (list_empty(&cmfidev_list))
		tegra_camera_dev_mfi_init();

	list_add(&new_cmfidev->list, &cmfidev_list);

	*cmfidev = new_cmfidev;

cmfidev_add_regmap_unlock:
	mutex_unlock(&cmfidev_mutex);
	return err;
}
MODULE_LICENSE("GPL");

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,245 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Tegra Graphics Host Driver Entrypoint
*
* Copyright (c) 2010-2022, NVIDIA Corporation. All rights reserved.
*/
#ifndef __NVHOST_HOST1X_H
#define __NVHOST_HOST1X_H
#include <linux/cdev.h>
#include <linux/nvhost.h>
#include <uapi/linux/nvhost_ioctl.h>
#include "nvhost_syncpt.h"
#include "nvhost_channel.h"
#include "nvhost_intr.h"
#define TRACE_MAX_LENGTH 128U
#define IFACE_NAME "nvhost"
struct nvhost_chip_support;
struct nvhost_channel;
struct mem_mgr;
extern long linsim_cl;
/*
* Policy determines how do we store the syncpts,
* i.e. either per channel (in struct nvhost_channel)
* or per channel instance (in struct nvhost_channel_userctx)
*/
enum nvhost_syncpt_policy {
SYNCPT_PER_CHANNEL = 0,
SYNCPT_PER_CHANNEL_INSTANCE,
};
/*
* Policy determines when to map HW channel to device,
* i.e. either on channel device node open time
* or on work submission time
*/
enum nvhost_channel_policy {
MAP_CHANNEL_ON_OPEN = 0,
MAP_CHANNEL_ON_SUBMIT,
};
struct host1x_device_info {
/* Channel info */
int nb_channels; /* host1x: num channels supported */
int ch_base; /* host1x: channel base */
int ch_limit; /* host1x: channel limit */
enum nvhost_channel_policy channel_policy; /* host1x: channel policy */
/* Syncpoint info */
int nb_hw_pts; /* host1x: num syncpoints supported
in h/w */
int nb_pts; /* host1x: num syncpoints supported
in s/w where nb_pts <= nb_hw_pts */
int pts_base; /* host1x: syncpoint base */
int pts_limit; /* host1x: syncpoint limit */
int nb_syncpt_irqs; /* host1x: number of syncpoint irqs */
enum nvhost_syncpt_policy syncpt_policy; /* host1x: syncpoint policy */
int nb_mlocks; /* host1x: number of mlocks */
int (*initialize_chip_support)(struct nvhost_master *,
struct nvhost_chip_support *);
int nb_actmons;
/* true if host1x access direct but engines are not owned */
bool vmserver_owns_engines;
/* true if hw supports remote syncpoint interrupts */
bool use_cross_vm_interrupts;
/* host1x: reg resources */
char *resources[NVHOST_MODULE_MAX_IORESOURCE_MEM];
/* host1x: number of resources */
int nb_resources;
/* cmdfifo only accessible from hypervisor? */
bool secure_cmdfifo;
/* ctrl device node name if not default */
const char *ctrl_name;
/* Size of a syncpoint page in the syncpoint<->mss interface */
uint32_t syncpt_page_size;
/* If MLOCK locked-state can be written through register */
bool rw_mlock_register;
};
/* Per-host1x-instance driver state; one of these exists per host1x device. */
struct nvhost_master {
	void __iomem *aperture;			/* full register aperture */
	void __iomem *sync_aperture;		/* syncpoint register window */
	void __iomem *actmon_aperture;		/* activity-monitor window */
	struct class *nvhost_class;		/* class for char devices */
	struct cdev cdev;
	struct device *ctrl;			/* the "nvhost-ctrl" device node */
	struct nvhost_syncpt syncpt;
	struct nvhost_intr intr;
	struct platform_device *dev;		/* backing platform device */
	atomic_t clientid;			/* source of unique client ids */
	struct host1x_device_info info;		/* per-SoC configuration */
	struct nvhost_characteristics nvhost_char;
	struct kobject *caps_kobj;		/* sysfs capability directory */
	struct nvhost_capability_node *caps_nodes;
	int major;				/* chrdev major number */
	int next_minor;				/* next free chrdev minor */
	struct mutex chrdev_mutex;
	struct mutex timeout_mutex;

	struct nvhost_channel **chlist;	/* channel list */
	struct mutex chlist_mutex; /* mutex for channel list */
	struct mutex ch_alloc_mutex; /* mutex for channel allocation */
	struct semaphore free_channels; /* Semaphore tracking free channels */
	/* presumably a channel-allocation bitmap -- confirm with users */
	unsigned long allocated_channels[2];

	/* nvhost vm specific structures */
	struct list_head vm_list;
	struct mutex vm_mutex;
	struct mutex vm_alloc_mutex;

	/* for nvhost_masters list */
	struct list_head list;

	struct rb_root syncpt_backing_head;
};
#ifdef CONFIG_DEBUG_FS
/* debugfs entry points (prototypes only here) */
void nvhost_debug_init(struct nvhost_master *master);
void nvhost_device_debug_init(struct platform_device *dev);
void nvhost_device_debug_deinit(struct platform_device *dev);
void nvhost_debug_dump(struct nvhost_master *master);
#else
/* debugfs disabled: all debug hooks compile away to empty inline stubs */
static inline void nvhost_debug_init(struct nvhost_master *master)
{
}
static inline void nvhost_device_debug_init(struct platform_device *dev)
{
}
static inline void nvhost_device_debug_deinit(struct platform_device *dev)
{
}
static inline void nvhost_debug_dump(struct nvhost_master *master)
{
}
#endif
int nvhost_host1x_finalize_poweron(struct platform_device *dev);
int nvhost_host1x_prepare_poweroff(struct platform_device *dev);
void nvhost_set_chanops(struct nvhost_channel *ch);
int nvhost_gather_filter_enabled(struct nvhost_syncpt *sp);
int nvhost_update_characteristics(struct platform_device *dev);
/*
 * Accessors for the driver-private pointers stored in the per-device
 * nvhost_device_data (retrieved via platform_get_drvdata()). The *_nowarn
 * getter variant performs the same lookup without the WARN_ON.
 */
static inline void *nvhost_get_falcon_data(struct platform_device *_dev)
{
	struct nvhost_device_data *pdata =
		(struct nvhost_device_data *)platform_get_drvdata(_dev);

	WARN_ON(!pdata);
	return pdata ? pdata->falcon_data : NULL;
}

static inline void nvhost_set_falcon_data(struct platform_device *_dev,
					void *priv_data)
{
	struct nvhost_device_data *pdata =
		(struct nvhost_device_data *)platform_get_drvdata(_dev);

	/* NOTE(review): still dereferences pdata below if it is NULL,
	 * despite the WARN_ON -- would oops; getters guard, setters do not.
	 */
	WARN_ON(!pdata);
	pdata->falcon_data = priv_data;
}

static inline void *nvhost_get_private_data(struct platform_device *_dev)
{
	struct nvhost_device_data *pdata =
		(struct nvhost_device_data *)platform_get_drvdata(_dev);

	WARN_ON(!pdata);
	return pdata ? pdata->private_data : NULL;
}

static inline void *nvhost_get_private_data_nowarn(struct platform_device *_dev)
{
	struct nvhost_device_data *pdata =
		(struct nvhost_device_data *)platform_get_drvdata(_dev);

	return pdata ? pdata->private_data : NULL;
}

static inline void nvhost_set_private_data(struct platform_device *_dev,
					void *priv_data)
{
	struct nvhost_device_data *pdata =
		(struct nvhost_device_data *)platform_get_drvdata(_dev);

	/* NOTE(review): same unguarded dereference as the falcon setter */
	WARN_ON(!pdata);
	pdata->private_data = priv_data;
}
struct nvhost_master *nvhost_get_prim_host(void);
/*
 * nvhost_get_host - find the nvhost_master owning a (sub)device
 * @_dev: any nvhost platform device in the host1x hierarchy
 *
 * Walks up the platform-device parent chain to the root host1x device and
 * returns the nvhost_master stashed in its private data (WARNs when the
 * private data is missing; the _nowarn variant below does not).
 */
static inline struct nvhost_master *nvhost_get_host(
	struct platform_device *_dev)
{
	struct device *parent = _dev->dev.parent;
	struct device *dev = &_dev->dev;

	/*
	 * host1x has no parent dev on non-DT configuration or has
	 * platform_bus on DT configuration. So search for a device
	 * whose parent is NULL or platform_bus
	 */
	while (parent && parent != &platform_bus) {
		dev = parent;
		parent = parent->parent;
	}

	return nvhost_get_private_data(to_platform_device(dev));
}

/* Same walk as nvhost_get_host(), without warning on missing pdata. */
static inline struct nvhost_master *nvhost_get_host_nowarn(
	struct platform_device *_dev)
{
	struct device *parent = _dev->dev.parent;
	struct device *dev = &_dev->dev;

	/*
	 * host1x has no parent dev on non-DT configuration or has
	 * platform_bus on DT configuration. So search for a device
	 * whose parent is NULL or platform_bus
	 */
	while (parent && parent != &platform_bus) {
		dev = parent;
		parent = parent->parent;
	}

	return nvhost_get_private_data_nowarn(to_platform_device(dev));
}

/*
 * nvhost_get_parent - immediate platform-device parent, or NULL when the
 * device is a root (no parent, or parented directly to platform_bus).
 */
static inline struct platform_device *nvhost_get_parent(
	struct platform_device *_dev)
{
	return (_dev->dev.parent && _dev->dev.parent != &platform_bus)
		? to_platform_device(_dev->dev.parent) : NULL;
}
struct nvhost_master *nvhost_get_syncpt_owner(u32 id);
struct nvhost_syncpt *nvhost_get_syncpt_owner_struct(u32 id,
struct nvhost_syncpt *default_syncpt);
#endif

View File

@@ -0,0 +1,160 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Deskew driver
*
* Copyright (c) 2018-2022 NVIDIA Corporation. All rights reserved.
*/
#ifndef __DESKEW_H__
#define __DESKEW_H__
#include <linux/completion.h>
#include <uapi/linux/nvhost_nvcsi_ioctl.h>
#include <media/csi.h>
////////////////////////////////////////////////////////////////
// STREAM REGISTERS
////////////////////////////////////////////////////////////////
#define NVCSI_STREAM_0_ERROR_STATUS2VI_MASK regs[0]
#define NVCSI_STREAM_1_ERROR_STATUS2VI_MASK regs[1]
#define CFG_ERR_STATUS2VI_MASK_ALL regs[2]
////////////////////////////////////////////////////////////////
// PHY REGISTERS
////////////////////////////////////////////////////////////////
// PHY INTERRUPTS REGISTERS
#define NVCSI_PHY_0_CILA_INTR_STATUS regs[3]
// bits in register NVCSI_PHY_0_CILA_INTR_STATUS
#define intr_dphy_cil_deskew_calib_err_ctrl (1 << 27)
#define intr_dphy_cil_deskew_calib_err_lane1 (1 << 26)
#define intr_dphy_cil_deskew_calib_err_lane0 (1 << 25)
#define intr_dphy_cil_deskew_calib_done_ctrl (1 << 24)
#define intr_dphy_cil_deskew_calib_done_lane1 (1 << 23)
#define intr_dphy_cil_deskew_calib_done_lane0 (1 << 22)
#define NVCSI_PHY_0_CILA_INTR_MASK regs[4]
#define NVCSI_PHY_0_CILB_INTR_STATUS regs[5]
#define NVCSI_PHY_0_CILB_INTR_MASK regs[6]
// new registers in T194
#define T194_NVCSI_PHY_0_CILA_INTR_1_STATUS 0x10404
#define T194_NVCSI_PHY_0_CILA_INTR_1_MASK 0x1040c
#define T194_NVCSI_PHY_0_CILB_INTR_1_STATUS 0x10804
#define T194_NVCSI_PHY_0_CILB_INTR_1_MASK 0x1080c
////////////////////////////////////////////////////////////////
// PHY DESKEW REGISTERS
////////////////////////////////////////////////////////////////
// XXX_OFFSET: address offset from NVCSI_CIL_PHY_CTRL_0
#define NVCSI_PHY_0_NVCSI_CIL_PHY_CTRL_0 regs[7]
#define NVCSI_CIL_A_SW_RESET_0_OFFSET regs[8]
#define NVCSI_CIL_A_CLK_DESKEW_CTRL_0_OFFSET regs[9]
// bits in register NVCSI_CIL_A_CLK_DESKEW_CTRL_0
#define CLK_INADJ_SWEEP_CTRL (0x1 << 15)
#define CLK_INADJ_LIMIT_HIGH (0x3f << 8)
#define CLK_INADJ_LIMIT_LOW 0x3f
#define NVCSI_CIL_A_DPHY_INADJ_CTRL_0_OFFSET regs[10]
// bits in register NVCSI_CIL_A_DPHY_INADJ_CTRL_0
#define SW_SET_DPHY_INADJ_CLK (0x1 << 22)
#define DPHY_INADJ_CLK (0x3f << 16)
#define DPHY_INADJ_CLK_SHIFT 16
#define SW_SET_DPHY_INADJ_IO1 (0x1 << 14)
#define DPHY_INADJ_IO1 (0x3f << 8)
#define DPHY_INADJ_IO1_SHIFT 8
#define SW_SET_DPHY_INADJ_IO0 (0x1 << 6)
#define DPHY_INADJ_IO0 0x3f
#define DPHY_INADJ_IO0_SHIFT 0
#define NVCSI_CIL_A_DATA_DESKEW_CTRL_0_OFFSET regs[11]
// bits in register NVCSI_CIL_A_DATA_DESKEW_CTRL_0
#define DATA_INADJ_SWEEP_CTRL1 (0x1 << 31)
#define DATA_INADJ_SWEEP_CTRL0 (0x1 << 15)
#define DATA_INADJ_LIMIT_HIGH1 (0x3f << 23)
#define DATA_INADJ_LIMIT_HIGH0 (0x3f << 8)
#define NVCSI_CIL_A_DPHY_DESKEW_STATUS_0_OFFSET regs[12]
// bits in register NVCSI_CIL_A_DPHY_DESKEW_STATUS_0
#define DPHY_CALIB_ERR_IO1 (0x1 << 15)
#define DPHY_CALIB_DONE_IO1 (0x1 << 14)
#define DPHY_CALIB_ERR_IO0 (0x1 << 7)
#define DPHY_CALIB_DONE_IO0 (0x1 << 6)
#define NVCSI_CIL_A_DPHY_DESKEW_DATA_CALIB_STATUS_LOW_0_0_OFFSET regs[13]
#define NVCSI_CIL_A_DPHY_DESKEW_DATA_CALIB_STATUS_HIGH_0_0_OFFSET regs[14]
#define NVCSI_CIL_A_DPHY_DESKEW_CLK_CALIB_STATUS_LOW_0_0_OFFSET regs[15]
#define NVCSI_CIL_A_DPHY_DESKEW_CLK_CALIB_STATUS_HIGH_0_0_OFFSET regs[16]
// only for t194+
#define NVCSI_CIL_A_DPHY_DESKEW_RESULT_STATUS_OFFSET 0x64
#define NVCSI_CIL_B_DPHY_DESKEW_RESULT_STATUS_OFFSET 0xf0
/*
* NVCSI_PHY_0_NVCSI_CIL_A_DESKEW_CONTROL_0 was introduced in T194
* Use this register for DESKEW_COMPARE and DESKEW_SETTLE
*/
#define NVCSI_CIL_A_DESKEW_CONTROL_0_OFFSET regs[17]
#define NVCSI_CIL_A_CONTROL_0_OFFSET regs[18]
/*
* bits in NVCSI_CIL_A_DESKEW_CONTROL_0/NVCSI_CIL_A_CONTROL_0
* For T194, the THS_SETTLE control was split into
* THS_SETTLE0 and THS_SETTLE1 for per-lane control
*/
#define DESKEW_COMPARE regs[19]
#define DESKEW_COMPARE_SHIFT regs[20]
#define DESKEW_SETTLE regs[21]
#define DESKEW_SETTLE_SHIFT regs[22]
#define CLK_SETTLE regs[23]
#define CLK_SETTLE_SHIFT0 regs[24]
#define THS_SETTLE0 regs[25]
#define THS_SETTLE1 regs[26]
#define THS_SETTLE0_SHIFT regs[27]
#define THS_SETTLE1_SHIFT regs[28]
#define NVCSI_CIL_B_DPHY_INADJ_CTRL_0_OFFSET regs[29]
#define NVCSI_CIL_B_CLK_DESKEW_CTRL_0_OFFSET regs[30]
#define NVCSI_CIL_B_DATA_DESKEW_CTRL_0_OFFSET regs[31]
#define NVCSI_CIL_B_DPHY_DESKEW_STATUS_0_OFFSET regs[32]
// same note as above for NVCSI_CIL_A_DESKEW_CONTROL_0
#define NVCSI_CIL_B_DESKEW_CONTROL_0_OFFSET regs[33]
#define NVCSI_CIL_B_CONTROL_0_OFFSET regs[34]
#define NVCSI_DPHY_CALIB_STATUS_IO_OFFSET 0x8
#define NVCSI_PHY_OFFSET 0x10000
#define NVCSI_CIL_B_OFFSET regs[35]
#define REGS_COUNT 36
////////
#define DESKEW_TIMEOUT_MSEC 100
/* Per-stream context for one CSI D-PHY deskew calibration run. */
struct nvcsi_deskew_context {
	unsigned int deskew_lanes;		/* lanes to calibrate — presumably a bitmask; confirm with callers */
	struct task_struct *deskew_kthread;	/* worker thread driving the calibration */
	struct completion thread_done;		/* signalled when the worker completes */
};
#if IS_ENABLED(CONFIG_TEGRA_GRHOST_NVCSI)
int nvcsi_deskew_apply_check(struct nvcsi_deskew_context *ctx);
int nvcsi_deskew_setup(struct nvcsi_deskew_context *ctx);
#else
/*
 * Stub used when CONFIG_TEGRA_GRHOST_NVCSI is disabled; reports success so
 * callers need no conditional code. Storage-class ordering fixed to the
 * idiomatic 'static inline int' (specifiers before the type).
 */
static inline int nvcsi_deskew_apply_check(struct nvcsi_deskew_context *ctx)
{
	return 0;
}
/*
 * Stub used when CONFIG_TEGRA_GRHOST_NVCSI is disabled; reports success so
 * callers need no conditional code. Storage-class ordering fixed to the
 * idiomatic 'static inline int' (specifiers before the type).
 */
static inline int nvcsi_deskew_setup(struct nvcsi_deskew_context *ctx)
{
	return 0;
}
#endif
void nvcsi_deskew_platform_setup(struct tegra_csi_device *dev, bool is_t19x);
void deskew_dbgfs_calc_bound(struct seq_file *s, long long input_stats);
void deskew_dbgfs_deskew_stats(struct seq_file *s);
#endif

View File

@@ -0,0 +1,31 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Tegra Graphics Host NVCSI
*
* Copyright (c) 2015-2022 NVIDIA Corporation. All rights reserved.
*/
#ifndef __NVHOST_NVCSI_H__
#define __NVHOST_NVCSI_H__
#define CFG_ERR_STATUS2VI_MASK_VC3 (0x1 << 24)
#define CFG_ERR_STATUS2VI_MASK_VC2 (0x1 << 16)
#define CFG_ERR_STATUS2VI_MASK_VC1 (0x1 << 8)
#define CFG_ERR_STATUS2VI_MASK_VC0 (0x1 << 0)
extern const struct file_operations tegra_nvcsi_ctrl_ops;
int nvcsi_finalize_poweron(struct platform_device *pdev);
int nvcsi_prepare_poweroff(struct platform_device *pdev);
#if IS_ENABLED(CONFIG_TEGRA_GRHOST_NVCSI)
int nvcsi_cil_sw_reset(int lanes, int enable);
#else
/*
 * Stub used when CONFIG_TEGRA_GRHOST_NVCSI is disabled; the CIL software
 * reset becomes a successful no-op. Storage-class ordering fixed to the
 * idiomatic 'static inline int' (specifiers before the type).
 */
static inline int nvcsi_cil_sw_reset(int lanes, int enable)
{
	return 0;
}
#endif
struct tegra_csi_device *tegra_get_mc_csi(void);
#endif

View File

@@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/version.h>
/*
* Upstream Linux commit bd4fb6d270bc ("arm64: Add support for SB
* barrier and patch in over DSB; ISB sequences") added speculation
* macro 'spec_bar' to inhibit speculation. This has since been removed
* from the upstream kernel starting with Linux v5.13, because there are
* no current users. Define this macro here for NVIDIA drivers to use.
*/
/*
 * NOTE(review): this expansion relies on the arm64 alternatives framework
 * (ALTERNATIVE, SB_BARRIER_INSN, ARM64_HAS_SB) being in scope at the point
 * of use — this header itself only includes <linux/version.h>; confirm
 * users include the relevant <asm/...> headers first. Also note the file
 * has no include guard; that is safe today only because re-defining an
 * identical macro is permitted, but a guard would be more robust.
 */
#define spec_bar() asm volatile(ALTERNATIVE("dsb nsh\nisb\n", \
					    SB_BARRIER_INSN"nop\n", \
					    ARM64_HAS_SB))

View File

@@ -0,0 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef __BWMGR_MC_H
#define __BWMGR_MC_H
#include <linux/types.h>
#include <linux/platform/tegra/iso_client.h>
/*
 * Apply efficiency derating to the requested bandwidths and return the
 * adjusted value — presumably also reporting a minimum ISO bandwidth via
 * iso_bw_min; confirm exact semantics against the implementation.
 */
unsigned long bwmgr_apply_efficiency(
		unsigned long bw, unsigned long iso_bw,
		unsigned long emc_max, u64 usage_flags,
		unsigned long *iso_bw_min, unsigned long iso_bw_nvdis,
		unsigned long iso_bw_vi);
void bwmgr_eff_init(void);
/* Convert between EMC frequency and achievable bandwidth. */
unsigned long bwmgr_freq_to_bw(unsigned long freq);
unsigned long bwmgr_bw_to_freq(unsigned long bw);
unsigned long bwmgr_get_lowest_iso_emc_freq(long iso_bw,
		long iso_bw_nvdis, long iso_bw_vi);
/* 'client' parameter named for readability (was an unnamed parameter). */
u32 tegra_bwmgr_get_max_iso_bw(enum tegra_iso_client client);
u32 bwmgr_dvfs_latency(u32 ufreq);
int bwmgr_iso_bw_percentage_max(void);
int bwmgr_get_emc_to_dram_freq_factor(void);
#endif /* __BWMGR_MC_H */

View File

@@ -0,0 +1,204 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef _INCLUDE_MACH_ISOMGR_H
#define _INCLUDE_MACH_ISOMGR_H
#include <linux/platform/tegra/emc_bwmgr.h>
#include <linux/platform/tegra/iso_client.h>
/* Magic value stored in each client handle to validate it (see isomgr_client.magic). */
#define ISOMGR_MAGIC 0x150A1C
/* handle to identify registered client */
#define tegra_isomgr_handle void *
/* callback to client to renegotiate ISO BW allocation */
typedef void (*tegra_isomgr_renegotiate)(void *priv,
					 u32 avail_bw); /* KB/sec */
/* Static description of one ISO client: identity, names and bwmgr linkage. */
struct isoclient_info {
	enum tegra_iso_client client;		/* ISO client id */
	char *name;				/* human-readable client name */
	char *dev_name;				/* associated device name */
	char *emc_clk_name;			/* EMC clock name for this client's BW requests */
	enum tegra_bwmgr_client_id bwmgr_id;	/* bandwidth-manager client id */
};
/*
 * Runtime state for one registered ISO bandwidth client; instances live in
 * the global isomgr_clients[] table and are validated via 'magic'.
 */
struct isomgr_client {
	u32 magic;		/* magic to identify handle */
	struct kref kref;	/* ref counting */
	s32 dedi_bw;		/* BW dedicated to this client (KB/sec) */
	s32 rsvd_bw;		/* BW reserved for this client (KB/sec) */
	s32 real_bw;		/* BW realized for this client (KB/sec) */
	s32 lti;		/* Client spec'd Latency Tolerance (usec) */
	s32 lto;		/* MC calculated Latency Tolerance (usec) */
	s32 rsvd_mf;		/* reserved minimum freq in support of LT */
	s32 real_mf;		/* realized minimum freq in support of LT */
	s32 real_mf_rq;		/* real_mf requested */
	tegra_isomgr_renegotiate renegotiate;	/* ask client to renegotiate */
	bool realize;		/* bw realization in progress */
	s32 sleep_bw;		/* sleeping for realize */
	s32 margin_bw;		/* BW set aside for this client (KB/sec) */
	u8 limit_bw_percentage;	/* Insufficient HW buffers cause BW to be
				 * limited to this percentage of DRAM BW
				 */
	void *priv;		/* client driver's private data */
	struct completion cmpl;	/* so we can sleep waiting for delta BW */
#ifdef CONFIG_COMMON_CLK
	struct tegra_bwmgr_client *bwmgr_handle;
#else
	struct clk *emc_clk;	/* client emc clk for bw */
#endif
#ifdef CONFIG_TEGRA_ISOMGR_SYSFS
	/* sysfs exposure of the per-client fields above */
	struct kobject *client_kobj;
	struct isomgr_client_attrs {
		struct kobj_attribute dedi_bw;
		struct kobj_attribute rsvd_bw;
		struct kobj_attribute real_bw;
		struct kobj_attribute lti;
		struct kobj_attribute lto;
		struct kobj_attribute rsvd_mf;
		struct kobj_attribute real_mf;
		struct kobj_attribute sleep_bw;
		struct kobj_attribute margin_bw;
	} client_attrs;
#endif /* CONFIG_TEGRA_ISOMGR_SYSFS */
};
/* Global ISO bandwidth manager state; a single instance 'isomgr' is exported below. */
struct isomgr {
	struct mutex lock;		/* to lock ALL isomgr state */
	struct task_struct *task;	/* check reentrant/mismatched locks */
#ifdef CONFIG_COMMON_CLK
	struct tegra_bwmgr_client *bwmgr_handle;
#else
	struct clk *emc_clk;		/* isomgr emc clock for floor freq */
#endif
	s32 lt_mf;			/* min freq to support worst LT */
	s32 lt_mf_rq;			/* requested lt_mf */
	s32 avail_bw;			/* globally available MC BW */
	s32 dedi_bw;			/* total BW 'dedicated' to clients */
	s32 sleep_bw;			/* pending bw requirement */
	u32 max_iso_bw;			/* max ISO BW MC can accommodate */
	struct kobject *kobj;		/* for sysfs linkage */
	struct isomgr_ops *ops;		/* ops structure for isomgr*/
};
extern struct isoclient_info *isoclient_info;
/*platform specific flag for requesting max emc floor req for camera client*/
extern u8 isomgr_camera_max_floor_req;
extern int isoclients;
extern bool client_valid[TEGRA_ISO_CLIENT_COUNT];
extern struct isomgr_client isomgr_clients[TEGRA_ISO_CLIENT_COUNT];
extern struct isomgr isomgr;
extern char *cname[];
/*
 * Chip-generation hooks backing the public API; instances are provided by
 * pre_t19x_isomgr_init()/t19x_isomgr_init() declared below.
 */
struct isomgr_ops {
	void (*isomgr_plat_init)(void);
	bool (*isomgr_plat_register)(u32 dedi_bw,
				     enum tegra_iso_client client);
	void (*isomgr_plat_unregister)(struct isomgr_client *cp);
	bool (*isomgr_plat_reserve)(struct isomgr_client *cp,
				    u32 bw, enum tegra_iso_client client);
	bool (*isomgr_plat_realize)(struct isomgr_client *cp);
	u32 (*isomgr_max_iso_bw)(enum tegra_iso_client client);
};
struct isomgr_ops *pre_t19x_isomgr_init(void);
struct isomgr_ops *t19x_isomgr_init(void);
#if defined(CONFIG_TEGRA_ISOMGR)
/* Register an ISO BW client */
tegra_isomgr_handle tegra_isomgr_register(enum tegra_iso_client client,
u32 dedicated_bw, /* KB/sec */
tegra_isomgr_renegotiate renegotiate,
void *priv);
/* Unregister an ISO BW client */
void tegra_isomgr_unregister(tegra_isomgr_handle handle);
/* Return the initialization status of isomgr */
bool tegra_isomgr_init_status(void);
/* Reserve ISO BW on behalf of client - don't apply, rval is dvfs thresh usec */
u32 tegra_isomgr_reserve(tegra_isomgr_handle handle,
u32 bw, /* KB/sec */
u32 lt); /* usec */
/* Realize client reservation - apply settings, rval is dvfs thresh usec */
u32 tegra_isomgr_realize(tegra_isomgr_handle handle);
/* This sets bw aside for the client specified; 'wait' presumably allows
 * blocking until the margin can be honored — confirm against implementation.
 */
int tegra_isomgr_set_margin(enum tegra_iso_client client, u32 bw, bool wait);
/* 'client' parameter named for readability (was an unnamed parameter). */
int tegra_isomgr_get_imp_time(enum tegra_iso_client client, u32 bw);
/* returns available in iso bw in KB/sec */
u32 tegra_isomgr_get_available_iso_bw(void);
/* returns total iso bw in KB/sec */
u32 tegra_isomgr_get_total_iso_bw(enum tegra_iso_client client);
/* Initialize isomgr.
* This api would be called by .init_machine during boot.
* isomgr clients, don't call this api.
*/
int __init isomgr_init(void);
#else
/*
 * Stubs for CONFIG_TEGRA_ISOMGR=n: registration and reservation appear to
 * always succeed, so client drivers need no conditional code.
 */
static inline tegra_isomgr_handle tegra_isomgr_register(
			enum tegra_iso_client client,
			u32 dedicated_bw,
			tegra_isomgr_renegotiate renegotiate,
			void *priv)
{
	/* return a dummy handle to allow client function
	 * as if isomgr were enabled.
	 */
	return (tegra_isomgr_handle)1;
}
static inline void tegra_isomgr_unregister(tegra_isomgr_handle handle) {}
/* Non-zero mimics a successful reserve (rval is dvfs threshold, usec). */
static inline u32 tegra_isomgr_reserve(tegra_isomgr_handle handle,
				       u32 bw, u32 lt)
{
	return 1;
}
/* Non-zero mimics a successful realize (rval is dvfs threshold, usec). */
static inline u32 tegra_isomgr_realize(tegra_isomgr_handle handle)
{
	return 1;
}
static inline int tegra_isomgr_set_margin(enum tegra_iso_client client, u32 bw)
{
return 0;
}
/* Stub: no implementation-time estimate available without isomgr. */
static inline int tegra_isomgr_get_imp_time(enum tegra_iso_client client,
					    u32 bw)
{
	return 0;
}
/* Stub: report effectively unlimited available ISO bandwidth. */
static inline u32 tegra_isomgr_get_available_iso_bw(void)
{
	return UINT_MAX;
}
/* Stub: report effectively unlimited total ISO bandwidth. */
static inline u32 tegra_isomgr_get_total_iso_bw(enum tegra_iso_client client)
{
	return UINT_MAX;
}
/* Stub: nothing to initialize. */
static inline int isomgr_init(void)
{
	return 0;
}
#endif
#endif /* _INCLUDE_MACH_ISOMGR_H */

View File

@@ -0,0 +1,330 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef _MACH_TEGRA_LATENCY_ALLOWANCE_H_
#define _MACH_TEGRA_LATENCY_ALLOWANCE_H_
/* First display client in enum tegra_la_id; display clients are contiguous. */
#define FIRST_DISP_CLIENT_ID	TEGRA_LA_DISPLAY_0A
/* Zero-based display-client index; argument parenthesized for macro hygiene. */
#define DISP_CLIENT_LA_ID(id)	((id) - FIRST_DISP_CLIENT_ID)
/* Note:- When adding new display realted IDs, please add them adjacent/amongst
the existing display related IDs. This is required because certain
display related macros/strcuts assume that all display related
tegra_la_ids are adjacent to each other.
Please observe the same guidelines as display clients, when adding new
camera clients. All camera clients need to be located adjacent to each
other in tegra_la_id. This is required because certain camera related
macros/structs assume that all camera related tegra_la_ids are
adjacent to each other. */
enum tegra_la_id {
TEGRA_LA_AFIR = 0, /* T30 specific */
TEGRA_LA_AFIW, /* T30 specific */
TEGRA_LA_AVPC_ARM7R,
TEGRA_LA_AVPC_ARM7W,
TEGRA_LA_DISPLAY_0A,
TEGRA_LA_DISPLAY_0B,
TEGRA_LA_DISPLAY_0C,
TEGRA_LA_DISPLAY_1B, /* T30 specific */
TEGRA_LA_DISPLAY_HC,
TEGRA_LA_DISPLAY_0AB,
TEGRA_LA_DISPLAY_0BB,
TEGRA_LA_DISPLAY_0CB,
TEGRA_LA_DISPLAY_1BB, /* T30 specific */
TEGRA_LA_DISPLAY_HCB,
TEGRA_LA_DISPLAY_T, /* T14x specific */
TEGRA_LA_DISPLAYD, /* T14x specific */
TEGRA_LA_EPPUP,
TEGRA_LA_EPPU,
TEGRA_LA_EPPV,
TEGRA_LA_EPPY,
TEGRA_LA_G2PR,
TEGRA_LA_G2SR,
TEGRA_LA_G2DR,
TEGRA_LA_G2DW,
TEGRA_LA_GPUSRD, /* T12x specific */
TEGRA_LA_GPUSWR, /* T12x specific */
TEGRA_LA_HOST1X_DMAR,
TEGRA_LA_HOST1XR,
TEGRA_LA_HOST1XW,
TEGRA_LA_HDAR,
TEGRA_LA_HDAW,
TEGRA_LA_ISPW,
TEGRA_LA_MPCORER,
TEGRA_LA_MPCOREW,
TEGRA_LA_MPCORE_LPR,
TEGRA_LA_MPCORE_LPW,
TEGRA_LA_MPE_UNIFBR, /* T30 specific */
TEGRA_LA_MPE_IPRED, /* T30 specific */
TEGRA_LA_MPE_AMEMRD, /* T30 specific */
TEGRA_LA_MPE_CSRD, /* T30 specific */
TEGRA_LA_MPE_UNIFBW, /* T30 specific */
TEGRA_LA_MPE_CSWR, /* T30 specific */
TEGRA_LA_FDCDRD,
TEGRA_LA_IDXSRD,
TEGRA_LA_TEXSRD,
TEGRA_LA_TEXL2SRD = TEGRA_LA_TEXSRD, /* T11x, T14x specific */
TEGRA_LA_FDCDWR,
TEGRA_LA_FDCDRD2,
TEGRA_LA_IDXSRD2, /* T30 specific */
TEGRA_LA_TEXSRD2, /* T30 specific */
TEGRA_LA_FDCDWR2,
TEGRA_LA_PPCS_AHBDMAR,
TEGRA_LA_PPCS_AHBSLVR,
TEGRA_LA_PPCS_AHBDMAW,
TEGRA_LA_PPCS_AHBSLVW,
TEGRA_LA_PTCR,
TEGRA_LA_SATAR, /* T30, T19x */
TEGRA_LA_SATAW, /* T30, T19x */
TEGRA_LA_VDE_BSEVR,
TEGRA_LA_VDE_MBER,
TEGRA_LA_VDE_MCER,
TEGRA_LA_VDE_TPER,
TEGRA_LA_VDE_BSEVW,
TEGRA_LA_VDE_DBGW,
TEGRA_LA_VDE_MBEW,
TEGRA_LA_VDE_TPMW,
TEGRA_LA_VI_RUV, /* T30 specific */
TEGRA_LA_VI_WSB,
TEGRA_LA_VI_WU,
TEGRA_LA_VI_WV,
TEGRA_LA_VI_WY,
TEGRA_LA_MSENCSRD, /* T11x, T14x specific */
TEGRA_LA_MSENCSWR, /* T11x, T14x specific */
TEGRA_LA_XUSB_HOSTR, /* T11x, T19x */
TEGRA_LA_XUSB_HOSTW, /* T11x, T19x */
TEGRA_LA_XUSB_DEVR, /* T11x, T19x */
TEGRA_LA_XUSB_DEVW, /* T11x, T19x */
TEGRA_LA_FDCDRD3, /* T11x specific */
TEGRA_LA_FDCDRD4, /* T11x specific */
TEGRA_LA_FDCDWR3, /* T11x specific */
TEGRA_LA_FDCDWR4, /* T11x specific */
TEGRA_LA_EMUCIFR, /* T11x, T14x specific */
TEGRA_LA_EMUCIFW, /* T11x, T14x specific */
TEGRA_LA_TSECSRD, /* T11x, T14x, T19x */
TEGRA_LA_TSECSWR, /* T11x, T14x, T19x */
TEGRA_LA_VI_W, /* T14x specific */
TEGRA_LA_ISP_RA, /* T14x specific */
TEGRA_LA_ISP_WA, /* T14x specific */
TEGRA_LA_ISP_WB, /* T14x specific */
TEGRA_LA_ISP_RAB, /* T12x specific */
TEGRA_LA_ISP_WAB, /* T12x specific */
TEGRA_LA_ISP_WBB, /* T12x specific */
TEGRA_LA_BBCR, /* T14x specific */
TEGRA_LA_BBCW, /* T14x specific */
TEGRA_LA_BBCLLR, /* T14x specific */
TEGRA_LA_SDMMCR, /* T12x, T19x */
TEGRA_LA_SDMMCRA, /* T12x, T19x */
TEGRA_LA_SDMMCRAA, /* T12x specific */
TEGRA_LA_SDMMCRAB, /* T12x, T19x */
TEGRA_LA_SDMMCW, /* T12x, T19x */
TEGRA_LA_SDMMCWA, /* T12x, T19x */
TEGRA_LA_SDMMCWAA, /* T12x specific */
TEGRA_LA_SDMMCWAB, /* T12x, T19x */
TEGRA_LA_VICSRD, /* T12x, T19x */
TEGRA_LA_VICSWR, /* T12x, T19x */
TEGRA_LA_TSECBSRD, /* T21x specific */
TEGRA_LA_TSECBSWR, /* T21x specific */
TEGRA_LA_NVDECR, /* T21x specific */
TEGRA_LA_NVDECW, /* T21x specific */
TEGRA_LA_AONR, /* T18x, T19x */
TEGRA_LA_AONW, /* T18x, T19x */
TEGRA_LA_AONDMAR, /* T18x, T19x */
TEGRA_LA_AONDMAW, /* T18x, T19x */
TEGRA_LA_APEDMAR, /* T18x, T19x */
TEGRA_LA_APEDMAW, /* T18x, T19x */
TEGRA_LA_APER, /* T18x, T19x */
TEGRA_LA_APEW, /* T18x, T19x */
TEGRA_LA_AXISR, /* T18x, T19x */
TEGRA_LA_AXISW, /* T18x, T19x */
TEGRA_LA_BPMPR, /* T18x, T19x */
TEGRA_LA_BPMPW, /* T18x, T19x */
TEGRA_LA_BPMPDMAR, /* T18x, T19x */
TEGRA_LA_BPMPDMAW, /* T18x, T19x */
TEGRA_LA_EQOSR, /* T18x, T19x */
TEGRA_LA_EQOSW, /* T18x, T19x */
TEGRA_LA_ETRR, /* T18x, T19x */
TEGRA_LA_ETRW, /* T18x, T19x */
TEGRA_LA_GPUSRD2, /* T18x specific */
TEGRA_LA_GPUSWR2, /* T18x specific */
TEGRA_LA_NVDISPLAYR, /* T18x, T19x */
TEGRA_LA_NVENCSRD, /* T18x, T19x */
TEGRA_LA_NVENCSWR, /* T18x, T19x */
TEGRA_LA_NVJPGSRD, /* T18x, T19x */
TEGRA_LA_NVJPGSWR, /* T18x, T19x */
TEGRA_LA_SCER, /* T18x, T19x */
TEGRA_LA_SCEW, /* T18x, T19x */
TEGRA_LA_SCEDMAR, /* T18x, T19x */
TEGRA_LA_SCEDMAW, /* T18x, T19x */
TEGRA_LA_SESRD, /* T18x, T19x */
TEGRA_LA_SESWR, /* T18x, T19x */
TEGRA_LA_UFSHCR, /* T18x, T19x */
TEGRA_LA_UFSHCW, /* T18x, T19x */
TEGRA_LA_AXIAPR, /* T19x specific */
TEGRA_LA_AXIAPW, /* T19x specific */
TEGRA_LA_CIFLL_WR, /* T19x specific */
TEGRA_LA_DLA0FALRDB, /* T19x specific */
TEGRA_LA_DLA0RDA, /* T19x specific */
TEGRA_LA_DLA0FALWRB, /* T19x specific */
TEGRA_LA_DLA0WRA, /* T19x specific */
TEGRA_LA_DLA0RDA1, /* T19x specific */
TEGRA_LA_DLA1RDA1, /* T19x specific */
TEGRA_LA_DLA1FALRDB, /* T19x specific */
TEGRA_LA_DLA1RDA, /* T19x specific */
TEGRA_LA_DLA1FALWRB, /* T19x specific */
TEGRA_LA_DLA1WRA, /* T19x specific */
TEGRA_LA_HOST1XDMAR, /* T19x specific */
TEGRA_LA_ISPFALR, /* T19x specific */
TEGRA_LA_ISPRA, /* T19x specific */
TEGRA_LA_ISPWA, /* T19x specific */
TEGRA_LA_ISPWB, /* T19x specific */
TEGRA_LA_ISPFALW, /* T19x specific */
TEGRA_LA_ISPRA1, /* T19x specific */
TEGRA_LA_MIU0R, /* T19x specific */
TEGRA_LA_MIU0W, /* T19x specific */
TEGRA_LA_MIU1R, /* T19x specific */
TEGRA_LA_MIU1W, /* T19x specific */
TEGRA_LA_MIU2R, /* T19x specific */
TEGRA_LA_MIU2W, /* T19x specific */
TEGRA_LA_MIU3R, /* T19x specific */
TEGRA_LA_MIU3W, /* T19x specific */
TEGRA_LA_MIU4R, /* T19x specific */
TEGRA_LA_MIU4W, /* T19x specific */
TEGRA_LA_MIU5R, /* T19x specific */
TEGRA_LA_MIU5W, /* T19x specific */
TEGRA_LA_MIU6R, /* T19x specific */
TEGRA_LA_MIU6W, /* T19x specific */
TEGRA_LA_MIU7R, /* T19x specific */
TEGRA_LA_MIU7W, /* T19x specific */
TEGRA_LA_NVDECSRD, /* T19x specific */
TEGRA_LA_NVDECSWR, /* T19x specific */
TEGRA_LA_NVDEC1SRD, /* T19x specific */
TEGRA_LA_NVDECSRD1, /* T19x specific */
TEGRA_LA_NVDEC1SRD1, /* T19x specific */
TEGRA_LA_NVDEC1SWR, /* T19x specific */
TEGRA_LA_NVENC1SRD, /* T19x specific */
TEGRA_LA_NVENC1SWR, /* T19x specific */
TEGRA_LA_NVENC1SRD1, /* T19x specific */
TEGRA_LA_NVENCSRD1, /* T19x specific */
TEGRA_LA_PCIE0R, /* T19x specific */
TEGRA_LA_PCIE0W, /* T19x specific */
TEGRA_LA_PCIE1R, /* T19x specific */
TEGRA_LA_PCIE1W, /* T19x specific */
TEGRA_LA_PCIE2AR, /* T19x specific */
TEGRA_LA_PCIE2AW, /* T19x specific */
TEGRA_LA_PCIE3R, /* T19x specific */
TEGRA_LA_PCIE3W, /* T19x specific */
TEGRA_LA_PCIE4R, /* T19x specific */
TEGRA_LA_PCIE4W, /* T19x specific */
TEGRA_LA_PCIE5R, /* T19x specific */
TEGRA_LA_PCIE5W, /* T19x specific */
TEGRA_LA_PCIE0R1, /* T19x specific */
TEGRA_LA_PCIE5R1, /* T19x specific */
TEGRA_LA_PVA0RDA, /* T19x specific */
TEGRA_LA_PVA0RDB, /* T19x specific */
TEGRA_LA_PVA0RDC, /* T19x specific */
TEGRA_LA_PVA0WRA, /* T19x specific */
TEGRA_LA_PVA0WRB, /* T19x specific */
TEGRA_LA_PVA0WRC, /* T19x specific */
TEGRA_LA_PVA0RDA1, /* T19x specific */
TEGRA_LA_PVA0RDB1, /* T19x specific */
TEGRA_LA_PVA1RDA, /* T19x specific */
TEGRA_LA_PVA1RDB, /* T19x specific */
TEGRA_LA_PVA1RDC, /* T19x specific */
TEGRA_LA_PVA1WRA, /* T19x specific */
TEGRA_LA_PVA1WRB, /* T19x specific */
TEGRA_LA_PVA1WRC, /* T19x specific */
TEGRA_LA_PVA1RDA1, /* T19x specific */
TEGRA_LA_PVA1RDB1, /* T19x specific */
TEGRA_LA_RCEDMAR, /* T19x specific */
TEGRA_LA_RCEDMAW, /* T19x specific */
TEGRA_LA_RCER, /* T19x specific */
TEGRA_LA_RCEW, /* T19x specific */
TEGRA_LA_TSECSRDB, /* T19x specific */
TEGRA_LA_TSECSWRB, /* T19x specific */
TEGRA_LA_VIW, /* T19x specific */
TEGRA_LA_VICSRD1, /* T19x specific */
TEGRA_LA_VIFALR, /* T19x specific */
TEGRA_LA_VIFALW, /* T19x specific */
TEGRA_LA_WCAM, /* T19x specific */
TEGRA_LA_NVLRHP, /* T19x specific */
TEGRA_LA_DGPU, /* T19x specific */
TEGRA_LA_IGPU, /* T19x specific */
TEGRA_LA_MAX_ID
};
/* Classes of display windows used to select per-window LA parameters. */
enum disp_win_type {
	TEGRA_LA_DISP_WIN_TYPE_FULL,
	TEGRA_LA_DISP_WIN_TYPE_FULLA,
	TEGRA_LA_DISP_WIN_TYPE_FULLB,
	TEGRA_LA_DISP_WIN_TYPE_SIMPLE,
	TEGRA_LA_DISP_WIN_TYPE_CURSOR,
	TEGRA_LA_DISP_WIN_TYPE_NUM_TYPES	/* count of types; keep last */
};
/* Per-window-type display client properties consumed by the LA math. */
struct disp_client {
	enum disp_win_type win_type;
	unsigned int mccif_size_bytes;		/* presumably MC client interface buffer size — confirm */
	unsigned int line_buf_sz_bytes;		/* line buffer size */
};
/* Display-controller inputs to the latency-allowance calculation. */
struct dc_to_la_params {
	unsigned int thresh_lwm_bytes;		/* low-watermark threshold */
	unsigned int spool_up_buffering_adj_bytes;
	unsigned int drain_time_usec_fp;	/* drain time, usec, fixed point */
	unsigned int total_dc0_bw;
	unsigned int total_dc1_bw;
};
/* LA parameters handed back to the display controller. */
struct la_to_dc_params {
	unsigned int fp_factor;			/* fixed-point scaling factor */
	unsigned int (*la_real_to_fp)(unsigned int val);
	unsigned int (*la_fp_to_real)(unsigned int val);
	unsigned int static_la_minus_snap_arb_to_row_srt_emcclks_fp;
	unsigned int dram_width_bits;
	unsigned int disp_catchup_factor_fp;
};
int tegra_set_disp_latency_allowance(enum tegra_la_id id,
unsigned long emc_freq_hz,
unsigned int bandwidth_in_mbps,
struct dc_to_la_params disp_params);
int tegra_check_disp_latency_allowance(enum tegra_la_id id,
unsigned long emc_freq_hz,
unsigned int bw_mbps,
struct dc_to_la_params disp_params);
int tegra_set_latency_allowance(enum tegra_la_id id,
unsigned int bandwidth_in_mbps);
int tegra_set_camera_ptsa(enum tegra_la_id id,
unsigned int bw_mbps,
int is_hiso);
void tegra_latency_allowance_update_tick_length(unsigned int new_ns_per_tick);
int tegra_enable_latency_scaling(enum tegra_la_id id,
unsigned int threshold_low,
unsigned int threshold_mid,
unsigned int threshold_high);
void tegra_disable_latency_scaling(enum tegra_la_id id);
void mc_pcie_init(void);
struct la_to_dc_params tegra_get_la_to_dc_params(void);
extern const struct disp_client *tegra_la_disp_clients_info;
#endif /* _MACH_TEGRA_LATENCY_ALLOWANCE_H_ */

View File

@@ -0,0 +1,403 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* camera_common.h - utilities for tegra camera driver
*
* Copyright (c) 2015-2022, NVIDIA Corporation. All rights reserved.
*/
#ifndef __camera_common__
#define __camera_common__
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_device.h>
#include <linux/v4l2-mediabus.h>
#include <linux/version.h>
#include <linux/videodev2.h>
#include <linux/module.h>
#include <media/camera_version_utils.h>
#include <media/nvc_focus.h>
#include <media/sensor_common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
#include <media/v4l2-ctrls.h>
#include <media/tegracam_core.h>
/*
* Scaling factor for converting a Q10.22 fixed point value
* back to its original floating point value
*/
#define FIXED_POINT_SCALING_FACTOR (1ULL << 22)
/* One 8-bit register setting: 16-bit address, 8-bit value. */
struct reg_8 {
	u16 addr;
	u8 val;
};
/* One 16-bit register setting: 16-bit address, 16-bit value. */
struct reg_16 {
	u16 addr;
	u16 val;
};
/* Regulators, clock and GPIOs that power a camera sensor module. */
struct camera_common_power_rail {
	struct regulator *dvdd;		/* digital supply */
	struct regulator *avdd;		/* analog supply */
	struct regulator *iovdd;	/* I/O supply */
	struct regulator *vcmvdd;	/* VCM (focuser) supply */
	struct clk *mclk;		/* sensor master clock */
	unsigned int pwdn_gpio;		/* power-down GPIO */
	unsigned int reset_gpio;	/* reset GPIO */
	unsigned int af_gpio;		/* autofocus GPIO */
	bool state;			/* presumably true while powered on — confirm */
};
/* Regulator names used to look up the rails above. */
struct camera_common_regulators {
	const char *avdd;
	const char *dvdd;
	const char *iovdd;
	const char *vcmvdd;
};
/* Platform data, typically parsed from DT (see camera_common_sensor_ops.parse_dt). */
struct camera_common_pdata {
	const char *mclk_name; /* NULL for default default_mclk */
	const char *parentclk_name; /* NULL for no parent clock*/
	unsigned int pwdn_gpio;
	unsigned int reset_gpio;
	unsigned int af_gpio;
	bool ext_reg;
	int (*power_on)(struct camera_common_power_rail *pw);
	int (*power_off)(struct camera_common_power_rail *pw);
	struct camera_common_regulators regulators;
	bool use_cam_gpio;
	bool has_eeprom;
	bool v_flip;			/* vertical flip */
	bool h_mirror;			/* horizontal mirror */
	unsigned int fuse_id_addr;
	unsigned int avdd_latency;	/* avdd settle latency — units not shown here; confirm */
};
/* I2C plumbing for accessing a sensor-attached EEPROM. */
struct camera_common_eeprom_data {
	struct i2c_client *i2c_client;
	struct i2c_adapter *adap;
	struct i2c_board_info brd;
	struct regmap *regmap;
};
int
regmap_util_write_table_8(struct regmap *regmap,
const struct reg_8 table[],
const struct reg_8 override_list[],
int num_override_regs,
u16 wait_ms_addr, u16 end_addr);
int
regmap_util_write_table_16_as_8(struct regmap *regmap,
const struct reg_16 table[],
const struct reg_16 override_list[],
int num_override_regs,
u16 wait_ms_addr, u16 end_addr);
/* Generic on/off state for switch-style V4L2 menu controls. */
enum switch_state {
	SWITCH_OFF,
	SWITCH_ON,
};
/* Menu entries for a boolean switch control. */
static const s64 switch_ctrl_qmenu[] = {
	SWITCH_OFF, SWITCH_ON
};
/*
 * The memory buffers allocated from nvrm are aligned to
 * fulfill the hardware requirements:
 * - size in alignment with a multiple of 128K/64K bytes,
 * see CL http://git-master/r/256468 and bug 1321091.
 */
static const s64 size_align_ctrl_qmenu[] = {
	1, (64 * 1024), (128 * 1024),
};
/* One sensor frame format: frame size plus its supported frame rates. */
struct camera_common_frmfmt {
	struct v4l2_frmsize_discrete size;
	const int *framerates;		/* table of supported frame rates */
	int num_framerates;
	bool hdr_en;			/* HDR enabled for this mode */
	int mode;			/* sensor mode index */
};
/* Mapping between a media-bus code and its V4L2 pixel format/colorimetry. */
struct camera_common_colorfmt {
	unsigned int code;		/* media-bus format code */
	enum v4l2_colorspace colorspace;
	int pix_fmt;			/* V4L2 pixel format */
	enum v4l2_xfer_func xfer_func;
	enum v4l2_ycbcr_encoding ycbcr_enc;
	enum v4l2_quantization quantization;
};
/* Sensor frame-synchronization timing (see the get_framesync op below). */
struct camera_common_framesync {
	u32 inck;	/* kHz */
	u32 xhs;	/* in inck */
	u32 xvs;	/* in xhs */
	u32 fps;	/* frames per 1000 seconds */
};
/* Forward declarations; full definitions live in the tegracam headers. */
struct tegracam_device;
struct camera_common_data;
/* Sensor-specific operations implemented by each camera sensor driver. */
struct camera_common_sensor_ops {
	u32 numfrmfmts;					/* entries in frmfmt_table */
	const struct camera_common_frmfmt *frmfmt_table;
	int (*power_on)(struct camera_common_data *s_data);
	int (*power_off)(struct camera_common_data *s_data);
	int (*write_reg)(struct camera_common_data *s_data,
			 u16 addr, u8 val);
	int (*read_reg)(struct camera_common_data *s_data,
			u16 addr, u8 *val);
	struct camera_common_pdata *(*parse_dt)(struct tegracam_device *tc_dev);
	int (*power_get)(struct tegracam_device *tc_dev);
	int (*power_put)(struct tegracam_device *tc_dev);
	int (*get_framesync)(struct camera_common_data *s_data,
			     struct camera_common_framesync *vshs);
	int (*set_mode)(struct tegracam_device *tc_dev);
	int (*start_streaming)(struct tegracam_device *tc_dev);
	int (*stop_streaming)(struct tegracam_device *tc_dev);
};
/* Command blobs accumulated while programming sensor mode and controls. */
struct tegracam_sensor_data {
	struct sensor_blob mode_blob;
	struct sensor_blob ctrls_blob;
};
/* Control callbacks a sensor driver registers with the tegracam layer. */
struct tegracam_ctrl_ops {
	u32 numctrls;				/* entries in ctrl_cid_list */
	u32 string_ctrl_size[TEGRA_CAM_MAX_STRING_CONTROLS];
	u32 compound_ctrl_size[TEGRA_CAM_MAX_COMPOUND_CONTROLS];
	const u32 *ctrl_cid_list;
	bool is_blob_supported;			/* presumably selects the *_ex blob setters — confirm */
	int (*set_gain)(struct tegracam_device *tc_dev, s64 val);
	int (*set_exposure)(struct tegracam_device *tc_dev, s64 val);
	int (*set_exposure_short)(struct tegracam_device *tc_dev, s64 val);
	int (*set_frame_rate)(struct tegracam_device *tc_dev, s64 val);
	int (*set_group_hold)(struct tegracam_device *tc_dev, bool val);
	int (*fill_string_ctrl)(struct tegracam_device *tc_dev,
				struct v4l2_ctrl *ctrl);
	int (*fill_compound_ctrl)(struct tegracam_device *tc_dev,
				  struct v4l2_ctrl *ctrl);
	int (*set_gain_ex)(struct tegracam_device *tc_dev,
			   struct sensor_blob *blob, s64 val);
	int (*set_exposure_ex)(struct tegracam_device *tc_dev,
			       struct sensor_blob *blob, s64 val);
	int (*set_frame_rate_ex)(struct tegracam_device *tc_dev,
				 struct sensor_blob *blob, s64 val);
	int (*set_group_hold_ex)(struct tegracam_device *tc_dev,
				 struct sensor_blob *blob, bool val);
};
/* V4L2 control handler plus the tegracam bookkeeping wrapped around it. */
struct tegracam_ctrl_handler {
	struct v4l2_ctrl_handler ctrl_handler;
	const struct tegracam_ctrl_ops *ctrl_ops;
	struct tegracam_device *tc_dev;
	struct tegracam_sensor_data sensor_data;
	int numctrls;
	struct v4l2_ctrl *ctrls[MAX_CID_CONTROLS];
};
/* Core per-sensor state shared by the camera_common helper functions. */
struct camera_common_data {
	struct camera_common_sensor_ops *ops;
	struct v4l2_ctrl_handler *ctrl_handler;
	struct device *dev;
	const struct camera_common_frmfmt *frmfmt;
	const struct camera_common_colorfmt *colorfmt;
	struct dentry *debugdir;		/* debugfs directory */
	struct camera_common_power_rail *power;
	struct v4l2_subdev subdev;
	struct v4l2_ctrl **ctrls;
	struct module *owner;
	struct sensor_properties sensor_props;
	/* TODO: cleanup needed once all the sensors adapt new framework */
	struct tegracam_ctrl_handler *tegracam_ctrl_hdl;
	struct regmap *regmap;
	struct camera_common_pdata *pdata;
	/* TODO: cleanup needed for priv once all the sensors adapt new framework */
	void *priv;
	int numctrls;
	int csi_port;
	int numlanes;
	int mode;
	int mode_prop_idx;
	int numfmts;
	int def_mode, def_width, def_height;	/* defaults */
	int def_clk_freq;
	int fmt_width, fmt_height;		/* currently configured format */
	int sensor_mode_id;
	bool use_sensor_mode_id;
	bool override_enable;
	u32 version;
};
struct camera_common_focuser_data;
/* Focuser-specific operations implemented by each focuser driver. */
struct camera_common_focuser_ops {
	int (*power_on)(struct camera_common_focuser_data *s_data);
	int (*power_off)(struct camera_common_focuser_data *s_data);
	int (*load_config)(struct camera_common_focuser_data *s_data);
	int (*ctrls_init)(struct camera_common_focuser_data *s_data);
};
/* Per-focuser state, analogous to camera_common_data for sensors. */
struct camera_common_focuser_data {
	struct camera_common_focuser_ops *ops;
	struct v4l2_ctrl_handler *ctrl_handler;
	struct v4l2_subdev subdev;
	struct v4l2_ctrl **ctrls;
	struct device *dev;
	struct nv_focuser_config config;
	void *priv;			/* driver private data */
	int pwr_dev;
	int def_position;		/* default focuser position */
};
/*
 * Sleep for roughly delay_base milliseconds. A 500 us slack window is
 * passed to usleep_range() so the timer subsystem may coalesce wakeups.
 */
static inline void msleep_range(unsigned int delay_base)
{
	unsigned int min_us = delay_base * 1000;

	usleep_range(min_us, min_us + 500);
}
/*
 * Map a struct device back to its camera_common_data via the v4l2 subdev
 * stored in drvdata. Returns NULL when sensor_common_parse_num_modes()
 * reports zero modes for the device (i.e. it is not a camera_common-managed
 * sensor).
 */
static inline struct camera_common_data *to_camera_common_data(
	const struct device *dev)
{
	if (sensor_common_parse_num_modes(dev))
		return container_of(dev_get_drvdata(dev),
		    struct camera_common_data, subdev);
	return NULL;
}
/* Map a struct device back to its focuser data via the subdev in drvdata. */
static inline struct camera_common_focuser_data *to_camera_common_focuser_data(
	const struct device *dev)
{
	return container_of(dev_get_drvdata(dev),
	    struct camera_common_focuser_data, subdev);
}
int camera_common_g_ctrl(struct camera_common_data *s_data,
struct v4l2_control *control);
int camera_common_regulator_get(struct device *dev,
struct regulator **vreg, const char *vreg_name);
int camera_common_parse_clocks(struct device *dev,
struct camera_common_pdata *pdata);
int camera_common_parse_ports(struct device *dev,
struct camera_common_data *s_data);
int camera_common_mclk_enable(struct camera_common_data *s_data);
void camera_common_mclk_disable(struct camera_common_data *s_data);
int camera_common_parse_general_properties(struct device *dev,
struct camera_common_data *s_data);
int camera_common_debugfs_show(struct seq_file *s, void *unused);
ssize_t camera_common_debugfs_write(
struct file *file,
char const __user *buf,
size_t count,
loff_t *offset);
int camera_common_debugfs_open(struct inode *inode, struct file *file);
void camera_common_remove_debugfs(struct camera_common_data *s_data);
void camera_common_create_debugfs(struct camera_common_data *s_data,
const char *name);
const struct camera_common_colorfmt *camera_common_find_datafmt(
unsigned int code);
int camera_common_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_mbus_code_enum *code);
int camera_common_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
unsigned int *code);
int camera_common_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf);
int camera_common_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf);
int camera_common_g_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf);
int camera_common_enum_framesizes(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_frame_size_enum *fse);
int camera_common_enum_frameintervals(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_frame_interval_enum *fie);
int camera_common_set_power(struct camera_common_data *data, int on);
int camera_common_s_power(struct v4l2_subdev *sd, int on);
void camera_common_dpd_disable(struct camera_common_data *s_data);
void camera_common_dpd_enable(struct camera_common_data *s_data);
int camera_common_get_mbus_config(struct v4l2_subdev *sd,
unsigned int pad,
struct v4l2_mbus_config *cfg);
int camera_common_get_framesync(struct v4l2_subdev *sd,
struct camera_common_framesync *vshs);
/* Common initialize and cleanup for camera */
int camera_common_initialize(struct camera_common_data *s_data,
const char *dev_name);
void camera_common_cleanup(struct camera_common_data *s_data);
/* Focuser */
int camera_common_focuser_init(struct camera_common_focuser_data *s_data);
int camera_common_focuser_s_power(struct v4l2_subdev *sd, int on);
const struct camera_common_colorfmt *camera_common_find_pixelfmt(
unsigned int pix_fmt);
/* common control layer init */
int tegracam_ctrl_set_overrides(struct tegracam_ctrl_handler *handler);
int tegracam_ctrl_handler_init(struct tegracam_ctrl_handler *handler);
int tegracam_init_ctrl_ranges(struct tegracam_ctrl_handler *handler);
int tegracam_init_ctrl_ranges_by_mode(
struct tegracam_ctrl_handler *handler,
u32 modeidx);
/* Regmap / RTCPU I2C driver interface */
struct tegra_i2c_rtcpu_sensor;
struct tegra_i2c_rtcpu_config;
struct camera_common_i2c {
struct regmap *regmap;
struct tegra_i2c_rtcpu_sensor *rt_sensor;
};
int camera_common_i2c_init(
struct camera_common_i2c *sensor,
struct i2c_client *client,
struct regmap_config *regmap_config,
const struct tegra_i2c_rtcpu_config *rtcpu_config);
int camera_common_i2c_aggregate(
struct camera_common_i2c *sensor,
bool start);
int camera_common_i2c_set_frame_id(
struct camera_common_i2c *sensor,
int frame_id);
int camera_common_i2c_read_reg8(
struct camera_common_i2c *sensor,
unsigned int addr,
u8 *data,
unsigned int count);
int camera_common_i2c_write_reg8(
struct camera_common_i2c *sensor,
unsigned int addr,
const u8 *data,
unsigned int count);
int camera_common_i2c_write_table_8(
struct camera_common_i2c *sensor,
const struct reg_8 table[],
const struct reg_8 override_list[],
int num_override_regs, u16 wait_ms_addr, u16 end_addr);
#endif /* __camera_common__ */

View File

@@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* camera_version_utils.h - utilities for different kernel versions
* camera driver supports
*
* Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef __camera_version_utils__
#define __camera_version_utils__
#include <linux/videodev2.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/debugfs.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-mediabus.h>
#include <media/videobuf2-dma-contig.h>
#include <media/v4l2-dv-timings.h>
int tegra_media_entity_init(struct media_entity *entity, u16 num_pads,
struct media_pad *pad, bool is_subdev, bool is_sensor);
int tegra_media_create_link(struct media_entity *source, u16 source_pad,
struct media_entity *sink, u16 sink_pad, u32 flags);
bool tegra_is_v4l2_subdev(struct media_entity *entity);
bool tegra_v4l2_match_dv_timings(struct v4l2_dv_timings *t1,
struct v4l2_dv_timings *t2,
unsigned pclock_delta,
bool match_reduced_fps);
int tegra_vb2_dma_init(struct device *dev, void **alloc_ctx,
unsigned int size, atomic_t *refcount);
void tegra_vb2_dma_cleanup(struct device *dev, void *alloc_ctx,
atomic_t *refcount);
#endif

171
include/media/csi.h Normal file
View File

@@ -0,0 +1,171 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NVIDIA Tegra CSI Device Header
*
* Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef __CSI_H_
#define __CSI_H_
#include <linux/minmax.h>
#include <media/media-entity.h>
#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-subdev.h>
#include <media/camera_common.h>
#include <media/vi2_registers.h>
#include <media/csi4_registers.h>
#include <linux/platform_device.h>
#include "soc/tegra/camrtc-capture.h"
#define MAX_CSI_BLOCK_LANES 4
#define NUM_TPG_INSTANCE 6
/*
 * Evaluate to 1 if @port is a valid NVCSI port (not beyond NVCSI_PORT_H),
 * 0 otherwise. The argument is parenthesized so that expression arguments
 * (e.g. a conditional expression) expand with correct precedence; the
 * unparenthesized form mis-expanded `csi_port_is_valid(c ? a : b)`.
 */
#define csi_port_is_valid(port) ((port) > NVCSI_PORT_H ? 0 : 1)
enum camera_gang_mode {
CAMERA_NO_GANG_MODE = 0,
CAMERA_GANG_L_R = 1,
CAMERA_GANG_T_B,
CAMERA_GANG_R_L,
CAMERA_GANG_B_T
};
struct tegra_channel;
/*
 * One TPG (test pattern generator) frame format entry: a supported
 * frame size plus its timing parameters.
 */
struct tpg_frmfmt {
	struct v4l2_frmsize_discrete frmsize;	/* frame width/height */
	int pixel_format;	/* pixel format code - presumably a V4L2 fourcc; verify */
	int framerate;		/* frame rate - TODO confirm unit (fps) */
	int h_blank;		/* horizontal blanking */
	int v_blank;		/* vertical blanking */
};
/* Per-port CSI state: register apertures, identifiers and active format. */
struct tegra_csi_port {
	/* Register apertures - NOTE(review): confirm layout per chip generation */
	void __iomem *pixel_parser;
	void __iomem *cil;
	void __iomem *tpg;

	u32 csi_port;
	u32 stream_id;
	u32 virtual_channel_id;

	/* One pair of sink/source pad has one format */
	struct v4l2_mbus_framefmt format;
	const struct tegra_video_format *core_format;
	unsigned int lanes;	/* lane count - presumably CIL data lanes in use; verify */

	/* TPG timing settings (cf. struct tpg_frmfmt) */
	unsigned int framerate;
	unsigned int h_blank;
	unsigned int v_blank;
};
/* Top-level CSI device state, shared by all CSI channels and ports. */
struct tegra_csi_device {
	struct device *dev;
	struct platform_device *pdev;
	char devname[32];

	/* Register apertures */
	void __iomem *iomem_base;
	void __iomem *iomem[3];

	/* PLL clock handles */
	struct clk *plld_dsi;
	struct clk *plld;

	struct camera_common_data s_data[6];
	struct tegra_csi_port *ports;
	struct media_pad *pads;
	unsigned int clk_freq;
	int num_ports;
	int num_channels;
	struct list_head csi_chans;	/* list of channels (tegra_csi_channel.list) */
	struct tegra_csi_channel *tpg_start;
	const struct tegra_csi_fops *fops;

	/* TPG (test pattern generator) configuration */
	const struct tpg_frmfmt *tpg_frmfmt_table;
	unsigned int tpg_frmfmt_table_size;	/* no. of entries in tpg_frmfmt_table */
	bool tpg_gain_ctrl;
	bool tpg_emb_data_config;
	int (*get_tpg_settings)(struct tegra_csi_port *port,
		union nvcsi_tpg_config *const tpg_config);

	atomic_t power_ref;		/* power on/off reference count */
	struct dentry *debugdir;	/* debugfs directory */

	/* NOTE(review): source_update presumably serializes the
	 * tpg_active/sensor_active counters below - confirm */
	struct mutex source_update;
	int tpg_active;
	int sensor_active;

	/* num_tpg_channels is a fixed number per SoC */
	int num_tpg_channels;
};
/*
 * Per-channel CSI state.
 *
 * subdev: channel subdev
 * numports: Number of CSI ports in use for this channel
 * numlanes: Number of CIL lanes in use
 */
struct tegra_csi_channel {
	struct list_head list;		/* entry in tegra_csi_device.csi_chans */
	struct v4l2_subdev subdev;	/* embedded subdev; see to_csi_chan() */
	struct media_pad *pads;
	struct media_pipeline pipe;
	struct v4l2_subdev *sensor_sd;	/* bound sensor subdev */

	struct tegra_csi_device *csi;	/* parent CSI device */
	struct tegra_csi_port *ports;
	unsigned char port[TEGRA_CSI_BLOCKS];
	/* format_lock - presumably serializes format updates; verify */
	struct mutex format_lock;
	unsigned int numports;
	unsigned int numlanes;
	unsigned int pg_mode;		/* pattern generator mode - TODO confirm 0 means off */
	struct camera_common_data *s_data;
	unsigned int id;
	atomic_t is_streaming;
	struct device_node *of_node;	/* DT node for this channel */
};
/* Map a channel's embedded v4l2_subdev back to its tegra_csi_channel. */
static inline struct tegra_csi_channel *to_csi_chan(struct v4l2_subdev *subdev)
{
	return container_of(subdev, struct tegra_csi_channel, subdev);
}
static inline struct tegra_csi_device *to_csi(struct v4l2_subdev *subdev)
{
struct tegra_csi_channel *chan = to_csi_chan(subdev);
return chan->csi;
}
u32 read_phy_mode_from_dt(struct tegra_csi_channel *chan);
u32 read_settle_time_from_dt(struct tegra_csi_channel *chan);
u64 read_mipi_clk_from_dt(struct tegra_csi_channel *chan);
void set_csi_portinfo(struct tegra_csi_device *csi,
unsigned int port, unsigned int numlanes);
void tegra_csi_status(struct tegra_csi_channel *chan, int port_idx);
int tegra_csi_error(struct tegra_csi_channel *chan, int port_idx);
int tegra_csi_start_streaming(struct tegra_csi_channel *chan, int port_idx);
void tegra_csi_stop_streaming(struct tegra_csi_channel *chan, int port_idx);
int tegra_csi_tpg_set_gain(struct v4l2_subdev *sd, void *arg);
void tegra_csi_error_recover(struct tegra_csi_channel *chan, int port_idx);
int tegra_csi_init(struct tegra_csi_device *csi,
struct platform_device *pdev);
int tegra_csi_mipi_calibrate(struct tegra_csi_device *csi,
bool on);
int tegra_csi_media_controller_init(struct tegra_csi_device *csi,
struct platform_device *pdev);
int tegra_csi_media_controller_remove(struct tegra_csi_device *csi);
struct tegra_csi_device *tegra_get_mc_csi(void);
int tpg_csi_media_controller_init(struct tegra_csi_device *csi, int pg_mode);
void tpg_csi_media_controller_cleanup(struct tegra_csi_device *csi);
int tegra_csi_power(struct tegra_csi_device *csi,
struct tegra_csi_channel *chan, int enable);
int tegra_csi_error_recovery(struct tegra_channel *chan,
struct tegra_csi_device *csi, struct tegra_csi_channel *csi_chan);
/* helper functions to calculate clock setting times */
unsigned int tegra_csi_clk_settling_time(
struct tegra_csi_device *csi,
const unsigned int csicil_clk_mhz);
unsigned int tegra_csi_ths_settling_time(
struct tegra_csi_device *csi,
const unsigned int csicil_clk_mhz,
const unsigned int mipi_clk_mhz);
#endif

View File

@@ -0,0 +1,211 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Tegra 18x CSI register offsets
*
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef __CSI4_REGISTERS_H__
#define __CSI4_REGISTERS_H__
#define CSI4_BASE_ADDRESS 0x18000
#define CSI4_PHY_OFFSET 0x10000
#define CSI4_STREAM_OFFSET 0x800
#define CSI_PORTS (6)
#define PHY_BRICKS (3)
/* NVCSI registers. Starts from 0x0 */
#define CFG_NVCSI_INCR_SYNCPT_CNTRL 0x04
/* NVCSI_STREAM registers */
#define TEGRA_CSI_STREAM_0_BASE 0x010000
#define TEGRA_CSI_STREAM_1_BASE 0x010800
#define TEGRA_CSI_STREAM_2_BASE 0x020000
#define TEGRA_CSI_STREAM_3_BASE 0x020800
#define TEGRA_CSI_STREAM_4_BASE 0x030000
#define TEGRA_CSI_STREAM_5_BASE 0x030800
#define PP_EN_CTRL 0x08
#define CFG_PP_EN (0x1 << 0)
#define PPFSM_TIMEOUT_CTRL 0x6c
#define CFG_TIMEOUT_EN (0x1 << 31)
#define CFG_TIMEOUT_PERIOD (0x7fffffff << 0)
#define VC0_DT_OVERRIDE 0x20
#define CFG_VC0_DT_OVERRIDE_EN (0x1 << 31)
#define CFG_VC0_DT_OVERRIDE (0x3f << 0)
#define PH_CHK_CTRL 0x70
#define CFG_PH_CRC_CHK_EN (0x1 << 1)
#define CFG_PH_ECC_CHK_EN (0x1 << 0)
#define VC0_DPCM_CTRL 0x74
#define CFG_VC0_DPCM_COMPRESSION_RATIO (0xf << 0)
#define ERROR_STATUS2VI_MASK 0x90
/* T186 TPG */
#define TPG_EN_0 0x0b8
#define cfg_tpg_en 0x1
/* NVCSI_STREAM Legacy T210 PG*/
#define PG_CTRL 0x194
#define PG_MODE_OFFSET 2
#define PG_ENABLE 0x1
#define PG_DISABLE 0x0
#define PG_BLANK 0x198
#define PG_VBLANK_MASK 0xffff
#define PG_HBLANK_MASK 0xffff
#define PG_VBLANK_OFFSET 16
#define PG_HBLANK_OFFSET 0
#define PG_PHASE 0x19c
#define PG_RED_FREQ 0x1a0
#define PG_VERT_INIT_FREQ_OFFSET 16
#define PG_HOR_INIT_FREQ_OFFSET 0
#define PG_RED_FREQ_RATE 0x1a4
#define PG_GREEN_FREQ 0x1a8
#define PG_GREEN_FREQ_RATE 0x1ac
#define PG_BLUE_FREQ 0x1b0
#define PG_BLUE_FREQ_RATE 0X1b4
#define PG_AOHDR 0x1b8
#define PG_IMAGE_SIZE 0x1bc
#define HEIGHT_OFFSET 16
#define PG_IMAGE_DT 0x1c0
/* TODO - double check if err_status2vi_vc0:[0] means bit or value */
#define ERROR_STATUS2VI_VC0 0x94
#define ERROR_STATUS2VI_VC1 0x98
#define ERROR_STATUS2VI_VC2 0x9c
#define ERROR_STATUS2VI_VC3 0xa0
#define ERR_STATUS2VI_VC (0xf << 0)
#define ERR_PP_FSM_TIMEOUT (0)
#define ERR_PH_ECC_SINGLE_BIT (1)
#define ERR_PACKET_PAYLOAD_CRC (2)
#define ERR_PACKET_PAYLOAD_LESS (3)
#define INTR_STATUS 0xa4
#define INTR_MASK 0xa8
#define PD_CRC_ERR_VC0 (0x1 << 2)
#define PH_ECC_SINGLE_BIT_ERR_VC0 (0x1 << 1)
#define PH_ECC_MULTI_BIT_ERR (0x1 << 16)
#define ERR_INTR_STATUS 0xac
#define ERR_INTR_MASK 0xb0
#define MASK_PH_CRC_ERR (0x1 << 17)
#define MASK_PH_ECC_MULTI_BIT_ERR (0x1 << 16)
#define MASK_PD_WC_SHORT_ERR_VC3 (0x1 << 15)
#define MASK_PD_CRC_ERR_VC3 (0x1 << 14)
#define MASK_PH_ECC_SINGLE_BIT_ERR_VC3 (0x1 << 13)
#define MASK_PPFSM_TIMEOUT_VC3 (0x1 << 12)
#define MASK_PD_WC_SHORT_ERR_VC2 (0x1 << 11)
#define MASK_PD_CRC_ERR_VC2 (0x1 << 10)
#define MASK_PH_ECC_SINGLE_BIT_ERR_VC2 (0x1 << 9)
#define MASK_PPFSM_TIMEOUT_VC2 (0x1 << 8)
#define MASK_PD_WC_SHORT_ERR_VC1 (0x1 << 7)
#define MASK_PD_CRC_ERR_VC1 (0x1 << 6)
#define MASK_PH_ECC_SINGLE_BIT_ERR_VC1 (0x1 << 5)
#define MASK_PPFSM_TIMEOUT_VC1 (0x1 << 4)
#define MASK_PD_WC_SHORT_ERR_VC0 (0x1 << 3)
#define MASK_PD_CRC_ERR_VC0 (0x1 << 2)
#define MASK_PH_ECC_SINGLE_BIT_ERR_VC0 (0x1 << 1)
#define MASK_PPFSM_TIMEOUT_VC0 (0x1 << 0)
/* For ERR_INTR_STATUS and ERR_INTR_MASK */
#define MASK_HSM_INTR_SW_TRIGGER (0x1 << 18)
/* NVCSI_PHY CIL registers */
#define NVCSI_PHY_0_CILA_BASE 0x010400
#define NVCSI_PHY_0_CILB_BASE 0x010C00
#define NVCSI_PHY_1_CILA_BASE 0x020400
#define NVCSI_PHY_1_CILB_BASE 0x020C00
#define NVCSI_PHY_2_CILA_BASE 0x030400
#define NVCSI_PHY_2_CILB_BASE 0x030C00
#define CILA_INTR_STATUS 0x400
#define CILA_INTR_MASK 0x404
#define CILA_ERR_INTR_STATUS 0x408
#define CILA_ERR_INTR_MASK 0x40c
#define CILB_INTR_STATUS 0xc00
#define CILB_INTR_MASK 0xc04
#define CILB_ERR_INTR_STATUS 0xc08
#define CILB_ERR_INTR_MASK 0xc0c
/* NVCSI_PHY registers */
#define NVCSI_CIL_PHY_CTRL 0x00
#define CFG_PHY_MODE (0x1 << 0)
#define DPHY (0)
#define CPHY (1)
#define NVCSI_CIL_CONFIG 0x04
#define DATA_LANE_B_OFFSET 0x8
#define DATA_LANE_A_OFFSET 0x0
#define DATA_LANE_B (0x7 << DATA_LANE_B_OFFSET)
#define DATA_LANE_A (0x7 << DATA_LANE_A_OFFSET)
#define NVCSI_CIL_PAD_CONFIG 0x0c
#define LOADADJ (0xf << 12)
#define PDVCLAMP (0x1 << 9)
#define E_VCLAMP (0x1 << 8)
#define SPARE_TOP (0xff << 0)
#define NVCSI_CIL_A_SW_RESET 0x18
#define NVCSI_CIL_B_SW_RESET 0x7c
#define SW_RESET1_EN (0x1 << 1)
#define SW_RESET0_EN (0x1 << 0)
#define NVCSI_CIL_A_PAD_CONFIG 0x20
#define NVCSI_CIL_B_PAD_CONFIG 0x84
#define E_INPUT_LP_IO1_SHIFT 22
#define E_INPUT_LP_IO0_SHIFT 21
#define E_INPUT_LP_CLK_SHIFT 20
#define E_INPUT_LP_IO1 (0x1 << 22)
#define E_INPUT_LP_IO0 (0x1 << 21)
#define E_INPUT_LP_CLK (0x1 << 20)
#define BANDWD_IN (0x1 << 19)
#define PD_CLK (0x1 << 18)
#define PD_IO1 (0x1 << 17)
#define PD_IO0 (0x1 << 16)
#define PD_CLK_SHIFT 18
#define PD_IO1_SHIFT 17
#define PD_IO0_SHIFT 16
#define SPARE_CLK (0x1 << 8)
#define SPARE_IO1 (0x1 << 4)
#define SPARE_IO0 (0x1 << 0)
#define NVCSI_CIL_A_CONTROL 0x5c
#define NVCSI_CIL_B_CONTROL 0xc0
#define T18X_BYPASS_LP_SEQ_SHIFT 7
#define DESCKEW_COMPARE_SHIFT 20
#define DESCKEW_SETTLE_SHIFT 16
#define CLK_SETTLE_SHIFT 8
#define THS_SETTLE_SHIFT 0
#define DEFAULT_DESKEW_COMPARE (0x4 << DESCKEW_COMPARE_SHIFT)
#define DEFAULT_DESKEW_SETTLE (0x6 << DESCKEW_SETTLE_SHIFT)
#define DEFAULT_DPHY_CLK_SETTLE (0x21 << CLK_SETTLE_SHIFT)
#define DEFAULT_CPHY_CLK_SETTLE (0x1 << CLK_SETTLE_SHIFT)
#define T18X_BYPASS_LP_SEQ (0x1 << T18X_BYPASS_LP_SEQ_SHIFT)
#define RESET_DESKEW_COMPARE (0x1 << DESCKEW_COMPARE_SHIFT)
#define RESET_DESKEW_SETTLE (0x1 << DESCKEW_SETTLE_SHIFT)
#define DEFAULT_THS_SETTLE (0x14 << THS_SETTLE_SHIFT)
#define NVCSI_CIL_A_POLARITY_SWIZZLE_CTRL 0x58
#define NVCSI_CIL_B_POLARITY_SWIZZLE_CTRL 0xbc
#define NVCSI_CIL_LANE_SWIZZLE_CTRL 0x10
#define NVCSI_CIL_A_DPHY_INADJ_CTRL (0x24)
#define NVCSI_CIL_A_CPHY_INADJ_CTRL (0x28)
#define NVCSI_CIL_B_DPHY_INADJ_CTRL (0x88)
#define NVCSI_CIL_B_CPHY_INADJ_CTRL (0x8c)
#define DEFAULT_SW_SET_DPHY_INADJ_IO0 (0x1 << 6)
#define DEFAULT_SW_SET_DPHY_INADJ_IO1 (0x1 << 14)
#define DEFAULT_DPHY_INADJ_IO0 (0xc)
#define DEFAULT_DPHY_INADJ_IO1 (0xc << 8)
#define DEFAULT_CPHY_EDGE_DELAY_TRIO0 (0x1 << 19)
#define DEFAULT_CPHY_EDGE_DELAY_TRIO1 (0x1 << 23)
/* MIPICAL */
#define NVCSI_CIL_A_BASE 0x18
#define NVCSI_CIL_B_BASE 0x7c
#define PAD_CONFIG_0 0x8
#endif /* __CSI4_REGISTERS_H__ */

View File

@@ -0,0 +1,42 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Tegra 19x CSI register offsets
*
* Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef __CSI5_REGISTERS_H__
#define __CSI5_REGISTERS_H__
#define CSI5_BASE_ADDRESS 0x011000
#define CSI5_PHY_OFFSET 0x010000
#define CSI5_TEGRA_CSI_STREAM_0_BASE 0x10000
#define CSI5_TEGRA_CSI_STREAM_2_BASE 0x20000
#define CSI5_TEGRA_CSI_STREAM_4_BASE 0x30000
#define CSI5_NVCSI_CIL_A_SW_RESET 0x24
#define CSI5_NVCSI_CIL_B_SW_RESET 0xb0
#define CSI5_SW_RESET1_EN (0x1 << 1)
#define CSI5_SW_RESET0_EN (0x1 << 0)
#define CSI5_E_INPUT_LP_IO1_SHIFT 22
#define CSI5_E_INPUT_LP_IO0_SHIFT 21
#define CSI5_E_INPUT_LP_CLK_SHIFT 20
#define CSI5_E_INPUT_LP_IO1 (0x1 << 22)
#define CSI5_E_INPUT_LP_IO0 (0x1 << 21)
#define CSI5_E_INPUT_LP_CLK (0x1 << 20)
#define CSI5_PD_CLK (0x1 << 18)
#define CSI5_PD_IO1 (0x1 << 17)
#define CSI5_PD_IO0 (0x1 << 16)
#define CSI5_PD_CLK_SHIFT 18
#define CSI5_PD_IO1_SHIFT 17
#define CSI5_PD_IO0_SHIFT 16
/* MIPICAL */
#define CSI5_NVCSI_CIL_A_BASE 0x24
#define CSI5_NVCSI_CIL_B_BASE 0xb0
#define CSI5_PAD_CONFIG_0 0x8
#endif /* __CSI5_REGISTERS_H__ */

View File

@@ -0,0 +1,236 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2022 NVIDIA Corporation. All rights reserved.
*/
/**
* @file include/media/fusa-capture/capture-common.h
*
* @brief VI/ISP channel common operations header for the T186/T194 Camera RTCPU
* platform.
*/
#ifndef __FUSA_CAPTURE_COMMON_H__
#define __FUSA_CAPTURE_COMMON_H__
#include <media/mc_common.h>
struct capture_buffer_table;
struct capture_mapping;
/**
* @defgroup CAPTURE_PROGRESS_NOTIFIER_STATES
*
* Progress state of a capture request.
*
* @note PROGRESS_STATUS_DONE only means that the capture request has been
* completed, the descriptor status must still be read in order to
* determine whether or not it was successful or in error.
*
* @{
*/
/** Capture request is enqueued or in progress */
#define PROGRESS_STATUS_BUSY (U32_C(0x1))
/** Capture request is complete and the data can be consumed */
#define PROGRESS_STATUS_DONE (U32_C(0x2))
/** @} */
/**
* @defgroup CAPTURE_BUFFER_OPS
*
* Capture surface buffer operations and DMA directions.
*
* @{
*/
/** @brief DMA @em to device data direction. */
#define BUFFER_READ (U32_C(0x01))
/** @brief DMA @em from device data direction. */
#define BUFFER_WRITE (U32_C(0x02))
/** @brief Add buffer to the channel's management table. */
#define BUFFER_ADD (U32_C(0x04))
/** @brief DMA bidirectional data direction. */
#define BUFFER_RDWR (BUFFER_READ | BUFFER_WRITE)
/** @} */
/** @brief max pin count per request. Used to preallocate unpin list */
#define MAX_PIN_BUFFER_PER_REQUEST (U32_C(24))
/**
* @brief Initialize the capture surface management table for SLAB allocations.
*
* @param[in] dev Originating device (VI or ISP)
*
* @returns pointer to table on success, NULL on error
*/
struct capture_buffer_table *create_buffer_table(
struct device *dev);
/**
* @brief Release all capture buffers and free the management table
*
* @param[in,out] tab Surface buffer management table
*/
void destroy_buffer_table(
struct capture_buffer_table *tab);
/**
* @brief Perform a buffer management operation on a capture surface buffer.
*
* @param[in,out] tab Surface buffer management table
* @param[in] memfd FD or NvRm handle to buffer
* @param[in] flag Surface BUFFER_* op bitmask
*
* @returns 0 (success), neg. errno (failure)
*/
int capture_buffer_request(
struct capture_buffer_table *tab,
uint32_t memfd,
uint32_t flag);
/**
* @brief Add a capture surface buffer to the buffer management table.
*
* @param[in,out] t Surface buffer management table
* @param[in] fd FD or NvRm handle to buffer
*
* @returns 0 (success), neg. errno (failure)
*/
int capture_buffer_add(
struct capture_buffer_table *t,
uint32_t fd);
/**
* @brief Decrement refcount for buffer mapping, and release it if it reaches
* zero, unless it is a preserved mapping.
*
* @param[in,out] t Surface buffer management table
* @param[in,out] pin Surface buffer to unpin
*/
void put_mapping(
struct capture_buffer_table *t,
struct capture_mapping *pin);
/**
* @brief Capture surface buffer context.
*/
struct capture_common_buf {
struct dma_buf *buf; /**< dma_buf context */
struct dma_buf_attachment *attach; /**< dma_buf attachment context */
struct sg_table *sgt; /**< scatterlist table */
dma_addr_t iova; /**< dma address */
void *va; /**< virtual address for kernel access */
};
/**
* @brief List of buffers to unpin for a capture request.
*/
struct capture_common_unpins {
uint32_t num_unpins; /**< No. of entries in data[] */
struct capture_mapping *data[MAX_PIN_BUFFER_PER_REQUEST]; /**< Surface buffers to unpin */
};
/**
* @brief Progress status notifier handle.
*/
struct capture_common_status_notifier {
struct dma_buf *buf; /**< dma_buf handle */
void *va; /**< buffer virtual mapping to kernel address space */
uint32_t offset; /**< status notifier offset [byte] */
};
/**
* @brief Setup the progress status notifier handle
*
* @param[in] status_notifer Progress status notifier handle
* @param[in] mem FD or NvRm handle to buffer
* @param[in] buffer_size Buffer size [byte]
* @param[in] mem_offset Status notifier offset [byte]
*
* @returns 0 (success), neg. errno (failure)
*/
int capture_common_setup_progress_status_notifier(
struct capture_common_status_notifier *status_notifier,
uint32_t mem,
uint32_t buffer_size,
uint32_t mem_offset);
/**
* @brief Release the progress status notifier handle.
*
* @param[in,out] progress_status_notifier Progress status notifier
* handle to release
*
* @returns 0
*/
int capture_common_release_progress_status_notifier(
struct capture_common_status_notifier *progress_status_notifier);
/**
* @brief Update the progress status for a capture request.
*
* @param[in] progress_status_notifier Progress status notifier handle
* @param[in] buffer_slot Capture descriptor index
* @param[in] buffer_depth Capture descriptor queue size
* @param[in] new_val Progress status to set
*
* @returns 0 (success), neg. errno (failure)
*/
int capture_common_set_progress_status(
struct capture_common_status_notifier *progress_status_notifier,
uint32_t buffer_slot,
uint32_t buffer_depth,
uint8_t new_val);
/**
* @brief Pins buffer memory, returns dma_buf handles for unpinning.
*
* @param[in] dev target device (rtcpu)
* @param[in] mem FD or NvRm handle to buffer
* @param[out] unpin_data struct w/ dma_buf handles for unpinning
*
* @returns 0 (success), neg. errno (failure)
*/
int capture_common_pin_memory(
struct device *dev,
uint32_t mem,
struct capture_common_buf *unpin_data);
/**
* @brief Unpins buffer memory, releasing dma_buf resources.
*
* @param[in,out] unpin_data data handle to be unpinned
*/
void capture_common_unpin_memory(
struct capture_common_buf *unpin_data);
/**
* @brief Pins (maps) the physical address for provided capture surface address
* and updates the iova pointer.
*
* @param[in,out] buf_ctx Surface buffer management table
* @param[in] mem_handle Memory handle (descriptor). Can be NULL,
* in this case function will do nothing and
* and return 0. This is to simplify handling of
* capture descriptors data fields, NULL indicates
* unused memory surface.
* @param[in] mem_offset Offset inside memory buffer
* @param[out] meminfo_base_address Surface iova address, including offset
* @param[out] meminfo_size Size of iova range, excluding offset
* @param[in,out] unpins Unpin data used to unref/unmap buffer
* after capture
*/
int capture_common_pin_and_get_iova(struct capture_buffer_table *buf_ctx,
uint32_t mem_handle, uint64_t mem_offset,
uint64_t *meminfo_base_address, uint64_t *meminfo_size,
struct capture_common_unpins *unpins);
#endif /* __FUSA_CAPTURE_COMMON_H__*/

View File

@@ -0,0 +1,120 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2022 NVIDIA Corporation. All rights reserved.
*/
/**
* @file include/media/fusa-capture/capture-isp-channel.h
*
* @brief ISP channel character device driver header for the T186/T194 Camera
* RTCPU platform.
*/
#ifndef __FUSA_CAPTURE_ISP_CHANNEL_H__
#define __FUSA_CAPTURE_ISP_CHANNEL_H__
#include <linux/of_platform.h>
struct isp_channel_drv;
/**
* @brief ISP fops for Host1x syncpt/gos allocations
*
* This fops is a HAL for chip/IP generations, see the respective VI platform
* drivers for the implementations.
*/
struct isp_channel_drv_ops {
/**
* @brief Request a syncpt allocation from Host1x.
*
* @param[in] pdev ISP platform_device
* @param[in] name syncpt name
* @param[out] syncpt_id assigned syncpt id
*
* @returns 0 (success), neg. errno (failure)
*/
int (*alloc_syncpt)(
struct platform_device *pdev,
const char *name,
uint32_t *syncpt_id);
/**
* @brief Release a syncpt to Host1x.
*
* @param[in] pdev ISP platform_device
* @param[in] id syncpt id to free
*/
void (*release_syncpt)(
struct platform_device *pdev,
uint32_t id);
/**
* @brief Retrieve the GoS table allocated in the ISP-THI carveout.
*
* @param[in] pdev ISP platform_device
* @param[out] table GoS table pointer
*/
uint32_t (*get_gos_table)(
struct platform_device *pdev,
const dma_addr_t **table);
/**
* @brief Get a syncpt's GoS backing in the ISP-THI carveout.
*
* @param[in] pdev ISP platform_device
* @param[in] id syncpt id
* @param[out] gos_index GoS id
* @param[out] gos_offset Offset of syncpt within GoS [dword]
*
* @returns 0 (success), neg. errno (failure)
*/
int (*get_syncpt_gos_backing)(
struct platform_device *pdev,
uint32_t id,
dma_addr_t *syncpt_addr,
uint32_t *gos_index,
uint32_t *gos_offset);
};
/**
* @brief ISP channel context (character device).
*/
struct tegra_isp_channel {
struct device *isp_dev; /**< ISP device */
struct platform_device *ndev; /**< ISP platform_device */
struct isp_channel_drv *drv; /**< ISP channel driver context */
void *priv; /**< ISP channel private context */
struct isp_capture *capture_data; /**< ISP channel capture context */
const struct isp_channel_drv_ops *ops; /**< ISP syncpt/gos fops */
};
/**
* @brief Create the ISP channels driver contexts, and instantiate
* MAX_ISP_CHANNELS many channel character device nodes.
*
* ISP channel nodes appear in the filesystem as:
* /dev/capture-isp-channel{0..MAX_ISP_CHANNELS-1}
*
* @param[in] pdev ISP platform_device context
* @param[in] ops isp_channel_drv_ops fops
*
* @returns 0 (success), neg. errno (failure)
*/
int isp_channel_drv_register(
struct platform_device *pdev,
const struct isp_channel_drv_ops *ops);
/**
* @brief Destroy the ISP channels driver and all character device nodes.
*
* The ISP channels driver and associated channel contexts in memory are freed,
* rendering the ISP platform driver unusable until re-initialized.
*
* @param[in] dev ISP device context
*/
void isp_channel_drv_unregister(
struct device *dev);
int isp_channel_drv_init(void);
void isp_channel_drv_exit(void);
#endif /* __FUSA_CAPTURE_ISP_CHANNEL_H__ */

View File

@@ -0,0 +1,361 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2022 NVIDIA Corporation. All rights reserved.
*/
/**
* @file include/media/fusa-capture/capture-isp.h
*
* @brief ISP channel operations header for the T186/T194 Camera RTCPU platform.
*/
#ifndef __FUSA_CAPTURE_ISP_H__
#define __FUSA_CAPTURE_ISP_H__
#if defined(__KERNEL__)
#include <linux/compiler.h>
#include <linux/types.h>
#else
#include <stdint.h>
#endif
#include <linux/ioctl.h>
#define __ISP_CAPTURE_ALIGN __aligned(8)
struct tegra_isp_channel;
/**
* @brief ISP descriptor relocs config.
*/
struct capture_isp_reloc {
uint32_t num_relocs; /**< No. of buffers to pin/reloc */
uint32_t __pad;
uint64_t reloc_relatives;
/**< Offsets buffer addresses to patch in descriptor */
} __ISP_CAPTURE_ALIGN;
/**
* @brief ISP channel setup config (IOCTL payload).
*
* These fields are used to set up the ISP channel and capture contexts, and
* will be copied verbatim in the IVC capture_channel_isp_config struct to
* allocate ISP resources in the RCE subsystem.
*/
struct isp_capture_setup {
uint32_t channel_flags;
/**<
* Bitmask for channel flags, see @ref CAPTURE_ISP_CHANNEL_FLAGS
*/
uint32_t __pad_flags;
/* ISP process capture descriptor queue (ring buffer) */
uint32_t queue_depth;
/**< No. of process capture descriptors in queue */
uint32_t request_size;
/**< Size of a single process capture descriptor [byte] */
uint32_t mem; /**< Process capture descriptors queue NvRm handle */
/* ISP process program descriptor queue (ring buffer) */
uint32_t isp_program_queue_depth;
/**< No. of process program descriptors in queue */
uint32_t isp_program_request_size;
/**< Size of a single process program descriptor [byte] */
uint32_t isp_program_mem;
/**< Process program descriptors queue NvRm handle */
uint32_t error_mask_correctable;
/**<
* Bitmask for correctable channel errors. See
* @ref CAPTURE_ISP_CHANNEL_ERRORS
*/
uint32_t error_mask_uncorrectable;
/**<
* Bitmask for uncorrectable channel errors. See
* @ref CAPTURE_ISP_CHANNEL_ERRORS
*/
} __ISP_CAPTURE_ALIGN;
/**
* @brief ISP capture info (resp. to query).
*/
struct isp_capture_info {
struct isp_capture_syncpts {
uint32_t progress_syncpt; /**< Progress syncpoint id */
uint32_t progress_syncpt_val; /**< Progress syncpoint value */
uint32_t stats_progress_syncpt;
/**< Stats progress syncpoint id */
uint32_t stats_progress_syncpt_val;
/**< Stats progress syncpoint value */
} syncpts;
uint32_t channel_id; /**< RCE-assigned ISP FW channel id */
} __ISP_CAPTURE_ALIGN;
/**
* @brief ISP process capture request (IOCTL payload).
*/
struct isp_capture_req {
uint32_t buffer_index; /**< Process descriptor index */
uint32_t __pad;
struct capture_isp_reloc isp_relocs;
/**<
* Surface buffers pin/reloc config. See @ref capture_isp_reloc
*/
struct capture_isp_reloc inputfences_relocs;
/**<
* Inputfences to pin/reloc. config. See @ref capture_isp_reloc
*/
uint32_t gos_relative; /* GoS offset [byte] */
uint32_t sp_relative; /* Syncpt offset [byte] */
struct capture_isp_reloc prefences_relocs;
/**<
* Prefences to pin/reloc. config. See @ref capture_isp_reloc
*/
} __ISP_CAPTURE_ALIGN;
/**
* @brief ISP process program request (IOCTL payload).
*/
struct isp_program_req {
uint32_t buffer_index; /**< Program descriptor index. */
uint32_t __pad;
struct capture_isp_reloc isp_program_relocs;
/**<
* Push buffers to pin/reloc. config. See
* @ref capture_isp_reloc
*/
} __ISP_CAPTURE_ALIGN;
/**
* @brief ISP joint capture+program request (IOCTL payload).
*/
struct isp_capture_req_ex {
struct isp_capture_req capture_req; /**< ISP capture process request */
struct isp_program_req program_req; /**< ISP program process request */
uint32_t __pad[4];
} __ISP_CAPTURE_ALIGN;
/**
* @brief ISP capture progress status setup config (IOCTL payload).
*/
struct isp_capture_progress_status_req {
uint32_t mem; /**< NvRm handle to buffer region start */
uint32_t mem_offset; /**< Status notifier offset [byte] */
uint32_t process_buffer_depth;
/**< Process capture descriptor queue size [num] */
uint32_t program_buffer_depth;
/**< Process program descriptor queue size [num] */
uint32_t __pad[4];
} __ISP_CAPTURE_ALIGN;
/**
* @brief Add ISP capture buffer to management table (IOCTL payload).
*/
struct isp_buffer_req {
uint32_t mem; /**< NvRm handle to buffer */
uint32_t flag; /**< Buffer @ref CAPTURE_BUFFER_OPS bitmask */
} __ISP_CAPTURE_ALIGN;
/**
* @brief Initialize an ISP channel capture context (at channel open).
*
* The ISP channel context is already partially-initialized by the calling
* function, the channel capture context is allocated and linked here.
*
* @param[in,out] chan Allocated ISP channel context,
* partially-initialized
* @returns 0 (success), neg. errno (failure)
*/
int isp_capture_init(
struct tegra_isp_channel *chan);
/**
* @brief De-initialize an ISP capture channel, closing open ISP streams, and
* freeing the buffer management table and channel capture context.
*
* The ISP channel context is not freed in this function, only the capture
* context is.
*
* The ISP channel should have been RESET and RELEASE'd when this function is
* called, but they may still be active due to programming error or client UMD
* crash. In such cases, they will be called automatically by the @em Abort
* functionality.
*
* @param[in,out] chan ISP channel context
*/
void isp_capture_shutdown(
struct tegra_isp_channel *chan);
/**
* @brief Open an ISP channel in RCE, sending channel configuration to request a
* SW channel allocation. Syncpts are allocated by the KMD in this subroutine.
*
* @param[in,out] chan ISP channel context
* @param[in] setup ISP channel setup config
*
* @returns 0 (success), neg. errno (failure)
*/
int isp_capture_setup(
struct tegra_isp_channel *chan,
struct isp_capture_setup *setup);
/**
* @brief Reset an opened ISP channel, all pending process requests to RCE are
* discarded.
*
* The channel's progress syncpoint is advanced to the threshold of the latest
* capture/program request to unblock any waiting observers.
*
* A reset barrier may be enqueued in the capture IVC channel to flush stale
* capture/program descriptors, in case of abnormal channel termination.
*
* @param[in] chan ISP channel context
* @param[in] reset_flags Bitmask for ISP channel reset options
* (CAPTURE_CHANNEL_RESET_FLAG_*)
* @returns 0 (success), neg. errno (failure)
*/
int isp_capture_reset(
struct tegra_isp_channel *chan,
uint32_t reset_flags);
/**
* @brief Release an opened ISP channel; the RCE channel allocation, syncpoints
* and IVC channel callbacks are released.
*
* @param[in] chan ISP channel context
* @param[in] reset_flags Bitmask for ISP channel reset options
* (CAPTURE_CHANNEL_RESET_FLAG_*)
*
* @returns 0 (success), neg. errno (failure)
*/
int isp_capture_release(
struct tegra_isp_channel *chan,
uint32_t reset_flags);
/**
* @brief Query an ISP channel's syncpoint ids and values, and retrieve the
* RCE-assigned ISP FW channel id.
*
* @param[in] chan ISP channel context
* @param[out] info ISP channel info response
*
* @returns 0 (success), neg. errno (failure)
*/
int isp_capture_get_info(
struct tegra_isp_channel *chan,
struct isp_capture_info *info);
/**
 * @brief Send a capture (aka. process) request for a frame via the capture IVC
 * channel to RCE.
 *
 * This is a non-blocking call.
 *
 * @param[in]	chan	ISP channel context
 * @param[in]	req	ISP process capture request
 *
 * @returns	0 (success), neg. errno (failure)
 */
int isp_capture_request(
	struct tegra_isp_channel *chan,
	struct isp_capture_req *req);
/**
 * @brief Wait on receipt of the capture status of the head of the capture
 * request FIFO queue to RCE. The RCE ISP driver sends a CAPTURE_ISP_STATUS_IND
 * notification at frame completion.
 *
 * This is a blocking call, with the possibility of timeout.
 *
 * @todo The capture progress status notifier is expected to replace this
 * functionality in the future, deprecating it.
 *
 * @param[in]	chan	ISP channel context
 * @param[in]	timeout_ms	Time to wait for status completion [ms], set to
 *				0 for indefinite
 *
 * @returns	0 (success), neg. errno (failure)
 */
int isp_capture_status(
	struct tegra_isp_channel *chan,
	int32_t timeout_ms);
/**
 * @brief Send a program request containing an ISP pushbuffer configuration via
 * the capture IVC channel to RCE.
 *
 * This is a non-blocking call.
 *
 * @param[in]	chan	ISP channel context
 * @param[in]	req	ISP program request
 *
 * @returns	0 (success), neg. errno (failure)
 */
int isp_capture_program_request(
	struct tegra_isp_channel *chan,
	struct isp_program_req *req);
/**
 * @brief Wait on receipt of the program status of the head of the program
 * request FIFO queue to RCE. The RCE ISP driver sends a
 * CAPTURE_ISP_PROGRAM_STATUS_IND notification at completion.
 *
 * This is a blocking call with no possibility of timeout, as programs may be
 * reused for multiple frames.
 *
 * @todo The capture progress status notifier is expected to replace this
 * functionality in the future, deprecating it.
 *
 * @param[in]	chan	ISP channel context
 *
 * @returns	0 (success), neg. errno (failure)
 */
int isp_capture_program_status(
	struct tegra_isp_channel *chan);
/**
 * @brief Send an extended capture (aka. process) request for a frame,
 * containing the ISP pushbuffer program to execute via the capture IVC channel
 * to RCE.
 *
 * The extended call is equivalent to separately sending a capture and a program
 * request for every frame; it is an optimization to reduce the number of
 * system context switches from IOCTL and IVC calls.
 *
 * This is a non-blocking call.
 *
 * @param[in]	chan	ISP channel context
 * @param[in]	req	ISP extended process request
 *
 * @returns	0 (success), neg. errno (failure)
 */
int isp_capture_request_ex(
	struct tegra_isp_channel *chan,
	struct isp_capture_req_ex *req);
/**
 * @brief Setup the ISP channel capture status progress notifier.
 *
 * @param[in]	chan	ISP channel context
 * @param[in]	req	ISP capture progress status setup config
 *
 * @returns	0 (success), neg. errno (failure)
 */
int isp_capture_set_progress_status_notifier(
	struct tegra_isp_channel *chan,
	struct isp_capture_progress_status_req *req);
/**
 * @brief Perform a buffer management operation on an ISP capture buffer.
 *
 * @param[in]	chan	ISP channel context
 * @param[in]	req	ISP capture buffer request
 *
 * @returns	0 (success), neg. errno (failure)
 */
int isp_capture_buffer_request(
	struct tegra_isp_channel *chan,
	struct isp_buffer_req *req);
#endif /* __FUSA_CAPTURE_ISP_H__ */

View File

@@ -0,0 +1,205 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2022, NVIDIA Corporation. All rights reserved.
*/
/**
* @file include/media/fusa-capture/capture-vi-channel.h
*
* @brief VI channel character device driver header for the T186/T194 Camera
* RTCPU platform.
*/
#ifndef __FUSA_CAPTURE_VI_CHANNEL_H__
#define __FUSA_CAPTURE_VI_CHANNEL_H__
#include <linux/of_platform.h>
struct vi_channel_drv;
/**
 * @brief VI fops for Host1x syncpt/gos allocations
 *
 * This fops is a HAL for chip/IP generations, see the respective VI platform
 * drivers for the implementations.
 */
struct vi_channel_drv_ops {
	/**
	 * Request a syncpt allocation from Host1x.
	 *
	 * @param[in]	pdev	VI platform_device
	 * @param[in]	name	syncpt name
	 * @param[out]	syncpt_id	assigned syncpt id
	 *
	 * @returns	0 (success), neg. errno (failure)
	 */
	int (*alloc_syncpt)(
		struct platform_device *pdev,
		const char *name,
		uint32_t *syncpt_id);
	/**
	 * Release a syncpt to Host1x.
	 *
	 * @param[in]	pdev	VI platform_device
	 * @param[in]	id	syncpt id to free
	 */
	void (*release_syncpt)(
		struct platform_device *pdev,
		uint32_t id);
	/**
	 * Retrieve the GoS table allocated in the VI-THI carveout.
	 *
	 * @param[in]	pdev	VI platform_device
	 * @param[out]	count	No. of carveout devices
	 * @param[out]	table	GoS table pointer
	 */
	void (*get_gos_table)(
		struct platform_device *pdev,
		int *count,
		const dma_addr_t **table);
	/**
	 * Get a syncpt's GoS backing in the VI-THI carveout.
	 *
	 * @param[in]	pdev	VI platform_device
	 * @param[in]	id	syncpt id
	 * @param[out]	syncpt_addr	syncpt backing address (dma_addr_t)
	 * @param[out]	gos_index	GoS id
	 * @param[out]	gos_offset	Offset of syncpt within GoS [dword]
	 *
	 * @returns	0 (success), neg. errno (failure)
	 */
	int (*get_syncpt_gos_backing)(
		struct platform_device *pdev,
		uint32_t id,
		dma_addr_t *syncpt_addr,
		uint32_t *gos_index,
		uint32_t *gos_offset);
};
/**
 * @brief VI channel character device driver context.
 */
struct vi_channel_drv {
	struct platform_device *vi_capture_pdev;
	/**< Capture VI driver platform device */
	bool use_legacy_path;
	/**< Flag to maintain backward-compatibility for T186 */
	struct device *dev; /**< VI kernel @em device */
	struct platform_device *ndev; /**< VI kernel @em platform_device */
	struct mutex lock; /**< VI channel driver context lock */
	u8 num_channels; /**< No. of VI channel character devices */
	const struct vi_channel_drv_ops *ops;
	/**< VI fops for Host1x syncpt/gos allocations */
	struct tegra_vi_channel __rcu *channels[];
	/**< Allocated VI channel contexts (flexible array, @ref num_channels entries) */
};
/**
 * @brief VI channel context, one per open channel character device node.
 */
struct tegra_vi_channel {
	struct device *dev; /**< VI device */
	struct platform_device *ndev; /**< VI nvhost platform_device */
	struct platform_device *vi_capture_pdev;
	/**< Capture VI driver platform device */
	struct vi_channel_drv *drv; /**< VI channel driver context */
	struct rcu_head rcu; /**< VI channel rcu (deferred free of this context) */
	struct vi_capture *capture_data; /**< VI channel capture context */
	const struct vi_channel_drv_ops *ops; /**< VI syncpt/gos fops */
	struct device *rtcpu_dev; /**< rtcpu device */
	bool is_stream_opened; /**< Whether the NVCSI stream is opened */
};
/**
 * @brief Create the VI channels driver contexts, and instantiate
 * as many channel character device nodes as specified in the device tree.
 *
 * VI channel nodes appear in the filesystem as:
 * /dev/capture-vi-channel{0..max_vi_channels-1}
 *
 * @param[in]	ndev	VI platform_device context
 * @param[in]	max_vi_channels	Maximum number of VI channels
 *
 * @returns	0 (success), neg. errno (failure)
 */
int vi_channel_drv_register(
	struct platform_device *ndev, unsigned int max_vi_channels);
/**
 * @brief Destroy the VI channels driver and all character device nodes.
 *
 * The VI channels driver and associated channel contexts in memory are freed,
 * rendering the VI platform driver unusable until re-initialized.
 *
 * @param[in]	dev	VI device context
 */
void vi_channel_drv_unregister(
	struct device *dev);
/**
 * @brief Register the chip-specific syncpt/gos related function table.
 *
 * @param[in]	ops	vi_channel_drv_ops fops
 *
 * @returns	0 (success), neg. errno (failure)
 */
int vi_channel_drv_fops_register(
	const struct vi_channel_drv_ops *ops);
/**
 * @brief Unpin and free the list of pinned capture_mapping's associated with a
 * VI capture request.
 *
 * @param[in]	chan	VI channel context
 * @param[in]	buffer_index	Capture descriptor queue index
 */
void vi_capture_request_unpin(
	struct tegra_vi_channel *chan,
	uint32_t buffer_index);
/*
 * Internal APIs for V4L2 driver (aka. VI mode)
 */
/**
 * @brief Open a VI channel character device node, power on the camera subsystem
 * and initialize the channel driver context.
 *
 * The act of opening a VI channel character device node does not entail the
 * reservation of a VI channel, VI_CAPTURE_SETUP must be called afterwards to
 * request an allocation by RCE.
 *
 * @param[in]	channel	VI channel enumerated node iminor no.
 * @param[in]	is_mem_pinned	Whether capture request memory will be pinned
 *
 * @returns	tegra_vi_channel pointer (success), ERR_PTR (failure)
 */
struct tegra_vi_channel *vi_channel_open_ex(
	unsigned int channel,
	bool is_mem_pinned);
/**
 * @brief Release a VI channel character device node, power off the camera
 * subsystem and free the VI channel driver context.
 *
 * Under normal operation, the NVCSI stream and TPG source should be closed, and
 * VI_CAPTURE_RESET followed by VI_CAPTURE_RELEASE should be called before
 * releasing the file handle on the device node.
 *
 * If the user-mode client crashes, the operating system will call this
 * @em release handler to perform all of those actions as part of the @em Abort
 * functionality.
 *
 * @param[in]	channel	VI channel enumerated node iminor no.
 * @param[in]	chan	VI channel context
 *
 * @returns	0
 */
int vi_channel_close_ex(
	unsigned int channel,
	struct tegra_vi_channel *chan);
/**
 * @brief Module-scope initialization of the VI channel driver; counterpart of
 * vi_channel_drv_exit().
 *
 * @returns	0 (success), neg. errno (failure)
 */
int vi_channel_drv_init(void);
/**
 * @brief Module-scope teardown of the VI channel driver; counterpart of
 * vi_channel_drv_init().
 */
void vi_channel_drv_exit(void);
#endif /* __FUSA_CAPTURE_VI_CHANNEL_H__ */

View File

@@ -0,0 +1,454 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2022 NVIDIA Corporation. All rights reserved.
*/
/**
* @file include/media/fusa-capture/capture-vi.h
*
* @brief VI channel operations header for the T186/T194 Camera RTCPU platform.
*/
#ifndef __FUSA_CAPTURE_VI_H__
#define __FUSA_CAPTURE_VI_H__
#if defined(__KERNEL__)
#include <linux/compiler.h>
#include <linux/types.h>
#else
#include <stdint.h>
#endif
#include <media/fusa-capture/capture-common.h>
#include <media/fusa-capture/capture-vi-channel.h>
#include "soc/tegra/camrtc-capture.h"
#include "soc/tegra/camrtc-capture-messages.h"
#define __VI_CAPTURE_ALIGN __aligned(8)
struct tegra_vi_channel;
struct capture_buffer_table;
/**
 * @brief VI channel capture context.
 */
struct vi_capture {
	uint16_t channel_id; /**< RCE-assigned VI FW channel id */
	struct device *rtcpu_dev; /**< rtcpu device */
	struct tegra_vi_channel *vi_channel; /**< VI channel context */
	struct capture_buffer_table *buf_ctx;
	/**< Surface buffer management table */
	struct capture_common_buf requests; /**< Capture descriptors queue */
	struct capture_descriptor_memoryinfo *requests_memoryinfo;
	/**< memory info ringbuffer handle */
	uint64_t requests_memoryinfo_iova;
	/**< memory info ringbuffer rtcpu iova */
	size_t request_buf_size;
	/**< Size of capture descriptor queue [byte] */
	uint32_t queue_depth; /**< No. of capture descriptors in queue */
	uint32_t request_size; /**< Size of single capture descriptor [byte] */
	bool is_mem_pinned; /**< Whether capture request memory is pinned */
	struct capture_common_status_notifier progress_status_notifier;
	/**< Capture progress status notifier context */
	uint32_t progress_status_buffer_depth;
	/**< No. of capture descriptors */
	bool is_progress_status_notifier_set;
	/**< Whether progress_status_notifier has been initialized */
	uint32_t stream_id; /**< NVCSI PixelParser index [0-5] */
	uint32_t csi_port; /**< NVCSI ports A-H [0-7] */
	uint32_t virtual_channel_id; /**< CSI virtual channel id [0-15] */
	uint32_t num_gos_tables; /**< No. of cv devices in gos_tables */
	const dma_addr_t *gos_tables; /**< IOVA addresses of all GoS devices */
	struct syncpoint_info progress_sp; /**< Syncpoint for frame progress */
	struct syncpoint_info embdata_sp;
	/**< Syncpoint for embedded metadata */
	struct syncpoint_info linetimer_sp;
	/**< Syncpoint for frame line timer */
	struct completion control_resp;
	/**< Completion for capture-control IVC response */
	struct completion capture_resp;
	/**<
	 * Completion for capture requests (frame), if progress status
	 * notifier is not in use
	 */
	struct mutex control_msg_lock;
	/**< Lock for capture-control IVC control_resp_msg */
	struct CAPTURE_CONTROL_MSG control_resp_msg;
	/**< capture-control IVC resp msg written to by callback */
	struct mutex reset_lock;
	/**< Channel lock for reset/abort support (via RCE) */
	struct mutex unpins_list_lock; /**< Lock for unpins_list */
	struct capture_common_unpins *unpins_list;
	/**< List of capture request buffer unpins */
	uint64_t vi_channel_mask;
	/**< Bitmask of RCE-assigned VI FW channel(s). */
	uint64_t vi2_channel_mask;
	/**< Bitmask of RCE-assigned VI FW channel(s) for 2nd VI. */
};
/**
 * @brief VI channel setup config (IOCTL payload).
 *
 * These fields are used to set up the VI channel and capture contexts, and will
 * be copied verbatim in the IVC capture_channel_config struct to allocate VI
 * resources in the RCE subsystem.
 */
struct vi_capture_setup {
	uint32_t channel_flags;
	/**<
	 * Bitmask for channel flags, see @ref CAPTURE_CHANNEL_FLAGS
	 */
	uint32_t error_mask_correctable;
	/**<
	 * Bitmask for correctable channel errors. See
	 * @ref CAPTURE_CHANNEL_ERRORS
	 */
	uint64_t vi_channel_mask;
	/**< Bitmask of VI channels to consider for allocation by RCE */
	uint64_t vi2_channel_mask;
	/**< Bitmask of 2nd VI channels */
	uint32_t queue_depth; /**< No. of capture descriptors in queue. */
	uint32_t request_size;
	/**< Size of a single capture descriptor [byte] */
	union {
		uint32_t mem; /**< Capture descriptors queue NvRm handle */
		uint64_t iova;
		/**<
		 * Capture descriptors queue base address (written back
		 * after pinning by KMD)
		 */
	};
	uint8_t slvsec_stream_main;
	/**< SLVS-EC main stream (hardcode to 0x00) */
	uint8_t slvsec_stream_sub;
	/**< SLVS-EC sub stream (hardcode to 0xFF - disabled) */
	uint16_t __pad_slvsec1; /**< Reserved padding */
	uint32_t csi_stream_id; /**< NVCSI PixelParser index [0-5] */
	uint32_t virtual_channel_id; /**< Virtual Channel index [0-15] */
	uint32_t csi_port; /**< NVCSI Port [0-7], not valid for TPG */
	uint32_t __pad_csi; /**< Reserved */
	uint32_t error_mask_uncorrectable;
	/**<
	 * Bitmask for uncorrectable channel errors. See
	 * @ref CAPTURE_CHANNEL_ERRORS
	 */
	uint64_t stop_on_error_notify_bits;
	/**<
	 * Bitmask for NOTIFY errors that force channel stop upon
	 * receipt
	 */
	uint64_t reserved[2];
} __VI_CAPTURE_ALIGN;
/**
 * @brief VI capture info (response to a capture info query).
 */
struct vi_capture_info {
	struct vi_capture_syncpts {
		uint32_t progress_syncpt; /**< Progress syncpoint id */
		uint32_t progress_syncpt_val; /**< Progress syncpoint value. */
		uint32_t emb_data_syncpt; /**< Embedded metadata syncpoint id */
		uint32_t emb_data_syncpt_val;
		/**< Embedded metadata syncpt value. */
		uint32_t line_timer_syncpt; /**< Line timer syncpoint id */
		uint32_t line_timer_syncpt_val;
		/**< Line timer syncpoint value */
	} syncpts;
	uint32_t hw_channel_id; /**< RCE-assigned VI FW channel id */
	uint32_t __pad; /**< Reserved padding */
	uint64_t vi_channel_mask;
	/**< Bitmask of RCE-assigned VI FW channel(s) */
	uint64_t vi2_channel_mask;
	/**< Bitmask of RCE-assigned VI FW channel(s) for 2nd VI */
} __VI_CAPTURE_ALIGN;
/**
 * @brief Container for CAPTURE_CONTROL_MSG req./resp. from FuSa UMD (IOCTL
 * payload).
 *
 * The response and request pointers may be to the same memory allocation; in
 * which case the request message will be overwritten by the response.
 */
struct vi_capture_control_msg {
	uint64_t ptr; /**< Pointer to capture-control message req. */
	uint32_t size; /**< Size of req./resp. msg [byte] */
	uint32_t __pad; /**< Reserved padding */
	uint64_t response; /**< Pointer to capture-control message resp. */
} __VI_CAPTURE_ALIGN;
/**
 * @brief VI capture request (IOCTL payload).
 */
struct vi_capture_req {
	uint32_t buffer_index; /**< Capture descriptor index. */
	uint32_t num_relocs; /**< No. of surface buffers to pin/reloc. */
	uint64_t reloc_relatives;
	/**<
	 * Offsets to surface buffer addresses to patch in capture
	 * descriptor [byte]. NOTE(review): u64 carrying a pointer to an
	 * array of @ref num_relocs entries, presumably a userspace
	 * address — confirm with the ioctl handler.
	 */
} __VI_CAPTURE_ALIGN;
/**
 * @brief VI capture progress status setup config (IOCTL payload).
 */
struct vi_capture_progress_status_req {
	uint32_t mem; /**< NvRm handle to buffer region start. */
	uint32_t mem_offset; /**< Status notifier offset [byte]. */
	uint32_t buffer_depth; /**< Capture descriptor queue size [num]. */
	uint32_t __pad[3]; /**< Reserved padding */
} __VI_CAPTURE_ALIGN;
/**
 * @brief Buffer management request on a VI capture surface buffer (IOCTL
 * payload); the operation is given by @ref flag.
 */
struct vi_buffer_req {
	uint32_t mem; /**< NvRm handle to buffer. */
	uint32_t flag; /**< Buffer @ref CAPTURE_BUFFER_OPS bitmask. */
} __VI_CAPTURE_ALIGN;
/**
 * @brief No. of kneepoints in the piece-wise linear transformation function
 * used by the VI companding module.
 */
#define VI_CAPTURE_NUM_COMPAND_KNEEPTS 10
/**
 * @brief VI compand setup config (IOCTL payload).
 *
 * The compand configuration describes a piece-wise linear transformation
 * function used by the VI companding module.
 */
struct vi_capture_compand {
	uint32_t base[VI_CAPTURE_NUM_COMPAND_KNEEPTS];
	/**< kneept base param. */
	uint32_t scale[VI_CAPTURE_NUM_COMPAND_KNEEPTS];
	/**< kneept scale param. */
	uint32_t offset[VI_CAPTURE_NUM_COMPAND_KNEEPTS];
	/**< kneept offset param. */
} __VI_CAPTURE_ALIGN;
/**
 * @brief Initialize a VI channel capture context (at channel open).
 *
 * The VI channel context is already partially-initialized by the calling
 * function, the channel capture context is allocated and linked here.
 *
 * @param[in,out]	chan	Allocated VI channel context,
 *				partially-initialized
 * @param[in]	is_mem_pinned	Whether capture request memory is pinned
 *
 * @returns	0 (success), neg. errno (failure)
 */
int vi_capture_init(
	struct tegra_vi_channel *chan,
	bool is_mem_pinned);
/**
 * @brief De-initialize a VI capture channel, closing open VI/NVCSI streams, and
 * freeing the buffer management table and channel capture context.
 *
 * The VI channel context is not freed in this function, only the capture
 * context is.
 *
 * @param[in,out]	chan	VI channel context
 */
void vi_capture_shutdown(
	struct tegra_vi_channel *chan);
/**
 * @brief Select the NvHost VI client instance platform driver to be
 * associated with the channel.
 * Only used in the case where the VI standalone driver is used
 * to enumerate the VI channel character drivers.
 *
 * @param[in,out]	chan	VI channel context
 * @param[in]	setup	VI channel setup config
 */
void vi_get_nvhost_device(
	struct tegra_vi_channel *chan,
	struct vi_capture_setup *setup);
/**
 * @brief The function returns the corresponding NvHost VI client device
 * pointer associated with the NVCSI stream Id. A NULL value is returned
 * if invalid input parameters are passed.
 *
 * @param[in]	pdev		VI capture platform device pointer
 * @param[in]	csi_stream_id	NVCSI stream Id
 *
 * @returns	reference to VI device (success), NULL (failure)
 */
struct device *vi_csi_stream_to_nvhost_device(
	struct platform_device *pdev,
	uint32_t csi_stream_id);
/**
 * @brief Open a VI channel in RCE, sending channel configuration to request a
 * HW channel allocation. Syncpoints are allocated by the KMD in this
 * subroutine.
 *
 * @param[in,out]	chan	VI channel context
 * @param[in]	setup	VI channel setup config
 *
 * @returns	0 (success), neg. errno (failure)
 */
int vi_capture_setup(
	struct tegra_vi_channel *chan,
	struct vi_capture_setup *setup);
/**
 * @brief Get the pointer to the tegra_vi_channel struct associated with the
 * stream id and virtual channel id passed as function input params.
 *
 * If no valid tegra_vi_channel pointer is found associated with the given
 * stream id / VC id combo then NULL is returned.
 *
 * @param[in]	stream_id		CSI stream ID
 * @param[in]	virtual_channel_id	CSI virtual channel ID
 *
 * @returns	pointer to tegra_vi_channel (success), NULL (failure)
 */
struct tegra_vi_channel *get_tegra_vi_channel(
	unsigned int stream_id,
	unsigned int virtual_channel_id);
/**
 * @brief Reset an opened VI channel, all pending capture requests to RCE are
 * discarded.
 *
 * The channel's progress syncpoint is advanced to the threshold of the latest
 * capture request to unblock any waiting observers.
 *
 * A reset barrier may be enqueued in the capture IVC channel to flush stale
 * capture descriptors, in case of abnormal channel termination.
 *
 * @param[in]	chan	VI channel context
 * @param[in]	reset_flags	Bitmask for VI channel reset options
 *				(CAPTURE_CHANNEL_RESET_FLAG_*)
 *
 * @returns	0 (success), neg. errno (failure)
 */
int vi_capture_reset(
	struct tegra_vi_channel *chan,
	uint32_t reset_flags);
/**
 * @brief Release an opened VI channel; the RCE channel allocation, syncpts and
 * IVC channel callbacks are released.
 *
 * @param[in]	chan	VI channel context
 * @param[in]	reset_flags	Bitmask for VI channel reset options
 *				(CAPTURE_CHANNEL_RESET_FLAG_*)
 *
 * @returns	0 (success), neg. errno (failure)
 */
int vi_capture_release(
	struct tegra_vi_channel *chan,
	uint32_t reset_flags);
/**
 * @brief Release the TPG and/or NVCSI stream on a VI channel, if they are
 * active.
 *
 * This function normally does not execute except in the event of abnormal UMD
 * termination, as it is the client's responsibility to open and close NVCSI and
 * TPG sources.
 *
 * @param[in]	chan	VI channel context
 *
 * @returns	0 (success), neg. errno (failure)
 */
int csi_stream_release(
	struct tegra_vi_channel *chan);
/**
 * @brief Send a capture-control IVC message to RCE and wait for a response.
 *
 * This is a blocking call, with the possibility of timeout.
 *
 * @param[in]	chan	VI channel context
 * @param[in,out]	msg	capture-control IVC container w/ req./resp. pair
 *
 * @returns	0 (success), neg. errno (failure)
 */
int vi_capture_control_message(
	struct tegra_vi_channel *chan,
	struct vi_capture_control_msg *msg);
/**
 * @brief Send a capture-control IVC message which is received from
 * userspace to RCE and wait for a response.
 *
 * This is a blocking call, with the possibility of timeout.
 *
 * @param[in]	chan	VI channel context
 * @param[in,out]	msg	capture-control IVC container w/ req./resp. pair
 *
 * @returns	0 (success), neg. errno (failure)
 */
int vi_capture_control_message_from_user(
	struct tegra_vi_channel *chan,
	struct vi_capture_control_msg *msg);
/**
 * @brief Query a VI channel's syncpt ids and values, and retrieve the
 * RCE-assigned VI FW channel id and mask.
 *
 * @param[in]	chan	VI channel context
 * @param[out]	info	VI channel info response
 *
 * @returns	0 (success), neg. errno (failure)
 */
int vi_capture_get_info(
	struct tegra_vi_channel *chan,
	struct vi_capture_info *info);
/**
 * @brief Send a capture request for a frame via the capture IVC channel to RCE.
 *
 * This is a non-blocking call.
 *
 * @param[in]	chan	VI channel context
 * @param[in]	req	VI capture request
 *
 * @returns	0 (success), neg. errno (failure)
 */
int vi_capture_request(
	struct tegra_vi_channel *chan,
	struct vi_capture_req *req);
/**
 * @brief Wait on receipt of the capture status of the head of the capture
 * request FIFO queue to RCE. The RCE VI driver sends a
 * CAPTURE_STATUS_IND notification at frame completion.
 *
 * This is a blocking call, with the possibility of timeout.
 *
 * @param[in]	chan	VI channel context
 * @param[in]	timeout_ms	Time to wait for status completion [ms], set to
 *				0 for indefinite
 *
 * @returns	0 (success), neg. errno (failure)
 */
int vi_capture_status(
	struct tegra_vi_channel *chan,
	int32_t timeout_ms);
/**
 * @brief Setup VI channel capture status progress notifier.
 *
 * @param[in]	chan	VI channel context
 * @param[in]	req	VI capture progress status setup config
 *
 * @returns	0 (success), neg. errno (failure)
 */
int vi_capture_set_progress_status_notifier(
	struct tegra_vi_channel *chan,
	struct vi_capture_progress_status_req *req);
#endif /* __FUSA_CAPTURE_VI_H__ */

444
include/media/mc_common.h Normal file
View File

@@ -0,0 +1,444 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Tegra Media controller common APIs
*
* Copyright (c) 2012-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef __CAMERA_MC_COMMON_H__
#define __CAMERA_MC_COMMON_H__
#include <media/media-device.h>
#include <media/media-entity.h>
#include <media/sensor_common.h>
#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-dev.h>
#include <media/videobuf2-core.h>
#include <media/tegra_camera_core.h>
#include <media/csi.h>
#include <linux/workqueue.h>
#include <linux/semaphore.h>
#include <linux/rwsem.h>
#include <linux/version.h>
/* Limits and misc. constants shared by the Tegra V4L2 camera framework. */
#define MAX_FORMAT_NUM	64
#define MAX_SUBDEVICES	4
#define QUEUED_BUFFERS	4
#define ENABLE	1
#define DISABLE	0
#define MAX_SYNCPT_PER_CHANNEL	3
#define CAPTURE_MIN_BUFFERS	1U
#define CAPTURE_MAX_BUFFERS	240U
#define TEGRA_MEM_FORMAT 0
#define TEGRA_ISP_FORMAT 1
/* Per-channel capture state, see tegra_channel::capture_state. */
enum channel_capture_state {
	CAPTURE_IDLE = 0,
	CAPTURE_GOOD,
	CAPTURE_TIMEOUT,
	CAPTURE_ERROR,
};
/* VI test pattern generator (TPG) mode, see tegra_mc_vi::pg_mode. */
enum tegra_vi_pg_mode {
	TEGRA_VI_PG_DISABLED = 0,
	TEGRA_VI_PG_DIRECT,
	TEGRA_VI_PG_PATCH,
};
/* Interlaced frame layout, see tegra_channel::interlace_type. */
enum interlaced_type {
	Top_Bottom = 0,
	Interleaved,
};
/**
 * struct tegra_channel_buffer - video channel buffer
 * @buf: vb2 buffer base object
 * @queue: buffer list entry in the channel queued buffers list
 * @chan: channel that uses the buffer
 * @vb2_state: V4L2 buffer state (active, done, error)
 * @capture_descr_index: Index into the VI capture descriptor queue
 * @addr: Tegra IOVA buffer address for VI output
 * @thresh: per-CSI-block syncpoint threshold — assumed; TODO confirm
 * @version: snapshot of the channel restart version — assumed; TODO confirm
 * @state: buffer capture state — assumed; TODO confirm
 */
struct tegra_channel_buffer {
	struct vb2_v4l2_buffer buf;
	struct list_head queue;
	struct tegra_channel *chan;
	unsigned int vb2_state;
	unsigned int capture_descr_index[TEGRA_CSI_BLOCKS];
	dma_addr_t addr;
	u32 thresh[TEGRA_CSI_BLOCKS];
	int version;
	int state;
};
/* Convert an embedded vb2_v4l2_buffer pointer back to its
 * tegra_channel_buffer container. */
#define to_tegra_channel_buffer(vb) \
	container_of(vb, struct tegra_channel_buffer, buf)
/**
 * struct tegra_vi_graph_entity - Entity in the video graph
 * @list: list entry in a graph entities list
 * @node: the entity's DT node
 * @entity: media entity, from the corresponding V4L2 subdev
 * @asd: subdev asynchronous registration information
 * @subdev: V4L2 subdev (presumably set when the async subdev is bound;
 *          confirm in the graph notifier code)
 */
struct tegra_vi_graph_entity {
	struct list_head list;
	struct device_node *node;
	struct media_entity *entity;
	struct v4l2_async_subdev asd;
	struct v4l2_subdev *subdev;
};
/**
 * struct tegra_channel - Tegra video channel
 * @list: list entry in a composite device dmas list
 * @video: V4L2 video device associated with the video channel
 * @video_lock: serializes operations on the video device — assumed; TODO confirm
 * @pad: media pad for the video device entity
 * @pipe: pipeline belonging to the channel
 *
 * @vi: parent Tegra media-controller device (struct tegra_mc_vi)
 *
 * @kthread_capture_start: kernel thread task structure of this video channel
 * @start_wait: wait queue structure for kernel thread
 *
 * @format: active V4L2 pixel format
 * @fmtinfo: format information corresponding to the active @format
 *
 * @queue: vb2 buffers queue
 * @alloc_ctx: allocation context for the vb2 @queue
 * @sequence: V4L2 buffers sequence number
 *
 * @capture: list of queued buffers for capture
 * @start_lock: protects the @capture queued-buffers list — assumed; TODO confirm
 *
 * @csibase: CSI register bases
 * @stride_align: channel buffer stride alignment, default is 1
 * @width_align: image width alignment, default is 1
 * @height_align: channel buffer height alignment, default is 1
 * @size_align: channel buffer size alignment, default is 1
 * @port: CSI port of this video channel
 * @io_id: Tegra IO rail ID of this video channel
 *
 * @fmts_bitmap: a bitmap for formats supported
 * @bypass: bypass flag for VI bypass mode
 * @restart_version: incremented every time either capture or release threads
 *                   wants to reset VI. it is appended to each buffer processed
 *                   by the capture thread, and inspected by each buffer
 *                   processed by the receive thread.
 * @capture_version: thread-local copy of @restart_version created when the
 *                   capture thread resets the VI.
 */
struct tegra_channel {
	unsigned int id;
	struct list_head list;
	struct video_device *video;
	struct media_pad pad;
	struct media_pipeline pipe;
	struct mutex video_lock;
	struct tegra_mc_vi *vi;
	struct v4l2_subdev *subdev[MAX_SUBDEVICES];
	struct v4l2_subdev *subdev_on_csi;
	struct v4l2_ctrl_handler ctrl_handler;
	struct v4l2_pix_format format;
	const struct tegra_video_format *fmtinfo;
	const struct tegra_video_format *video_formats[MAX_FORMAT_NUM];
	unsigned int num_video_formats;
	struct mutex stop_kthread_lock;
	unsigned char port[TEGRA_CSI_BLOCKS];
	unsigned int virtual_channel;
	unsigned int syncpt[TEGRA_CSI_BLOCKS][MAX_SYNCPT_PER_CHANNEL];
	unsigned int syncpoint_fifo[TEGRA_CSI_BLOCKS][MAX_SYNCPT_PER_CHANNEL];
	unsigned int buffer_offset[TEGRA_CSI_BLOCKS];
	unsigned int *buffer_state;
	struct vb2_v4l2_buffer **buffers;
	unsigned long timeout;
	atomic_t restart_version;
	int capture_version;
	unsigned int save_index;
	unsigned int free_index;
	unsigned int num_buffers;
	spinlock_t buffer_lock;
	unsigned int released_bufs;
	unsigned int capture_queue_depth;
	unsigned int capture_descr_index;
	unsigned int capture_descr_sequence;
	unsigned int capture_reqs_enqueued;
	struct task_struct *kthread_capture_start;
	struct task_struct *kthread_release;
	wait_queue_head_t start_wait;
	wait_queue_head_t release_wait;
	struct task_struct *kthread_capture_dequeue;
	wait_queue_head_t dequeue_wait;
	struct vb2_queue queue;
	void *alloc_ctx;
	bool init_done;
	struct list_head capture;
	struct list_head release;
	struct list_head dequeue;
	spinlock_t start_lock;
	spinlock_t release_lock;
	spinlock_t dequeue_lock;
	struct work_struct status_work;
	struct work_struct error_work;
	void __iomem *csibase[TEGRA_CSI_BLOCKS];
	unsigned int stride_align;
	unsigned int preferred_stride;
	unsigned int width_align;
	unsigned int height_align;
	unsigned int size_align;
	unsigned int valid_ports;
	unsigned int total_ports;
	unsigned int numlanes;
	unsigned int io_id;
	unsigned int num_subdevs;
	unsigned int sequence;
	unsigned int saved_ctx_bypass;
	unsigned int saved_ctx_pgmode;
	unsigned int gang_mode;
	unsigned int gang_width;
	unsigned int gang_height;
	unsigned int gang_bytesperline;
	unsigned int gang_sizeimage;
	unsigned int embedded_data_width;
	unsigned int embedded_data_height;
	DECLARE_BITMAP(fmts_bitmap, MAX_FORMAT_NUM);
	atomic_t power_on_refcnt;
	struct v4l2_fh *fh;
	bool bypass;
	bool write_ispformat;
	bool low_latency;
	enum tegra_vi_pg_mode pg_mode;
	bool bfirst_fstart;
	enum channel_capture_state capture_state;
	bool queue_error;
	spinlock_t capture_state_lock;
	atomic_t is_streaming;
	int requested_kbyteps;
	unsigned long requested_hz;
	struct vi_notify_channel *vnc[TEGRA_CSI_BLOCKS];
	int vnc_id[TEGRA_CSI_BLOCKS];
	int grp_id;
	struct v4l2_async_notifier notifier;
	struct list_head entities;
	struct device_node *endpoint_node; /* endpoint of_node in vi */
	unsigned int subdevs_bound;
	unsigned int link_status;
	struct nvcsi_deskew_context *deskew_ctx;
	struct tegra_vi_channel *tegra_vi_channel[TEGRA_CSI_BLOCKS];
	struct capture_descriptor *request[TEGRA_CSI_BLOCKS];
	bool is_slvsec;
	int is_interlaced;
	enum interlaced_type interlace_type;
	int interlace_bplfactor;
	atomic_t syncpt_depth;
	struct rw_semaphore reset_lock;
	dma_addr_t emb_buf;
	void *emb_buf_addr;
	unsigned int emb_buf_size;
};
/* Map a video device back to its owning tegra_channel.
 * NOTE(review): @video is a *pointer* member, so the argument must be the
 * address of the @video field itself (struct video_device **), not the
 * video_device it points to — confirm at call sites. */
#define to_tegra_channel(vdev) \
	container_of(vdev, struct tegra_channel, video)
/**
 * struct tegra_mc_vi - NVIDIA Tegra Media controller structure
 * @vi: VI device handle
 * @ndev: VI platform device
 * @v4l2_dev: V4L2 device
 * @media_dev: media device
 * @dev: device struct
 * @ndata: NvHost VI device information
 *
 * @num_channels: number of video channels
 * @num_subdevs: number of subdevs in the pipeline
 *
 * @vi_chans: list of channels at the pipeline output and input
 *
 * @ctrl_handler: V4L2 control handler
 * @pattern: test pattern generator V4L2 control
 * @pg_mode: test pattern generator mode (disabled/direct/patch)
 *
 * @has_sensors: a flag indicating whether a real sensor is connected
 */
struct tegra_mc_vi {
	struct vi *vi;
	struct platform_device *ndev;
	struct v4l2_device v4l2_dev;
	struct media_device media_dev;
	struct device *dev;
	struct nvhost_device_data *ndata;
	struct regulator *reg;
	struct clk *clk;
	struct clk *parent_clk;
	unsigned int num_channels;
	unsigned int num_subdevs;
	struct tegra_csi_device *csi;
	struct list_head vi_chans;
	struct tegra_channel *tpg_start;
	void __iomem *iomem;
	struct v4l2_ctrl_handler ctrl_handler;
	struct v4l2_ctrl *pattern;
	enum tegra_vi_pg_mode pg_mode;
	bool has_sensors;
	atomic_t power_on_refcnt;
	atomic_t vb2_dma_alloc_refcnt;
	struct mutex bw_update_lock;
	unsigned long aggregated_kbyteps;
	unsigned long max_requested_hz;
	struct mutex mipical_lock;
	bool bypass;
	const struct tegra_vi_fops *fops;
};
/*
 * Tegra V4L2 media-controller / VI channel framework API declarations
 * (implementations live in the vi/ sources built by this module's Makefile).
 */
int tegra_vi_get_port_info(struct tegra_channel *chan,
			struct device_node *node, unsigned int index);
void tegra_vi_v4l2_cleanup(struct tegra_mc_vi *vi);
int tegra_vi_v4l2_init(struct tegra_mc_vi *vi);
int tegra_vi_tpg_graph_init(struct tegra_mc_vi *vi);
int tegra_vi_graph_init(struct tegra_mc_vi *vi);
void tegra_vi_graph_cleanup(struct tegra_mc_vi *vi);
int tegra_channel_init(struct tegra_channel *chan);
void tegra_vi_channels_unregister(struct tegra_mc_vi *vi);
int tegra_vi_channels_init(struct tegra_mc_vi *vi);
int tegra_channel_cleanup(struct tegra_channel *chan);
int tegra_vi_channels_cleanup(struct tegra_mc_vi *vi);
int tegra_channel_init_subdevices(struct tegra_channel *chan);
void tegra_channel_remove_subdevices(struct tegra_channel *chan);
struct v4l2_subdev *tegra_channel_find_linked_csi_subdev(
	struct tegra_channel *chan);
int tegra_vi2_power_on(struct tegra_mc_vi *vi);
void tegra_vi2_power_off(struct tegra_mc_vi *vi);
int tegra_vi4_power_on(struct tegra_mc_vi *vi);
void tegra_vi4_power_off(struct tegra_mc_vi *vi);
int tegra_vi5_enable(struct tegra_mc_vi *vi);
void tegra_vi5_disable(struct tegra_mc_vi *vi);
int tegra_clean_unlinked_channels(struct tegra_mc_vi *vi);
int tegra_channel_s_ctrl(struct v4l2_ctrl *ctrl);
int tegra_vi_media_controller_init(struct tegra_mc_vi *mc_vi,
			struct platform_device *pdev);
int tegra_capture_vi_media_controller_init(struct tegra_mc_vi *mc_vi,
			struct platform_device *pdev);
void tegra_vi_media_controller_cleanup(struct tegra_mc_vi *mc_vi);
void tegra_channel_ec_close(struct tegra_mc_vi *mc_vi);
void tegra_channel_query_hdmiin_unplug(struct tegra_channel *chan,
			struct v4l2_event *event);
int tegra_vi_mfi_work(struct tegra_mc_vi *vi, int csiport);
int tpg_vi_media_controller_init(struct tegra_mc_vi *mc_vi, int pg_mode);
void tpg_vi_media_controller_cleanup(struct tegra_mc_vi *mc_vi);
struct tegra_mc_vi *tegra_get_mc_vi(void);
u32 tegra_core_get_fourcc_by_idx(struct tegra_channel *chan,
			unsigned int index);
int tegra_core_get_idx_by_code(struct tegra_channel *chan,
			unsigned int code, unsigned offset);
int tegra_core_get_code_by_fourcc(struct tegra_channel *chan,
			unsigned int fourcc, unsigned offset);
const struct tegra_video_format *tegra_core_get_format_by_code(
			struct tegra_channel *chan,
			unsigned int code, unsigned offset);
const struct tegra_video_format *tegra_core_get_format_by_fourcc(
			struct tegra_channel *chan, u32 fourcc);
void tegra_channel_queued_buf_done(struct tegra_channel *chan,
			enum vb2_buffer_state state, bool multi_queue);
int tegra_channel_set_stream(struct tegra_channel *chan, bool on);
int tegra_channel_write_blobs(struct tegra_channel *chan);
void tegra_channel_ring_buffer(struct tegra_channel *chan,
			struct vb2_v4l2_buffer *vb,
			struct timespec64 *ts, int state);
struct tegra_channel_buffer *dequeue_buffer(struct tegra_channel *chan,
			bool requeue);
struct tegra_channel_buffer *dequeue_dequeue_buffer(struct tegra_channel *chan);
int tegra_channel_error_recover(struct tegra_channel *chan, bool queue_error);
int tegra_channel_alloc_buffer_queue(struct tegra_channel *chan,
			unsigned int num_buffers);
void tegra_channel_dealloc_buffer_queue(struct tegra_channel *chan);
void tegra_channel_init_ring_buffer(struct tegra_channel *chan);
void free_ring_buffers(struct tegra_channel *chan, int frames);
void release_buffer(struct tegra_channel *chan,
			struct tegra_channel_buffer *buf);
void set_timestamp(struct tegra_channel_buffer *buf,
			const struct timespec64 *ts);
void enqueue_inflight(struct tegra_channel *chan,
			struct tegra_channel_buffer *buf);
struct tegra_channel_buffer *dequeue_inflight(struct tegra_channel *chan);
int tegra_channel_set_power(struct tegra_channel *chan, bool on);
int tegra_channel_init_video(struct tegra_channel *chan);
int tegra_channel_cleanup_video(struct tegra_channel *chan);
/**
 * struct tegra_vi_fops - per-generation VI hardware operations
 *
 * Dispatch table letting the common channel code drive different VI
 * hardware generations (vi2/vi4/vi5 helpers are declared above) through
 * one interface.
 */
struct tegra_vi_fops {
	/* power the VI unit on/off for a channel */
	int (*vi_power_on)(struct tegra_channel *chan);
	void (*vi_power_off)(struct tegra_channel *chan);
	/* start/stop capture on a videobuf2 queue */
	int (*vi_start_streaming)(struct vb2_queue *vq, u32 count);
	int (*vi_stop_streaming)(struct vb2_queue *vq);
	/* negotiate the number of buffers for the capture queue */
	int (*vi_setup_queue)(struct tegra_channel *chan,
			unsigned int *nbuffers);
	int (*vi_error_recover)(struct tegra_channel *chan, bool queue_error);
	/* register generation-specific V4L2 controls */
	int (*vi_add_ctrls)(struct tegra_channel *chan);
	void (*vi_init_video_formats)(struct tegra_channel *chan);
	/* handler for ioctls not covered by the common V4L2 ops */
	long (*vi_default_ioctl)(struct file *file, void *fh,
			bool use_prio, unsigned int cmd, void *arg);
	/* MFI work for the given CSI port (see tegra_vi_mfi_work()) */
	int (*vi_mfi_work)(struct tegra_mc_vi *vi, int port);
	/* align bytes-per-line to the unit's stride requirement */
	void (*vi_stride_align)(unsigned int *bpl);
	/* look up the device handle backing a CSI stream id
	 * (renamed param: was misspelled "csi_steam_id")
	 */
	void (*vi_unit_get_device_handle)(struct platform_device *pdev,
			uint32_t csi_stream_id, struct device **dev);
};
/**
 * struct tegra_csi_fops - per-generation CSI receiver operations
 *
 * Dispatch table for the CSI side of a capture channel; one port of a
 * channel is identified by @port_idx in the per-port callbacks.
 */
struct tegra_csi_fops {
	/* CSI block power management */
	int (*csi_power_on)(struct tegra_csi_device *csi);
	int (*csi_power_off)(struct tegra_csi_device *csi);
	/* start/stop the receiver on one port of a channel */
	int (*csi_start_streaming)(struct tegra_csi_channel *chan,
		int port_idx);
	void (*csi_stop_streaming)(struct tegra_csi_channel *chan,
		int port_idx);
	/* override the programmed format on a port (use case not
	 * visible here -- see implementations)
	 */
	void (*csi_override_format)(struct tegra_csi_channel *chan,
		int port_idx);
	int (*csi_error_recover)(struct tegra_csi_channel *chan, int port_idx);
	/* run MIPI calibration for the channel's pads */
	int (*mipical)(struct tegra_csi_channel *chan);
	int (*hw_init)(struct tegra_csi_device *csi);
	/* set TPG (test pattern generator) gain ratio */
	int (*tpg_set_gain)(struct tegra_csi_channel *chan, int gain_ratio_tpg);
};
/* Match data for T210-class devices: VI and CSI live under one node,
 * so both ops tables are carried together.
 */
struct tegra_t210_vi_data {
	struct nvhost_device_data *info;	/* nvhost platform data */
	const struct tegra_vi_fops *vi_fops;
	const struct tegra_csi_fops *csi_fops;
};
/* Match data for a standalone VI device (CSI handled separately). */
struct tegra_vi_data {
	struct nvhost_device_data *info;	/* nvhost platform data */
	const struct tegra_vi_fops *vi_fops;
};
/* Match data for a standalone CSI device. */
struct tegra_csi_data {
	struct nvhost_device_data *info;	/* nvhost platform data */
	const struct tegra_csi_fops *csi_fops;
};
#endif

102
include/media/nvc_focus.h Normal file
View File

@@ -0,0 +1,102 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2022 NVIDIA Corporation. All rights reserved.
*/
#ifndef __NVC_FOCUS_H__
#define __NVC_FOCUS_H__
/* NVC_FOCUS_CAP_VER0: invalid */
/* NVC_FOCUS_CAP_VER1:
* __u32 version
* __u32 actuator_range
* __u32 settle_time
*/
#define NVC_FOCUS_CAP_VER1 1
/* NVC_FOCUS_CAP_VER2 adds:
* __u32 focus_macro;
* __u32 focus_hyper;
* __u32 focus_infinity;
*/
#define NVC_FOCUS_CAP_VER2 2
#define NVC_FOCUS_CAP_VER 2 /* latest version */
#define AF_POS_INVALID_VALUE INT_MAX
/* These are the slew rate values coming down from the configuration */
/* Disabled is the same as fastest. Default is the default */
/* slew rate configuration in the focuser */
#define SLEW_RATE_DISABLED 0
#define SLEW_RATE_DEFAULT 1
#define SLEW_RATE_SLOWEST 9
/* Focuser state machine status as reported to userspace.
 * Note the enumeration intentionally starts at 1 (0 is not a valid
 * status); FORCE32 pins the enum to a 32-bit ABI size.
 */
enum nvc_focus_sts {
	NVC_FOCUS_STS_UNKNOWN = 1,
	NVC_FOCUS_STS_NO_DEVICE,
	NVC_FOCUS_STS_INITIALIZING,
	NVC_FOCUS_STS_INIT_ERR,
	NVC_FOCUS_STS_WAIT_FOR_MOVE_END,
	NVC_FOCUS_STS_WAIT_FOR_SETTLE,
	NVC_FOCUS_STS_LENS_SETTLED,
	NVC_FOCUS_STS_FORCE32 = 0x7FFFFFFF
};
/* Static lens parameters reported by the focuser driver.
 * Units/encoding (likely fixed point) are defined by the consumer --
 * confirm against the userspace contract.
 */
struct nvc_focus_nvc {
	__u32 focal_length;
	__u32 fnumber;
	__u32 max_aperture;
} __packed;
/* Focuser capability block; layout is versioned via @version
 * (NVC_FOCUS_CAP_VER1/VER2 above describe which fields are valid).
 */
struct nvc_focus_cap {
	__u32 version;		/* NVC_FOCUS_CAP_VERx */
	__s32 actuator_range;
	__u32 settle_time;
	__s32 focus_macro;	/* VER2+ */
	__s32 focus_hyper;	/* VER2+ */
	__s32 focus_infinity;	/* VER2+ */
	__u32 slew_rate;	/* SLEW_RATE_* above */
	__u32 position_translate;
} __packed;
#define NV_FOCUSER_SET_MAX 10
#define NV_FOCUSER_SET_DISTANCE_PAIR 16
/* One (focuser position, distance) calibration pair.
 * Semantics of @fdn are not visible here -- confirm with userspace.
 */
struct nv_focuser_set_dist_pairs {
	__s32 fdn;
	__s32 distance;
} __packed;
/* One focuser calibration set (e.g. per posture); up to
 * NV_FOCUSER_SET_DISTANCE_PAIR distance pairs are valid, the actual
 * count is @num_dist_pairs.
 */
struct nv_focuser_set {
	__s32 posture;
	__s32 macro;		/* macro-focus position */
	__s32 hyper;		/* hyperfocal position */
	__s32 inf;		/* infinity position */
	__s32 hysteresis;
	__u32 settle_time;
	__s32 macro_offset;
	__s32 inf_offset;
	__u32 num_dist_pairs;	/* valid entries in dist_pair[] */
	struct nv_focuser_set_dist_pairs
		dist_pair[NV_FOCUSER_SET_DISTANCE_PAIR];
} __packed;
/* Full focuser configuration exchanged with userspace; holds up to
 * NV_FOCUSER_SET_MAX calibration sets (@num_focuser_sets valid).
 * __packed: this is an ABI structure -- do not reorder fields.
 */
struct nv_focuser_config {
	__u32 focal_length;
	__u32 fnumber;
	__u32 max_aperture;
	__u32 range_ends_reversed;	/* nonzero if low/high are swapped */
	__s32 pos_working_low;
	__s32 pos_working_high;
	__s32 pos_actual_low;
	__s32 pos_actual_high;
	__u32 slew_rate;		/* SLEW_RATE_* */
	__u32 circle_of_confusion;
	__u32 num_focuser_sets;		/* valid entries in focuser_set[] */
	struct nv_focuser_set focuser_set[NV_FOCUSER_SET_MAX];
} __packed;
#endif /* __NVC_FOCUS_H__ */

View File

@@ -0,0 +1,42 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* sensor_common.h - utilities for tegra camera driver
*
* Copyright (c) 2017-2022, NVIDIA Corporation. All rights reserved.
*/
#ifndef __sensor_common__
#define __sensor_common__
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_device.h>
#include <linux/videodev2.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
#include <media/v4l2-ctrls.h>
#include <linux/v4l2-mediabus.h>
#include <media/tegra-v4l2-camera.h>
#include <uapi/media/camera_device.h>
/* Parsed device-tree properties for one sensor: a common config plus
 * one sensor_mode_properties entry per DT "mode" node (see
 * sensor_common_parse_num_modes()).
 */
struct sensor_properties {
	struct sensor_cfg cfg;
	/* sensor_modes points to an array of mode properties */
	struct sensor_mode_properties *sensor_modes;
	u32 num_modes;		/* number of entries in sensor_modes */
};
int sensor_common_parse_num_modes(const struct device *dev);
int sensor_common_init_sensor_properties(
struct device *dev, struct device_node *node,
struct sensor_properties *sensor);
#endif /* __sensor_common__ */

View File

@@ -0,0 +1,174 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TEGRA_V4L2_CAMERA.h - utilities for tegra camera driver
*
* Copyright (c) 2017-2022, NVIDIA Corporation. All rights reserved.
*/
#ifndef __TEGRA_V4L2_CAMERA__
#define __TEGRA_V4L2_CAMERA__
#include <linux/v4l2-controls.h>
#define TEGRA_CAMERA_CID_BASE (V4L2_CTRL_CLASS_CAMERA | 0x2000)
#define TEGRA_CAMERA_CID_FRAME_LENGTH (TEGRA_CAMERA_CID_BASE+0)
#define TEGRA_CAMERA_CID_COARSE_TIME (TEGRA_CAMERA_CID_BASE+1)
#define TEGRA_CAMERA_CID_COARSE_TIME_SHORT (TEGRA_CAMERA_CID_BASE+2)
#define TEGRA_CAMERA_CID_GROUP_HOLD (TEGRA_CAMERA_CID_BASE+3)
#define TEGRA_CAMERA_CID_HDR_EN (TEGRA_CAMERA_CID_BASE+4)
#define TEGRA_CAMERA_CID_EEPROM_DATA (TEGRA_CAMERA_CID_BASE+5)
#define TEGRA_CAMERA_CID_OTP_DATA (TEGRA_CAMERA_CID_BASE+6)
#define TEGRA_CAMERA_CID_FUSE_ID (TEGRA_CAMERA_CID_BASE+7)
#define TEGRA_CAMERA_CID_SENSOR_MODE_ID (TEGRA_CAMERA_CID_BASE+8)
#define TEGRA_CAMERA_CID_GAIN (TEGRA_CAMERA_CID_BASE+9)
#define TEGRA_CAMERA_CID_EXPOSURE (TEGRA_CAMERA_CID_BASE+10)
#define TEGRA_CAMERA_CID_FRAME_RATE (TEGRA_CAMERA_CID_BASE+11)
#define TEGRA_CAMERA_CID_EXPOSURE_SHORT (TEGRA_CAMERA_CID_BASE+12)
#define TEGRA_CAMERA_CID_STEREO_EEPROM (TEGRA_CAMERA_CID_BASE+13)
#define TEGRA_CAMERA_CID_SENSOR_CONFIG (TEGRA_CAMERA_CID_BASE+50)
#define TEGRA_CAMERA_CID_SENSOR_MODE_BLOB (TEGRA_CAMERA_CID_BASE+51)
#define TEGRA_CAMERA_CID_SENSOR_CONTROL_BLOB (TEGRA_CAMERA_CID_BASE+52)
#define TEGRA_CAMERA_CID_GAIN_TPG (TEGRA_CAMERA_CID_BASE+70)
#define TEGRA_CAMERA_CID_GAIN_TPG_EMB_DATA_CFG (TEGRA_CAMERA_CID_BASE+71)
#define TEGRA_CAMERA_CID_VI_BYPASS_MODE (TEGRA_CAMERA_CID_BASE+100)
#define TEGRA_CAMERA_CID_OVERRIDE_ENABLE (TEGRA_CAMERA_CID_BASE+101)
#define TEGRA_CAMERA_CID_VI_HEIGHT_ALIGN (TEGRA_CAMERA_CID_BASE+102)
#define TEGRA_CAMERA_CID_VI_SIZE_ALIGN (TEGRA_CAMERA_CID_BASE+103)
#define TEGRA_CAMERA_CID_WRITE_ISPFORMAT (TEGRA_CAMERA_CID_BASE+104)
#define TEGRA_CAMERA_CID_SENSOR_SIGNAL_PROPERTIES (TEGRA_CAMERA_CID_BASE+105)
#define TEGRA_CAMERA_CID_SENSOR_IMAGE_PROPERTIES (TEGRA_CAMERA_CID_BASE+106)
#define TEGRA_CAMERA_CID_SENSOR_CONTROL_PROPERTIES (TEGRA_CAMERA_CID_BASE+107)
#define TEGRA_CAMERA_CID_SENSOR_DV_TIMINGS (TEGRA_CAMERA_CID_BASE+108)
#define TEGRA_CAMERA_CID_LOW_LATENCY (TEGRA_CAMERA_CID_BASE+109)
#define TEGRA_CAMERA_CID_VI_PREFERRED_STRIDE (TEGRA_CAMERA_CID_BASE+110)
/**
* This is temporary with the current v4l2 infrastructure
* currently discussing with upstream maintainers our proposals and
* better approaches to resolve this
*/
#define TEGRA_CAMERA_CID_SENSOR_MODES (TEGRA_CAMERA_CID_BASE + 130)
#define MAX_BUFFER_SIZE 32
#define MAX_CID_CONTROLS 32
#define MAX_NUM_SENSOR_MODES 30
#define OF_MAX_STR_LEN 256
#define OF_SENSORMODE_PREFIX ("mode")
/*
* Scaling factor for converting a Q10.22 fixed point value
* back to its original floating point value
*/
#define FIXED_POINT_SCALING_FACTOR (1ULL << 22)
#define TEGRA_CAM_MAX_STRING_CONTROLS 8
#define TEGRA_CAM_STRING_CTRL_EEPROM_INDEX 0
#define TEGRA_CAM_STRING_CTRL_FUSEID_INDEX 1
#define TEGRA_CAM_STRING_CTRL_OTP_INDEX 2
#define TEGRA_CAM_MAX_COMPOUND_CONTROLS 4
#define TEGRA_CAM_COMPOUND_CTRL_EEPROM_INDEX 0
#define CSI_PHY_MODE_DPHY 0
#define CSI_PHY_MODE_CPHY 1
#define SLVS_EC 2
/* A 64-bit value split into two 32-bit words (high word first).
 * Used so 64-bit sensor properties can travel through 32-bit-word
 * V4L2 compound controls -- confirm packing against userspace readers.
 */
struct unpackedU64 {
	__u32 high;
	__u32 low;
};

/* View a 64-bit property either whole or as its two 32-bit words.
 * NOTE(review): the tag __u64val carries a double-underscore prefix,
 * which is reserved for the implementation; kept as-is for source
 * compatibility with existing users.
 */
union __u64val {
	struct unpackedU64 unpacked;
	__u64 val;
};
/* Electrical/CSI-signal properties of one sensor mode, exported via the
 * TEGRA_CAMERA_CID_SENSOR_SIGNAL_PROPERTIES control (size is
 * SENSOR_SIGNAL_PROPERTIES_CID_SIZE 32-bit words -- keep __u32-aligned).
 */
struct sensor_signal_properties {
	__u32 readout_orientation;
	__u32 num_lanes;		/* active CSI data lanes */
	__u32 mclk_freq;
	union __u64val pixel_clock;
	__u32 cil_settletime;
	__u32 discontinuous_clk;
	__u32 dpcm_enable;
	__u32 tegra_sinterface;		/* serial interface index */
	__u32 phy_mode;			/* CSI_PHY_MODE_DPHY/CPHY or SLVS_EC */
	__u32 deskew_initial_enable;
	__u32 deskew_periodic_enable;
	union __u64val serdes_pixel_clock;
	union __u64val mipi_clock;
};
/* Image geometry/format of one sensor mode; reserved words pad the
 * struct for forward ABI compatibility.
 */
struct sensor_image_properties {
	__u32 width;			/* active pixels per line */
	__u32 height;			/* active lines per frame */
	__u32 line_length;		/* total line length incl. blanking */
	__u32 pixel_format;		/* V4L2 fourcc -- confirm with parser */
	__u32 embedded_metadata_height;	/* embedded-data lines */
	__u32 reserved[11];
};
/* Horizontal/vertical blanking timings of one sensor mode
 * (DV-timings style: front porch / sync / back porch per axis).
 */
struct sensor_dv_timings {
	__u32 hfrontporch;
	__u32 hsync;
	__u32 hbackporch;
	__u32 vfrontporch;
	__u32 vsync;
	__u32 vbackporch;
	__u32 reserved[10];	/* pad for forward ABI compatibility */
};
/* Exposure/gain/framerate control ranges of one sensor mode.
 * The *_factor fields scale the associated values (fixed-point
 * convention -- see FIXED_POINT_SCALING_FACTOR above; confirm per
 * field against the DT parser). 64-bit exposure times use __u64val
 * so the struct stays an array of 32-bit words for the V4L2 control.
 */
struct sensor_control_properties {
	__u32 gain_factor;		/* scaling for gain values */
	__u32 framerate_factor;		/* scaling for framerate values */
	__u32 inherent_gain;
	__u32 min_gain_val;
	__u32 max_gain_val;
	__u32 min_hdr_ratio;
	__u32 max_hdr_ratio;
	__u32 min_framerate;
	__u32 max_framerate;
	union __u64val min_exp_time;
	union __u64val max_exp_time;
	__u32 step_gain_val;
	__u32 step_framerate;
	__u32 exposure_factor;		/* scaling for exposure times */
	union __u64val step_exp_time;
	__u32 default_gain;
	__u32 default_framerate;
	union __u64val default_exp_time;
	__u32 is_interlaced;
	__u32 interlace_type;
	__u32 reserved[10];		/* pad for forward ABI compatibility */
};
/* Everything describing one sensor mode; exported as one blob per mode
 * (SENSOR_MODE_PROPERTIES_CID_SIZE 32-bit words).
 */
struct sensor_mode_properties {
	struct sensor_signal_properties signal_properties;
	struct sensor_image_properties image_properties;
	struct sensor_control_properties control_properties;
	struct sensor_dv_timings dv_timings;
};
#define SENSOR_SIGNAL_PROPERTIES_CID_SIZE \
(sizeof(struct sensor_signal_properties) / sizeof(__u32))
#define SENSOR_IMAGE_PROPERTIES_CID_SIZE \
(sizeof(struct sensor_image_properties) / sizeof(__u32))
#define SENSOR_CONTROL_PROPERTIES_CID_SIZE \
(sizeof(struct sensor_control_properties) / sizeof(__u32))
#define SENSOR_DV_TIMINGS_CID_SIZE \
(sizeof(struct sensor_dv_timings) / sizeof(__u32))
#define SENSOR_MODE_PROPERTIES_CID_SIZE \
(sizeof(struct sensor_mode_properties) / sizeof(__u32))
#define SENSOR_CONFIG_SIZE \
(sizeof(struct sensor_cfg) / sizeof(__u32))
#define SENSOR_MODE_BLOB_SIZE \
(sizeof(struct sensor_blob) / sizeof(__u32))
#define SENSOR_CTRL_BLOB_SIZE \
(sizeof(struct sensor_blob) / sizeof(__u32))
#endif /* __TEGRA_V4L2_CAMERA__ */

View File

@@ -0,0 +1,126 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NVIDIA Tegra Video Input Device Driver Core Helpers
*
* Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef __TEGRA_CORE_H__
#define __TEGRA_CORE_H__
#include <media/v4l2-subdev.h>
/* Minimum and maximum width and height common to Tegra video input device. */
#define TEGRA_MIN_WIDTH 32U
#define TEGRA_MAX_WIDTH 32768U
#define TEGRA_MIN_HEIGHT 32U
#define TEGRA_MAX_HEIGHT 32768U
/* Width alignment */
#define TEGRA_WIDTH_ALIGNMENT 1
/* Stride alignment */
#define TEGRA_STRIDE_ALIGNMENT 1
/* Height alignment */
#define TEGRA_HEIGHT_ALIGNMENT 1
/* Size alignment */
#define TEGRA_SIZE_ALIGNMENT 0
/* 1080p resolution as default resolution for test pattern generator */
#define TEGRA_DEF_WIDTH 1920
#define TEGRA_DEF_HEIGHT 1080
#define TEGRA_VF_DEF MEDIA_BUS_FMT_SRGGB10_1X10
#define TEGRA_IMAGE_FORMAT_DEF 32
/* CSI image data types. The numeric values track the MIPI CSI-2
 * data-type codes (e.g. YUV420_8 = 24 = 0x18, RAW8 = 42 = 0x2A) --
 * verify against the CSI-2 spec before adding entries.
 */
enum tegra_image_dt {
	TEGRA_IMAGE_DT_YUV420_8 = 24,
	TEGRA_IMAGE_DT_YUV420_10,
	TEGRA_IMAGE_DT_YUV420CSPS_8 = 28,
	TEGRA_IMAGE_DT_YUV420CSPS_10,
	TEGRA_IMAGE_DT_YUV422_8,
	TEGRA_IMAGE_DT_YUV422_10,
	TEGRA_IMAGE_DT_RGB444,
	TEGRA_IMAGE_DT_RGB555,
	TEGRA_IMAGE_DT_RGB565,
	TEGRA_IMAGE_DT_RGB666,
	TEGRA_IMAGE_DT_RGB888,
	TEGRA_IMAGE_DT_RAW6 = 40,
	TEGRA_IMAGE_DT_RAW7,
	TEGRA_IMAGE_DT_RAW8,
	TEGRA_IMAGE_DT_RAW10,
	TEGRA_IMAGE_DT_RAW12,
	TEGRA_IMAGE_DT_RAW14,
};
/* Supported CSI to VI Data Formats */
/* Internal video-format codes used in tegra_video_format tables;
 * distinct from both the CSI data type and the V4L2 fourcc.
 */
enum tegra_vf_code {
	TEGRA_VF_RAW6 = 0,
	TEGRA_VF_RAW7,
	TEGRA_VF_RAW8,
	TEGRA_VF_RAW10,
	TEGRA_VF_RAW12,
	TEGRA_VF_RAW14,
	TEGRA_VF_EMBEDDED8,
	TEGRA_VF_RGB565,
	TEGRA_VF_RGB555,
	TEGRA_VF_RGB888,
	TEGRA_VF_RGB444,
	TEGRA_VF_RGB666,
	TEGRA_VF_YUV422,
	TEGRA_VF_YUV420,
	TEGRA_VF_YUV420_CSPS,
};
/**
* struct tegra_frac
* @numerator: numerator of the fraction
* @denominator: denominator of the fraction
*/
struct tegra_frac {
unsigned int numerator;
unsigned int denominator;
};
/**
* struct tegra_video_format - Tegra video format description
* @vf_code: video format code
* @width: format width in bits per component
* @code: media bus format code
* @bpp: bytes per pixel fraction (when stored in memory)
* @img_fmt: image format
* @img_dt: image data type
* @fourcc: V4L2 pixel format FCC identifier
* @description: format description, suitable for userspace
*/
struct tegra_video_format {
enum tegra_vf_code vf_code;
unsigned int width;
unsigned int code;
struct tegra_frac bpp;
u32 img_fmt;
enum tegra_image_dt img_dt;
u32 fourcc;
__u8 description[32];
};
#define TEGRA_VIDEO_FORMAT(VF_CODE, BPP, MBUS_CODE, FRAC_BPP_NUM, \
FRAC_BPP_DEN, FORMAT, DATA_TYPE, FOURCC, DESCRIPTION) \
{ \
TEGRA_VF_##VF_CODE, \
BPP, \
MEDIA_BUS_FMT_##MBUS_CODE, \
{FRAC_BPP_NUM, FRAC_BPP_DEN}, \
TEGRA_IMAGE_FORMAT_##FORMAT, \
TEGRA_IMAGE_DT_##DATA_TYPE, \
V4L2_PIX_FMT_##FOURCC, \
DESCRIPTION, \
}
u32 tegra_core_get_word_count(unsigned int frame_width,
const struct tegra_video_format *fmt);
u32 tegra_core_bytes_per_line(unsigned int width, unsigned int align,
const struct tegra_video_format *fmt);
const struct tegra_video_format *tegra_core_get_default_format(void);
#endif

View File

@@ -0,0 +1,55 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef __CAMERA_DEV_MFI_H__
#define __CAMERA_DEV_MFI_H__
#include <linux/list.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#define CAMERA_MAX_NAME_LENGTH 32
#define CAMERA_REGCACHE_MAX (128)
/* One cached register write (address/value pair). */
struct cam_reg {
	u32 addr;
	u32 val;
};
/* An i2c_msg with inline payload storage, so queued messages do not
 * reference caller-owned buffers (payload limited to 8 bytes).
 */
struct cam_i2c_msg {
	struct i2c_msg msg;
	u8 buf[8];
};
/* Per-device queue of deferred register/I2C writes flushed from the
 * MFI callback (tegra_camera_dev_mfi_cb). A device is backed either by
 * a regmap or by a raw i2c_client. prev_* keep the previously flushed
 * batch -- purpose not visible here (presumably dedup/debug; confirm
 * in the .c file). Up to CAMERA_REGCACHE_MAX entries per batch.
 */
struct camera_mfi_dev {
	char name[CAMERA_MAX_NAME_LENGTH];	/* lookup key */
	struct regmap *regmap;			/* regmap-backed path */
	struct cam_reg reg[CAMERA_REGCACHE_MAX];
	struct cam_reg prev_reg[CAMERA_REGCACHE_MAX];
	struct i2c_client *i2c_client;		/* raw-I2C path */
	struct cam_i2c_msg msg[CAMERA_REGCACHE_MAX];
	u32 num_used;				/* valid entries queued */
	u32 prev_num_used;
	struct list_head list;			/* global device list */
};
/* Argument handed to the MFI callback: the originating VI channel. */
struct mfi_cb_arg {
	u8 vi_chan;
};
void tegra_camera_dev_mfi_cb(void *stub);
int tegra_camera_dev_mfi_clear(struct camera_mfi_dev *cmfidev);
int tegra_camera_dev_mfi_wr_add(
struct camera_mfi_dev *cmfidev, u32 offset, u32 val);
int tegra_camera_dev_mfi_wr_add_i2c(
struct camera_mfi_dev *cmfidev, struct i2c_msg *msg, int num);
int tegra_camera_dev_mfi_add_regmap(
struct camera_mfi_dev **cmfidev, u8 *name, struct regmap *regmap);
int tegra_camera_dev_mfi_add_i2cclient(
struct camera_mfi_dev **cmfidev, u8 *name,
struct i2c_client *i2c_client);
#endif
/* __CAMERA_DEV_MFI_H__ */

View File

@@ -0,0 +1,95 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef _TEGRA_CAMERA_PLATFORM_H_
#define _TEGRA_CAMERA_PLATFORM_H_
#include <uapi/media/tegra_camera_platform.h>
/* avoid overflows */
#define DEFAULT_PG_CLK_RATE (UINT_MAX - 1)
/**
* enum tegra_camera_hw_type - camera hw engines
*/
enum tegra_camera_hw_type {
HWTYPE_NONE = 0,
HWTYPE_CSI,
HWTYPE_SLVSEC,
HWTYPE_VI,
HWTYPE_ISPA,
HWTYPE_ISPB,
HWTYPE_MAX,
};
/**
* enum tegra_camera_sensor_type - camera sensor types
*/
enum tegra_camera_sensor_type {
SENSORTYPE_NONE = 0,
SENSORTYPE_DPHY,
SENSORTYPE_CPHY,
SENSORTYPE_SLVSEC,
SENSORTYPE_VIRTUAL,
/* HDMI-IN or other inputs */
SENSORTYPE_OTHER,
SENSORTYPE_MAX,
};
/**
 * struct tegra_camera_dev_info - camera devices information
 * @priv: a unique identifier assigned during registration
 * @hw_type: type of HW engine as defined by the enum above
 * @bus_width: csi bus width for clock calculation
 * @overhead: hw/ sw overhead considered while calculations
 * @lane_speed: per-lane data rate used in bandwidth calculation
 * @lane_num: number of active csi lanes
 * @ppc: HW capability, pixels per clock
 * @clk_rate: calculated clk rate for this node
 * @pg_clk_rate: clk rate when the pattern generator drives this node
 * @actual_clk_rate: clk rate set by nvhost
 * @bw: calculated bw for this node
 * @use_max: populated by hw engine to decide it's clocking policy
 * @memory_latency: latency allowed for memory freq scaling
 * @pdev: pointer to platform_data
 * @sensor_type: type of sensor as defined by the enum above
 * @pixel_rate: pixel rate coming out of the sensor
 * @pixel_bit_depth: bits per pixel
 * @bpp: bytes per pixel
 * @stream_on: stream enabled on the channel
 * @device_node: list node
 */
struct tegra_camera_dev_info {
	void *priv;
	u32 hw_type;
	u32 bus_width;
	u32 overhead;
	u64 lane_speed;
	u32 lane_num;
	u32 ppc;
	u64 clk_rate;
	u64 pg_clk_rate;
	unsigned long actual_clk_rate;
	u64 bw;
	bool use_max;
	u32 memory_latency;
	struct platform_device *pdev;
	u32 sensor_type;
	u64 pixel_rate;
	u32 pixel_bit_depth;
	u32 bpp;
	bool stream_on;
	struct list_head device_node;
};
int tegra_camera_update_isobw(void);
int tegra_camera_emc_clk_enable(void);
int tegra_camera_emc_clk_disable(void);
int tegra_camera_device_register(struct tegra_camera_dev_info *cdev_info,
void *priv);
int tegra_camera_device_unregister(void *priv);
int tegra_camera_get_device_list_entry(const u32 hw_type, const void *priv,
struct tegra_camera_dev_info *cdev_info);
int tegra_camera_get_device_list_stats(u32 *n_sensors, u32 *n_hwtypes);
int tegra_camera_update_clknbw(void *priv, bool stream_on);
#endif

View File

@@ -0,0 +1,36 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef _TEGRA_CAMERA_H_
#define _TEGRA_CAMERA_H_
#include <linux/regulator/consumer.h>
#include <linux/i2c.h>
/* Physical camera input port: one of the CSI bricks A-F, or the
 * parallel VIP input.
 */
enum tegra_camera_port {
	TEGRA_CAMERA_PORT_CSI_A = 0,
	TEGRA_CAMERA_PORT_CSI_B,
	TEGRA_CAMERA_PORT_CSI_C,
	TEGRA_CAMERA_PORT_CSI_D,
	TEGRA_CAMERA_PORT_CSI_E,
	TEGRA_CAMERA_PORT_CSI_F,
	TEGRA_CAMERA_PORT_VIP,
};
/* Board-file platform data for a camera input: enable/disable hooks
 * plus static routing/orientation info.
 */
struct tegra_camera_platform_data {
	int (*enable_camera)(struct platform_device *pdev);
	void (*disable_camera)(struct platform_device *pdev);
	bool flip_h;			/* horizontal mirror */
	bool flip_v;			/* vertical flip */
	enum tegra_camera_port port;	/* input port the sensor is wired to */
	int lanes;		/* For CSI port only */
	bool continuous_clk;	/* For CSI port only */
};
/* Hooks for instantiating/removing the I2C client devices that belong
 * to a camera platform device.
 */
struct i2c_camera_ctrl {
	int (*new_devices)(struct platform_device *pdev);
	void (*remove_devices)(struct platform_device *pdev);
};
#endif /* _TEGRA_CAMERA_H_ */

View File

@@ -0,0 +1,45 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* tegracam_core.h - tegra camera framework core utilities
*
* Copyright (c) 2017-2022, NVIDIA Corporation. All rights reserved.
*/
#ifndef __TEGRACAM_CORE_H__
#define __TEGRACAM_CORE_H__
#include <media/camera_common.h>
/**
 * struct tegracam_device - per-sensor handle for the tegracam framework
 *
 * A sensor driver fills in the lower section and passes this to
 * tegracam_device_register() / tegracam_v4l2subdev_register(); the
 * framework owns the upper section.
 */
struct tegracam_device {
	struct camera_common_data *s_data;	/* framework-owned state */
	struct media_pad pad;			/* source pad of the subdev */
	u32 version;				/* see tegracam_version() */
	bool is_streaming;
	/* variables to be filled by the driver to register */
	char name[32];				/* sensor/subdev name */
	struct i2c_client *client;
	struct device *dev;
	u32 numctrls;				/* entries in ctrl_cid_list */
	const u32 *ctrl_cid_list;		/* supported control CIDs */
	const struct regmap_config *dev_regmap_config;
	struct camera_common_sensor_ops *sensor_ops;
	const struct v4l2_subdev_ops *v4l2sd_ops;
	const struct v4l2_subdev_internal_ops *v4l2sd_internal_ops;
	const struct media_entity_operations *media_ops;
	const struct tegracam_ctrl_ops *tcctrl_ops;
	void *priv;				/* driver private data */
};
u32 tegracam_version(u8 major, u8 minor, u8 patch);
u32 tegracam_query_version(const char *of_dev_name);
struct tegracam_device *to_tegracam_device(struct camera_common_data *data);
void tegracam_set_privdata(struct tegracam_device *tc_dev, void *priv);
void *tegracam_get_privdata(struct tegracam_device *tc_dev);
int tegracam_v4l2subdev_register(struct tegracam_device *tc_dev,
bool is_sensor);
void tegracam_v4l2subdev_unregister(struct tegracam_device *tc_dev);
int tegracam_device_register(struct tegracam_device *tc_dev);
void tegracam_device_unregister(struct tegracam_device *tc_dev);
#endif

View File

@@ -0,0 +1,39 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/**
* tegracam_utils.h - tegra camera framework core utilities
*
* Copyright (c) 2018-2022, NVIDIA Corporation. All rights reserved.
*/
#ifndef __TEGRACAM_UTILS_H__
#define __TEGRACAM_UTILS_H__
#include <media/camera_common.h>
/* Command opcodes encoded into a sensor_blob packet (see the
 * prepare_*_cmd() helpers below).
 */
enum sensor_opcode {
	SENSOR_OPCODE_DONE = 0,		/* end of command stream */
	SENSOR_OPCODE_READ = 1,
	SENSOR_OPCODE_WRITE = 2,
	SENSOR_OPCODE_SLEEP = 3,	/* delay, see prepare_sleep_cmd() */
};
int convert_table_to_blob(struct sensor_blob *pkt,
const struct reg_8 table[],
u16 wait_ms_addr, u16 end_addr);
int write_sensor_blob(struct regmap *regmap, struct sensor_blob *blob);
int tegracam_write_blobs(struct tegracam_ctrl_handler *hdl);
bool is_tvcf_supported(u32 version);
int format_tvcf_version(u32 version, char *buff, size_t size);
void conv_u32_u8arr(u32 val, u8 *buf);
void conv_u16_u8arr(u16 val, u8 *buf);
int prepare_write_cmd(struct sensor_blob *pkt,
u32 size, u32 addr, u8 *buf);
int prepare_read_cmd(struct sensor_blob *pkt,
u32 size, u32 addr);
int prepare_sleep_cmd(struct sensor_blob *pkt, u32 time_in_us);
int prepare_done_cmd(struct sensor_blob *pkt);
#endif

108
include/media/vi.h Normal file
View File

@@ -0,0 +1,108 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Tegra Graphics Host VI
*
* Copyright (c) 2012-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef __NVHOST_VI_H__
#define __NVHOST_VI_H__
#include <linux/clk/tegra.h>
#include <media/mc_common.h>
#define VI_CFG_INTERRUPT_MASK_0 0x8c
#define VI_CFG_INTERRUPT_STATUS_0 0x98
#define CSI_CSI_PIXEL_PARSER_A_INTERRUPT_MASK_0 0x850
#define CSI_CSI_PIXEL_PARSER_A_STATUS_0 0x854
#define PPA_FIFO_OVRF (1 << 5)
#define CSI_CSI_PIXEL_PARSER_B_INTERRUPT_MASK_0 0x884
#define CSI_CSI_PIXEL_PARSER_B_STATUS_0 0x888
#define PPB_FIFO_OVRF (1 << 5)
#define VI_CSI_0_ERROR_STATUS 0x184
#define VI_CSI_1_ERROR_STATUS 0x284
#define VI_CSI_0_WD_CTRL 0x18c
#define VI_CSI_1_WD_CTRL 0x28c
#define VI_CSI_0_ERROR_INT_MASK_0 0x188
#define VI_CSI_1_ERROR_INT_MASK_0 0x288
#ifdef TEGRA_21X_OR_HIGHER_CONFIG
#define VI_CSI_2_ERROR_STATUS 0x384
#define VI_CSI_3_ERROR_STATUS 0x484
#define VI_CSI_4_ERROR_STATUS 0x584
#define VI_CSI_5_ERROR_STATUS 0x684
#define VI_CSI_2_WD_CTRL 0x38c
#define VI_CSI_3_WD_CTRL 0x48c
#define VI_CSI_4_WD_CTRL 0x58c
#define VI_CSI_5_WD_CTRL 0x68c
#define VI_CSI_2_ERROR_INT_MASK_0 0x388
#define VI_CSI_3_ERROR_INT_MASK_0 0x488
#define VI_CSI_4_ERROR_INT_MASK_0 0x588
#define VI_CSI_5_ERROR_INT_MASK_0 0x688
#define CSI1_CSI_PIXEL_PARSER_A_INTERRUPT_MASK_0 0x1050
#define CSI1_CSI_PIXEL_PARSER_A_STATUS_0 0x1054
#define CSI1_CSI_PIXEL_PARSER_B_INTERRUPT_MASK_0 0x1084
#define CSI1_CSI_PIXEL_PARSER_B_STATUS_0 0x1088
#define CSI2_CSI_PIXEL_PARSER_A_INTERRUPT_MASK_0 0x1850
#define CSI2_CSI_PIXEL_PARSER_A_STATUS_0 0x1854
#define CSI2_CSI_PIXEL_PARSER_B_INTERRUPT_MASK_0 0x1884
#define CSI2_CSI_PIXEL_PARSER_B_STATUS_0 0x1888
#define NUM_VI_WATCHDOG 6
#else
#define NUM_VI_WATCHDOG 2
#endif
typedef void (*callback)(void *);
/* VI error counters; atomic because they are updated from interrupt
 * context (vi_irq) and read elsewhere.
 */
struct tegra_vi_stats {
	atomic_t overflow;	/* FIFO-overflow event count */
};
struct tegra_vi_mfi_ctx;
/* Top-level state of the Tegra VI (video input) host device; embeds
 * the media-controller VI core and the CSI device.
 */
struct vi {
	struct tegra_camera *camera;
	struct platform_device *ndev;	/* our platform device */
	struct device *dev;
	struct tegra_vi_data *data;	/* per-SoC match data */
	struct tegra_mc_vi mc_vi;	/* media-controller VI core */
	struct tegra_csi_device csi;
	struct regulator *reg;		/* VI power supply */
	struct dentry *debugdir;	/* debugfs directory */
	struct tegra_vi_stats vi_out;	/* error counters (IRQ-updated) */
	struct work_struct stats_work;
	struct tegra_vi_mfi_ctx *mfi_ctx;	/* opaque MFI context */
	int vi_irq;
	uint vi_bypass_bw;		/* bw request in bypass mode */
	uint max_bw;
	bool master_deinitialized;
	bool tpg_opened;		/* test-pattern-generator path open */
	bool sensor_opened;		/* real-sensor path open */
	bool bypass;			/* VI bypass mode active */
};
extern const struct file_operations tegra_vi_ctrl_ops;
int nvhost_vi_prepare_poweroff(struct platform_device *);
int nvhost_vi_finalize_poweron(struct platform_device *);
void nvhost_vi_reset_all(struct platform_device *);
struct vi *tegra_vi_get(void);
int vi_v4l2_set_la(struct vi *tegra_vi, u32 vi_bypass_bw, bool is_ioctl);
int tegra_vi_register_mfi_cb(callback cb, void *cb_arg);
int tegra_vi_unregister_mfi_cb(void);
bool tegra_vi_has_mfi_callback(void);
int tegra_vi_mfi_event_notify(struct tegra_vi_mfi_ctx *mfi_ctx, u8 channel);
int tegra_vi_init_mfi(struct tegra_vi_mfi_ctx **mfi_ctx, u8 num_channels);
void tegra_vi_deinit_mfi(struct tegra_vi_mfi_ctx **mfi_ctx);
#endif

View File

@@ -0,0 +1,216 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Tegra VI/CSI register offsets
*
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef __REGISTERS_H__
#define __REGISTERS_H__
/* VI registers */
#define TEGRA_VI_SYNCPT_WAIT_TIMEOUT 200
#define TEGRA_VI_CFG_VI_INCR_SYNCPT 0x000
#define VI_CFG_VI_INCR_SYNCPT_COND(x) (x << 8)
#define VI_CSI_PP_LINE_START(port) (4 + (port) * 4)
#define VI_CSI_PP_FRAME_START(port) (5 + (port) * 4)
#define VI_CSI_MW_REQ_DONE(port) (6 + (port) * 4)
#define VI_CSI_MW_ACK_DONE(port) (7 + (port) * 4)
#define TEGRA_VI_CFG_VI_INCR_SYNCPT_CNTRL 0x004
#define TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR 0x008
#define TEGRA_VI_CFG_CTXSW 0x020
#define TEGRA_VI_CFG_INTSTATUS 0x024
#define TEGRA_VI_CFG_PWM_CONTROL 0x038
#define TEGRA_VI_CFG_PWM_HIGH_PULSE 0x03c
#define TEGRA_VI_CFG_PWM_LOW_PULSE 0x040
#define TEGRA_VI_CFG_PWM_SELECT_PULSE_A 0x044
#define TEGRA_VI_CFG_PWM_SELECT_PULSE_B 0x048
#define TEGRA_VI_CFG_PWM_SELECT_PULSE_C 0x04c
#define TEGRA_VI_CFG_PWM_SELECT_PULSE_D 0x050
#define TEGRA_VI_CFG_VGP1 0x064
#define TEGRA_VI_CFG_VGP2 0x068
#define TEGRA_VI_CFG_VGP3 0x06c
#define TEGRA_VI_CFG_VGP4 0x070
#define TEGRA_VI_CFG_VGP5 0x074
#define TEGRA_VI_CFG_VGP6 0x078
#define TEGRA_VI_CFG_INTERRUPT_MASK 0x08c
#define TEGRA_VI_CFG_INTERRUPT_TYPE_SELECT 0x090
#define TEGRA_VI_CFG_INTERRUPT_POLARITY_SELECT 0x094
#define TEGRA_VI_CFG_INTERRUPT_STATUS 0x098
#define TEGRA_VI_CFG_VGP_SYNCPT_CONFIG 0x0ac
#define TEGRA_VI_CFG_VI_SW_RESET 0x0b4
#define TEGRA_VI_CFG_CG_CTRL 0x0b8
#define VI_CG_2ND_LEVEL_EN 0x1
#define TEGRA_VI_CFG_VI_MCCIF_FIFOCTRL 0x0e4
#define TEGRA_VI_CFG_TIMEOUT_WCOAL_VI 0x0e8
#define TEGRA_VI_CFG_DVFS 0x0f0
#define TEGRA_VI_CFG_RESERVE 0x0f4
#define TEGRA_VI_CFG_RESERVE_1 0x0f8
/* CSI registers */
#define TEGRA_VI_CSI_BASE(x) (0x100 + (x) * 0x100)
#define TEGRA_VI_CSI_SW_RESET 0x000
#define TEGRA_VI_CSI_SINGLE_SHOT 0x004
#define SINGLE_SHOT_CAPTURE 0x1
#define CAPTURE_GOOD_FRAME 0x1
#define TEGRA_VI_CSI_SINGLE_SHOT_STATE_UPDATE 0x008
#define TEGRA_VI_CSI_IMAGE_DEF 0x00c
#define BYPASS_PXL_TRANSFORM_OFFSET 24
#define IMAGE_DEF_FORMAT_OFFSET 16
#define IMAGE_DEF_DEST_MEM 0x1
#define TEGRA_VI_CSI_RGB2Y_CTRL 0x010
#define TEGRA_VI_CSI_MEM_TILING 0x014
#define TEGRA_VI_CSI_IMAGE_SIZE 0x018
#define IMAGE_SIZE_HEIGHT_OFFSET 16
#define TEGRA_VI_CSI_IMAGE_SIZE_WC 0x01c
#define TEGRA_VI_CSI_IMAGE_DT 0x020
#define TEGRA_VI_CSI_SURFACE0_OFFSET_MSB 0x024
#define TEGRA_VI_CSI_SURFACE0_OFFSET_LSB 0x028
#define TEGRA_VI_CSI_SURFACE1_OFFSET_MSB 0x02c
#define TEGRA_VI_CSI_SURFACE1_OFFSET_LSB 0x030
#define TEGRA_VI_CSI_SURFACE2_OFFSET_MSB 0x034
#define TEGRA_VI_CSI_SURFACE2_OFFSET_LSB 0x038
#define TEGRA_VI_CSI_SURFACE0_BF_OFFSET_MSB 0x03c
#define TEGRA_VI_CSI_SURFACE0_BF_OFFSET_LSB 0x040
#define TEGRA_VI_CSI_SURFACE1_BF_OFFSET_MSB 0x044
#define TEGRA_VI_CSI_SURFACE1_BF_OFFSET_LSB 0x048
#define TEGRA_VI_CSI_SURFACE2_BF_OFFSET_MSB 0x04c
#define TEGRA_VI_CSI_SURFACE2_BF_OFFSET_LSB 0x050
#define TEGRA_VI_CSI_SURFACE0_STRIDE 0x054
#define TEGRA_VI_CSI_SURFACE1_STRIDE 0x058
#define TEGRA_VI_CSI_SURFACE2_STRIDE 0x05c
#define TEGRA_VI_CSI_SURFACE_HEIGHT0 0x060
#define TEGRA_VI_CSI_ISPINTF_CONFIG 0x064
#define TEGRA_VI_CSI_ERROR_STATUS 0x084
#define TEGRA_VI_CSI_ERROR_INT_MASK 0x088
#define TEGRA_VI_CSI_WD_CTRL 0x08c
#define TEGRA_VI_CSI_WD_PERIOD 0x090
/* CSI Pixel Parser registers: Starts from 0x838, offset 0x0 */
#define TEGRA_CSI_INPUT_STREAM_CONTROL 0x000
#define CSI_SKIP_PACKET_THRESHOLD_OFFSET 16
#define TEGRA_CSI_PIXEL_STREAM_CONTROL0 0x004
#define CSI_PP_PACKET_HEADER_SENT (0x1 << 4)
#define CSI_PP_DATA_IDENTIFIER_ENABLE (0x1 << 5)
#define CSI_PP_WORD_COUNT_SELECT_HEADER (0x1 << 6)
#define CSI_PP_CRC_CHECK_ENABLE (0x1 << 7)
#define CSI_PP_WC_CHECK (0x1 << 8)
#define CSI_PP_OUTPUT_FORMAT_STORE (0x3 << 16)
#define CSI_PPA_PAD_LINE_NOPAD (0x2 << 24)
#define CSI_PP_HEADER_EC_DISABLE (0x1 << 27)
#define CSI_PPA_PAD_FRAME_NOPAD (0x2 << 28)
#define TEGRA_CSI_PIXEL_STREAM_CONTROL1 0x008
#define CSI_PP_TOP_FIELD_FRAME_OFFSET 0
#define CSI_PP_TOP_FIELD_FRAME_MASK_OFFSET 4
#define TEGRA_CSI_PIXEL_STREAM_GAP 0x00c
#define PP_FRAME_MIN_GAP_OFFSET 16
#define TEGRA_CSI_PIXEL_STREAM_PP_COMMAND 0x010
#define CSI_PP_ENABLE 0x1
#define CSI_PP_DISABLE 0x2
#define CSI_PP_RST 0x3
#define CSI_PP_SINGLE_SHOT_ENABLE (0x1 << 2)
#define CSI_PP_START_MARKER_FRAME_MAX_OFFSET 12
#define TEGRA_CSI_PIXEL_STREAM_EXPECTED_FRAME 0x014
/*
 * Pixel parser interrupt/status/reset registers.  Offsets are presumably
 * relative to a TEGRA_CSI_PIXEL_PARSER_n_BASE (see the PHY note below) --
 * TODO confirm against the TRM.
 */
#define TEGRA_CSI_PIXEL_PARSER_INTERRUPT_MASK 0x018
#define TEGRA_CSI_PIXEL_PARSER_STATUS 0x01c
#define TEGRA_CSI_CSI_SW_SENSOR_RESET 0x020
/* CSI PHY registers */
/* CSI_PHY_CIL_COMMAND_0 offset 0x0d0 from TEGRA_CSI_PIXEL_PARSER_0_BASE */
#define TEGRA_CSI_PHY_CIL_COMMAND 0x0d0
/* 2-bit command fields: CIL-A in bits [1:0], CIL-B in bits [9:8] */
#define CSI_A_PHY_CIL_NOP 0x0
#define CSI_A_PHY_CIL_ENABLE 0x1
#define CSI_A_PHY_CIL_DISABLE 0x2
#define CSI_A_PHY_CIL_ENABLE_MASK 0x3
#define CSI_B_PHY_CIL_NOP (0x0 << 8)
#define CSI_B_PHY_CIL_ENABLE (0x1 << 8)
#define CSI_B_PHY_CIL_DISABLE (0x2 << 8)
#define CSI_B_PHY_CIL_ENABLE_MASK (0x3 << 8)
/* CSI CIL registers: Starts from 0x92c, offset 0xF4 */
#define TEGRA_CSI_CIL_OFFSET 0x0f4
#define TEGRA_CSI_CIL_PAD_CONFIG0 0x000
#define BRICK_CLOCK_A_4X (0x1 << 16)
#define BRICK_CLOCK_B_4X (0x2 << 16)
#define TEGRA_CSI_CIL_PAD_CONFIG1 0x004
#define TEGRA_CSI_CIL_PHY_CONTROL 0x008
#define BYPASS_LP_SEQ (0x1 << 6)
#define TEGRA_CSI_CIL_INTERRUPT_MASK 0x00c
#define TEGRA_CSI_CIL_STATUS 0x010
#define TEGRA_CSI_CILX_STATUS 0x014
#define TEGRA_CSI_CIL_ESCAPE_MODE_COMMAND 0x018
#define TEGRA_CSI_CIL_ESCAPE_MODE_DATA 0x01c
#define TEGRA_CSI_CIL_SW_SENSOR_RESET 0x020
/* CSI Pattern Generator registers: Starts from 0x9c4, offset 0x18c */
#define TEGRA_CSI_TPG_OFFSET 0x18c
#define TEGRA_CSI_PATTERN_GENERATOR_CTRL 0x000
#define PG_MODE_OFFSET 2
#define PG_ENABLE 0x1
#define PG_DISABLE 0x0
#define PG_VBLANK_OFFSET 16
#define TEGRA_CSI_PG_BLANK 0x004
#define TEGRA_CSI_PG_PHASE 0x008
/* Per-color frequency registers: vertical field in [31:16], horizontal in [15:0] */
#define TEGRA_CSI_PG_RED_FREQ 0x00c
#define PG_RED_VERT_INIT_FREQ_OFFSET 16
#define PG_RED_HOR_INIT_FREQ_OFFSET 0
#define TEGRA_CSI_PG_RED_FREQ_RATE 0x010
#define TEGRA_CSI_PG_GREEN_FREQ 0x014
#define PG_GREEN_VERT_INIT_FREQ_OFFSET 16
#define PG_GREEN_HOR_INIT_FREQ_OFFSET 0
#define TEGRA_CSI_PG_GREEN_FREQ_RATE 0x018
#define TEGRA_CSI_PG_BLUE_FREQ 0x01c
#define PG_BLUE_VERT_INIT_FREQ_OFFSET 16
#define PG_BLUE_HOR_INIT_FREQ_OFFSET 0
#define TEGRA_CSI_PG_BLUE_FREQ_RATE 0x020
#define TEGRA_CSI_PG_AOHDR 0x024
#define TEGRA_CSI_DPCM_CTRL_A 0xa2c
#define TEGRA_CSI_DPCM_CTRL_B 0xa30
/* Other CSI registers: Starts from 0xa44, offset 0x20c */
#define TEGRA_CSI_STALL_COUNTER 0x20c
#define TEGRA_CSI_CSI_READONLY_STATUS 0x210
#define TEGRA_CSI_CSI_SW_STATUS_RESET 0x214
#define TEGRA_CSI_CLKEN_OVERRIDE 0x218
#define TEGRA_CSI_DEBUG_CONTROL 0x21c
#define TEGRA_CSI_DEBUG_COUNTER_0 0x220
#define TEGRA_CSI_DEBUG_COUNTER_1 0x224
#define TEGRA_CSI_DEBUG_COUNTER_2 0x228
/* CSI Pixel Parser registers */
#define TEGRA_CSI_PIXEL_PARSER_0_BASE 0x0838
#define TEGRA_CSI_PIXEL_PARSER_1_BASE 0x086c
#define TEGRA_CSI_PIXEL_PARSER_2_BASE 0x1038
#define TEGRA_CSI_PIXEL_PARSER_3_BASE 0x106c
#define TEGRA_CSI_PIXEL_PARSER_4_BASE 0x1838
#define TEGRA_CSI_PIXEL_PARSER_5_BASE 0x186c
/* CSIA to CSIB register offset */
#define TEGRA_CSI_PORT_OFFSET 0x34
#define INVALID_CSI_PORT 0xFF
#define TEGRA_CSI_BLOCKS 3
#define SYNCPT_FIFO_DEPTH 2
#define PREVIOUS_BUFFER_DEC_INDEX 2
/* Clock rate caps -- presumably in Hz; TODO confirm against the clock driver */
#define TEGRA_CLOCK_VI_MAX 793600000
#define TEGRA_CLOCK_TPG 927000000
#define TEGRA_CLOCK_CSI_PORT_MAX 102000000
#define TEGRA_SURFACE_ALIGNMENT 64
#endif

View File

@@ -0,0 +1,259 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Tegra 18x VI register offsets
 *
 * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
 */

#ifndef __VI4_REGISTERS_H__
#define __VI4_REGISTERS_H__

/* VI registers. Start from 0x0 */
#define VI_STREAMS (6)
#define VIRTUAL_CHANNELS (4)
#define VI4_CHANNEL_OFFSET 0x10000
#define CFG_INTERRUPT_STATUS 0x44
#define CFG_INTERRUPT_MASK 0x48
#define VGP6_INT_MASK (0x1 << 29)
#define VGP5_INT_MASK (0x1 << 28)
#define VGP4_INT_MASK (0x1 << 27)
#define VGP3_INT_MASK (0x1 << 26)
#define VGP2_INT_MASK (0x1 << 25)
#define VGP1_INT_MASK (0x1 << 24)
#define HOST_PKTINJECT_STALL_ERR_MASK (0x1 << 7)
#define CSIMUX_FIFO_OVFL_ERR_MASK (0x1 << 6)
#define ATOMP_PACKER_OVFL_ERR_MASK (0x1 << 5)
#define FMLITE_BUF_OVFL_ERR_MASK (0x1 << 4)
#define NOTIFY_FIFO_OVFL_ERR_MASK (0x1 << 3)
#define ISPBUFA_ERR_MASK (0x1 << 0)
#define CFG_PWM_HIGH_PULSE 0x50
#define PWM_HIGH_PULSE (0xffffffff << 0)
#define CSIMUX_CONFIG_STREAM_0 0x424
#define CSIMUX_CONFIG_STREAM_1 0x428
#define CSIMUX_CONFIG_STREAM_2 0x42C
#define CSIMUX_CONFIG_STREAM_3 0x430
#define CSIMUX_CONFIG_STREAM_4 0x434
#define CSIMUX_CONFIG_STREAM_5 0x438
#define FRAMEIDGEN (0xf << 26)
#define STICKYFAULT (0x1 << 25)
#define VPR (0x1 << 24)
#define SRESET (0x1 << 23)
#define QBLOCK (0x1 << 22)
#define FEINJECT (0x1 << 21)
#define FESHORTTIMER (0x1 << 20)
#define FEMAXTIME (0xffff << 4)
#define WT (0xf << 0)
#define NOTIFY_FIFO_TAG_0 0x4000
#define NOTIFY_FRAME_ID (0xffff << 16)
#define NOTIFY_CHANNEL (0xff << 8)
#define NOTIFY_CHANNEL_SHIFT (8)
#define NOTIFY_TAG (0x1f << 1)
#define NOTIFY_TAG_SHIFT (1)
#define NOTIFY_VALID (0x1 << 0)
#define TAG_FS 0
#define TAG_FE 1
#define TAG_CSIMUX_FRAME 2
#define TAG_CSIMUX_STREAM 3
#define TAG_CHANSEL_PXL_SOF 4
#define TAG_CHANSEL_PXL_EOF 5
#define TAG_CHANSEL_EMBED_SOF 6
#define TAG_CHANSEL_EMBED_EOF 7
#define TAG_CHANSEL_NLINES 8
#define TAG_CHANSEL_FAULT 9
#define TAG_CHANSEL_FAULT_FE 10
#define TAG_CHANSEL_NOMATCH 11
#define TAG_CHANSEL_COLLISION 12
#define TAG_CHANSEL_SHORT_FRAME 13
#define TAG_CHANSEL_LOAD_FRAMED 14
#define TAG_ATOMP_PACKER_OVERFLOW 15
#define TAG_ATOMP_FS 16
#define TAG_ATOMP_FE 17
#define TAG_ATOMP_FRAME_DONE 18
#define TAG_ATOMP_EMB_DATA_DONE 19
#define TAG_ATOMP_FRAME_NLINES_DONE 20
#define TAG_ATOMP_FRAME_TRUNCATED 21
#define TAG_ATOMP_FRAME_TOSSED 22
#define TAG_ATOMP_PDAF_DATA_DONE 23
/* note: tag values 24-25 are not defined */
#define TAG_ISPBUF_FIFO_OVERFLOW 26
#define TAG_ISPBUF_FS 27
#define TAG_ISPBUF_FE 28
#define TAG_VGP0_DONE 29
#define TAG_VGP1_DONE 30
#define TAG_FMLITE_DONE 31
#define NOTIFY_FIFO_TIMESTAMP_0 0x4004
#define NOTIFY_TIMESTAMP (0xffffffff << 0)
#define NOTIFY_FIFO_DATA_0 0x4008
#define NOTIFY_DATA (0xffffffff << 0)
#define NOTIFY_TAG_CLASSIFY_0 0x6000
#define NOTIFY_TAG_CLASSIFY_1 0x6004
#define NOTIFY_TAG_CLASSIFY_2 0x6008
#define NOTIFY_TAG_CLASSIFY_3 0x600c
#define NOTIFY_TAG_CLASSIFY_4 0x6010
#define STREAM5_FEINJECT_VC (0xf << 20)
#define STREAM4_FEINJECT_VC (0xf << 16)
#define STREAM3_FEINJECT_VC (0xf << 12)
#define STREAM2_FEINJECT_VC (0xf << 8)
#define STREAM1_FEINJECT_VC (0xf << 4)
#define STREAM0_FEINJECT_VC (0xf << 0)
#define NOTIFY_FIFO_OCCUPANCY_0 0x6014
#define NOTIFY_MAX (0x3ff << 20)
#define NOTIFY_CURRENT (0x3ff << 10)
#define NOTIFY_CURRENT_SHIFT 10
#define NOTIFY_SIZE (0x3ff << 0)
/* VI_CH registers. Start from 0x10000, offset 0x10000 */
#define CHANNEL_COMMAND 0x004
#define WR_ACT_SEL (0x1 << 5)
#define RD_MUX_SEL (0x1 << 4)
#define AUTOLOAD (0x1 << 1)
#define LOAD (0x1 << 0)
#define CONTROL 0x01c
#define SPARE (0xffff << 16)
#define POST_RUNAWAY_EMBED (0x1 << 4)
#define POST_RUNAWAY_PIXEL (0x1 << 3)
#define EARLY_ABORT (0x1 << 2)
#define SINGLESHOT (0x1 << 1)
#define MATCH_STATE_EN (0x1 << 0)
#define MATCH 0x020
#define STREAM (0x3f << 14)
#define STREAM_SHIFT (14)
#define STREAM_MASK (0x3f << 8)
#define VIRTUAL_CHANNEL (0xf << 4)
#define VIRTUAL_CHANNEL_SHIFT (4)
#define VIRTUAL_CHANNEL_MASK (0xf << 0)
#define MATCH_DATATYPE 0x024
#define DATATYPE (0x3f << 6)
#define DATATYPE_SHIFT (6)
#define DATATYPE_MASK (0x3f << 0)
#define DATATYPE_MASK_SHIFT (0)
#define MATCH_FRAMEID 0x028
#define FRAMEID (0xffff << 16)
#define FRAMEID_SHIFT (16)
#define FRAMEID_MASK (0xffff << 0)
#define DT_OVERRIDE 0x02c
#define OVRD_DT (0x3f << 1)
#define DT_OVRD_EN (0x1 << 0)
#define FRAME_X 0x030
#define CROP_X 0x04c
#define OUT_X 0x058
/*
 * Fixed: the following field masks were written with '<' (a comparison
 * that always evaluates to 0) instead of the intended '<<' shift.
 */
#define WIDTH (0xffff << 0)
#define FRAME_Y 0x034
#define CROP_Y 0x054
#define OUT_Y 0x05c
#define HEIGHT (0xffff << 0)
#define EMBED_X 0x038
#define MAX_BYTES (0x3ffff << 0)
#define EMBED_Y 0x03c
#define SKIP_Y 0x050
#define LINES (0xffff << 0)
/* for EMBED_Y only */
#define EXPECT (0x1 << 24)
#define LINE_TIMER 0x044
#define LINE_TIMER_EN (0x1 << 25)
#define PERIODIC (0x1 << 24)
#define TRIPLINE (0xffff << 0)
#define SKIP_X 0x048
#define PACKETS (0x1fff << 0)
#define NOTIFY_MASK 0x060
/* unsigned literal: shifting a signed 1 into bit 31 is undefined behavior */
#define MASK_DTYPE_MISMATCH (0x1U << 31)
#define MASK_EMBED_INFRINGE (0x1 << 22)
#define MASK_EMBED_LONG_LINE (0x1 << 21)
#define MASK_EMBED_SPURIOUS (0x1 << 20)
#define MASK_EMBED_RUNAWAY (0x1 << 19)
#define MASK_EMBED_MISSING_LE (0x1 << 18)
#define MASK_EMBED_EOF (0x1 << 17)
#define MASK_EMBED_SOF (0x1 << 16)
#define MASK_PIXEL_LINE_TIMER (0x1 << 7)
#define MASK_PIXEL_SHORT_LINE (0x1 << 6)
#define MASK_PIXEL_LONG_LINE (0x1 << 5)
#define MASK_PIXEL_SPURIOUS (0x1 << 4)
#define MASK_PIXEL_RUNAWAY (0x1 << 3)
#define MASK_PIXEL_MISSING_LE (0x1 << 2)
#define MASK_PIXEL_EOF (0x1 << 1)
#define MASK_PIXEL_SOF (0x1 << 0)
#define NOTIFY_MASK_XCPT 0x064
#define MASK_NOMATCH (0x1 << 9)
#define MASK_EMBED_OPEN_LINE (0x1 << 8)
#define MASK_PIXEL_OPEN_LINE (0x1 << 7)
#define MASK_FORCE_FE (0x1 << 6)
#define MASK_STALE_FRAME (0x1 << 5)
#define MASK_COLLISION (0x1 << 4)
#define MASK_EMPTY_FRAME (0x1 << 3)
#define MASK_EMBED_SHORT_FRAME (0x1 << 2)
#define MASK_PIXEL_SHORT_FRAME (0x1 << 1)
#define MASK_LOAD_FRAMED (0x1 << 0)
#define FRAME_COUNT 0x06c
#define PIXFMT_ENABLE 0x080
#define PDAF_EN (0x1 << 2)
#define COMPAND_EN (0x1 << 1)
#define PIXFMT_EN (0x1 << 0)
#define PIXFMT_FORMAT 0x084
#define FORMAT (0xff << 0)
/* refer to enum tegra_image_format in core.h */
#define PIXFMT_WIDE 0x088
#define ENDIAN_BIG (0x0 << 1)
#define ENDIAN_LITTLE (0x1 << 1)
#define PIXFMT_WIDE_EN (0x1 << 0)
#define DPCM_STRIP 0x0b8
/* Fixed: '<' -> '<<' (OVERFETCH occupies the upper field, STRIP_WIDTH the lower) */
#define OVERFETCH (0x1fff << 16)
#define STRIP_WIDTH (0x1fff << 0)
#define ATOMP_DPCM_CHUNK 0x0ec
#define CHUNK_OFFSET (0x3ffff << 0)
#define ATOMP_SURFACE_OFFSET0 0x0e0
#define ATOMP_SURFACE_OFFSET1 0x0f0
#define ATOMP_SURFACE_OFFSET2 0x0fc
#define ATOMP_EMB_SURFACE_OFFSET0 0x108
#define SURFACE_OFFSET (0xffffffff << 0)
#define ATOMP_SURFACE_OFFSET0_H 0x0e4
#define ATOMP_SURFACE_OFFSET1_H 0x0f4
#define ATOMP_SURFACE_OFFSET2_H 0x100
#define ATOMP_EMB_SURFACE_OFFSET0_H 0x10c
#define SURFACE_OFFSET_HI (0xff << 0)
#define ATOMP_SURFACE_STRIDE0 0x0e8
#define ATOMP_SURFACE_STRIDE1 0x0f8
#define ATOMP_SURFACE_STRIDE2 0x104
#define ATOMP_EMB_SURFACE_STRIDE0 0x110
#define SURFACE_STRIDE (0x3ffff << 0)
#define ATOMP_RESERVE 0x120
#define ISPBUFA 0x134
#define ISPBUFA_EN (0x1 << 0)
#define ISPBUFA_ERROR 0x1000
#define FIFO_OVERFLOW (0x1 << 0)
#define FMLITE_ERROR 0x313c
#define NOTIFY_ERROR 0x6020
#endif /* __VI4_REGISTERS_H__ */

View File

File diff suppressed because it is too large Load Diff

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,87 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2022 NVIDIA CORPORATION. All rights reserved.
 */

#ifndef _LINUX_TEGRA_I2C_RTCPU_H
#define _LINUX_TEGRA_I2C_RTCPU_H

#include <linux/errno.h>
#include <linux/i2c.h>

#include <media/camera_common.h>

/* Opaque sensor handle returned by tegra_i2c_rtcpu_register_sensor() */
struct tegra_i2c_rtcpu_sensor;

struct tegra_i2c_rtcpu_config {
	/* Width of a sensor register address, in bytes */
	unsigned int reg_bytes;
};

/*
 * Sensor registration
 */

#ifdef CONFIG_I2C_TEGRA_CAMRTC

/* Find an I2C multi device, and register a sensor. */
struct tegra_i2c_rtcpu_sensor *tegra_i2c_rtcpu_register_sensor(
	struct i2c_client *client,
	const struct tegra_i2c_rtcpu_config *config);

#else

static inline struct tegra_i2c_rtcpu_sensor *tegra_i2c_rtcpu_register_sensor(
	struct i2c_client *client,
	const struct tegra_i2c_rtcpu_config *config)
{
	return NULL;
}

#endif

/*
 * I2C transfer
 */

#ifdef CONFIG_I2C_TEGRA_CAMRTC

/* Start or stop buffering of I2C transfer requests */
int tegra_i2c_rtcpu_aggregate(
	struct tegra_i2c_rtcpu_sensor *sensor,
	bool start);

/* Setting frame ID is available after aggregation started */
int tegra_i2c_rtcpu_set_frame_id(
	struct tegra_i2c_rtcpu_sensor *sensor,
	int frame_id);

/* Read one or more bytes from a sensor */
int tegra_i2c_rtcpu_read_reg8(
	struct tegra_i2c_rtcpu_sensor *sensor,
	unsigned int addr,
	u8 *data,
	unsigned int count);

/* Write one or more bytes to a sensor */
int tegra_i2c_rtcpu_write_reg8(
	struct tegra_i2c_rtcpu_sensor *sensor,
	unsigned int addr,
	const u8 *data,
	unsigned int count);

/* Write a table */
int tegra_i2c_rtcpu_write_table_8(
	struct tegra_i2c_rtcpu_sensor *sensor,
	const struct reg_8 table[],
	const struct reg_8 override_list[],
	int num_override_regs, u16 wait_ms_addr, u16 end_addr);

#else

/*
 * Typed static-inline stubs instead of the previous variadic macros:
 * call sites are type-checked even when CONFIG_I2C_TEGRA_CAMRTC is
 * disabled, and argument-count mistakes can no longer compile silently.
 * Return values match the old macro expansions exactly.
 */

static inline int tegra_i2c_rtcpu_aggregate(
	struct tegra_i2c_rtcpu_sensor *sensor,
	bool start)
{
	return 0;
}

static inline int tegra_i2c_rtcpu_set_frame_id(
	struct tegra_i2c_rtcpu_sensor *sensor,
	int frame_id)
{
	return 0;
}

static inline int tegra_i2c_rtcpu_read_reg8(
	struct tegra_i2c_rtcpu_sensor *sensor,
	unsigned int addr,
	u8 *data,
	unsigned int count)
{
	return -ENODEV;
}

static inline int tegra_i2c_rtcpu_write_reg8(
	struct tegra_i2c_rtcpu_sensor *sensor,
	unsigned int addr,
	const u8 *data,
	unsigned int count)
{
	return -ENODEV;
}

static inline int tegra_i2c_rtcpu_write_table_8(
	struct tegra_i2c_rtcpu_sensor *sensor,
	const struct reg_8 table[],
	const struct reg_8 override_list[],
	int num_override_regs, u16 wait_ms_addr, u16 end_addr)
{
	return -ENODEV;
}

#endif

#endif /* _LINUX_TEGRA_I2C_RTCPU_H */

View File

@@ -0,0 +1,133 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Tracepoint definitions for the tegra camera_common / channel drivers.
 *
 * Copyright (c) 2017-2022, NVIDIA CORPORATION, All rights reserved.
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM camera_common

#if !defined(_TRACE_CAMERA_COMMON_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_CAMERA_COMMON_H

#include <linux/version.h>
#include <linux/tracepoint.h>

struct tegra_channel;
struct timespec64;

/* Event class for events that record only a channel/device name */
DECLARE_EVENT_CLASS(channel_simple,
	TP_PROTO(const char *name),
	TP_ARGS(name),
	TP_STRUCT__entry(
		__string(name, name)
	),
	TP_fast_assign(
		__assign_str(name, name);
	),
	TP_printk("%s", __get_str(name))
);

DEFINE_EVENT(channel_simple, tegra_channel_open,
	TP_PROTO(const char *name),
	TP_ARGS(name)
);

DEFINE_EVENT(channel_simple, tegra_channel_close,
	TP_PROTO(const char *name),
	TP_ARGS(name)
);

DEFINE_EVENT(channel_simple, tegra_channel_notify_status_callback,
	TP_PROTO(const char *name),
	TP_ARGS(name)
);

/* Event class for events that record a name plus one integer payload
 * (e.g. a stream/power on-off value).
 */
DECLARE_EVENT_CLASS(channel,
	TP_PROTO(const char *name, int num),
	TP_ARGS(name, num),
	TP_STRUCT__entry(
		__string(name, name)
		__field(int, num)
	),
	TP_fast_assign(
		__assign_str(name, name);
		__entry->num = num;
	),
	TP_printk("%s : 0x%x", __get_str(name), (int)__entry->num)
);

DEFINE_EVENT(channel, tegra_channel_set_stream,
	TP_PROTO(const char *name, int num),
	TP_ARGS(name, num)
);

DEFINE_EVENT(channel, csi_s_stream,
	TP_PROTO(const char *name, int num),
	TP_ARGS(name, num)
);

DEFINE_EVENT(channel, tegra_channel_set_power,
	TP_PROTO(const char *name, int num),
	TP_ARGS(name, num)
);

DEFINE_EVENT(channel, camera_common_s_power,
	TP_PROTO(const char *name, int num),
	TP_ARGS(name, num)
);

DEFINE_EVENT(channel, csi_s_power,
	TP_PROTO(const char *name, int num),
	TP_ARGS(name, num)
);

/* Records the VI channel id (vnc_id[index]) and the configured frame
 * geometry/format at capture-setup time.
 */
TRACE_EVENT(tegra_channel_capture_setup,
	TP_PROTO(struct tegra_channel *chan, unsigned int index),
	TP_ARGS(chan, index),
	TP_STRUCT__entry(
		__field(unsigned int, vnc_id)
		__field(unsigned int, width)
		__field(unsigned int, height)
		__field(unsigned int, format)
	),
	TP_fast_assign(
		__entry->vnc_id = chan->vnc_id[index];
		__entry->width = chan->format.width;
		__entry->height = chan->format.height;
		__entry->format = chan->fmtinfo->img_fmt;
	),
	TP_printk("vnc_id %u W %u H %u fmt %x",
		__entry->vnc_id, __entry->width, __entry->height,
		__entry->format)
);

/* Event class for frame-level events stamped with a capture timestamp
 * (printed as seconds.nanoseconds).
 */
DECLARE_EVENT_CLASS(frame,
	TP_PROTO(const char *str, struct timespec64 *ts),
	TP_ARGS(str, ts),
	TP_STRUCT__entry(
		__string(str, str)
		__field(long, tv_sec)
		__field(long, tv_nsec)
	),
	TP_fast_assign(
		__assign_str(str, str);
		__entry->tv_sec = ts->tv_sec;
		__entry->tv_nsec = ts->tv_nsec;
	),
	TP_printk("%s:%ld.%ld", __get_str(str), __entry->tv_sec,
		__entry->tv_nsec)
);

DEFINE_EVENT(frame, tegra_channel_capture_frame,
	TP_PROTO(const char *str, struct timespec64 *ts),
	TP_ARGS(str, ts)
);

DEFINE_EVENT(frame, tegra_channel_capture_done,
	TP_PROTO(const char *str, struct timespec64 *ts),
	TP_ARGS(str, ts)
);

#endif /* _TRACE_CAMERA_COMMON_H || TRACE_HEADER_MULTI_READ */

/* This part must be outside protection */
#include <trace/define_trace.h>

View File

@@ -0,0 +1,290 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Eventlib interface for PVA
 * (this header also carries the nvhost camera VI/ISP task events --
 * see the nv_camera_* structs and NVHOST_CAMERA_* event IDs below)
 *
 * Copyright (c) 2016-2022, NVIDIA Corporation. All rights reserved.
 */

#ifndef NVHOST_EVENTS_H
#define NVHOST_EVENTS_H

/* Version of the event schema defined by this header */
enum {
	NVHOST_SCHEMA_VERSION = 1
};

#define NVHOST_EVENT_PROVIDER_NAME "nv_mm_nvhost"

/* Marks that the task is submitted to hardware */
struct nvhost_task_submit {
	/* Engine class ID */
	__u32 class_id;

	/* Syncpoint ID */
	__u32 syncpt_id;

	/* Threshold for task completion */
	__u32 syncpt_thresh;

	/* PID */
	__u32 pid;

	/* TID */
	__u32 tid;

	/* Channel ID */
	__u32 channel_id;
} __packed;

/* Marks that the task is moving to execution */
struct nvhost_task_begin {
	/* Engine class ID */
	__u32 class_id;

	/* Syncpoint ID */
	__u32 syncpt_id;

	/* Threshold for task completion */
	__u32 syncpt_thresh;

	/* Channel ID */
	__u32 channel_id;
} __packed;

/* Marks that the task is completed */
struct nvhost_task_end {
	/* Engine class ID */
	__u32 class_id;

	/* Syncpoint ID */
	__u32 syncpt_id;

	/* Threshold for task completion */
	__u32 syncpt_thresh;

	/* Channel ID */
	__u32 channel_id;
} __packed;

struct nvhost_vpu_perf_counter {
	/* Engine class ID */
	__u32 class_id;

	/* Syncpoint ID */
	__u32 syncpt_id;

	/* Threshold for task completion */
	__u32 syncpt_thresh;

	/* Identifier for the R5/VPU algorithm executed */
	__u32 operation;

	/* Algorithm specific identifying tag for the perf counter */
	__u32 tag;

	/* Aggregated statistics for the tagged counter */
	__u32 count;
	__u32 average;
	__u64 variance;
	__u32 minimum;
	__u32 maximum;
} __packed;

/* Marks the pre/postfence associated with the task */
struct nvhost_task_fence {
	/* Engine class ID */
	__u32 class_id;

	/* Kind (prefence or postfence) */
	__u32 kind;

	/* Fence-specific type (see nvdev_fence.h) */
	__u32 fence_type;

	/* Valid for NVDEV_FENCE_TYPE_SYNCPT only */
	__u32 syncpt_id;
	__u32 syncpt_thresh;

	/* The task this fence is associated with */
	__u32 task_syncpt_id;
	__u32 task_syncpt_thresh;

	/* Valid for NVDEV_FENCE_TYPE_SYNC_FD only */
	__u32 sync_fd;

	/*
	 * Valid for NVDEV_FENCE_TYPE_SEMAPHORE and
	 * NVDEV_FENCE_TYPE_SEMAPHORE_TS
	 */
	__u32 semaphore_handle;
	__u32 semaphore_offset;
	__u32 semaphore_value;
} __packed;

struct nvhost_pva_task_state {
	/* Engine class ID */
	__u32 class_id;

	/* Syncpoint ID */
	__u32 syncpt_id;

	/* Threshold for task completion */
	__u32 syncpt_thresh;

	/** ID of the VPU on which task was run. 0 or 1 */
	__u8 vpu_id;

	/** ID of the FW Queue on which the task was run. [0, 7] */
	__u8 queue_id;

	/* Identifier for the R5/VPU algorithm executed */
	__u64 iova;
} __packed;

/* Marks that the task is submitted to hardware */
struct nv_camera_task_submit {
	/* Engine class ID */
	__u32 class_id;

	/* Syncpoint ID */
	__u32 syncpt_id;

	/* Threshold for task completion */
	__u32 syncpt_thresh;

	/* PID */
	__u32 pid;

	/* TID */
	__u32 tid;
} __packed;

/* Marks that the task is moving to execution */
struct nv_camera_task_begin {
	/* Engine class ID */
	__u32 class_id;

	/* Syncpoint ID */
	__u32 syncpt_id;

	/* Threshold for task completion */
	__u32 syncpt_thresh;
} __packed;

/* Marks that the task is completed */
struct nv_camera_task_end {
	/* Engine class ID */
	__u32 class_id;

	/* Syncpoint ID */
	__u32 syncpt_id;

	/* Threshold for task completion */
	__u32 syncpt_thresh;
} __packed;

/* Marks that we are logging a general task */
struct nv_camera_task_log {
	/* Engine class ID */
	__u32 class_id;

	/* PID */
	__u32 pid;

	/* TID */
	__u32 tid;
} __packed;

/* Event type IDs; each comment names the payload struct carried */
enum {
	/* struct nvhost_task_submit */
	NVHOST_TASK_SUBMIT = 0,

	/* struct nvhost_task_begin */
	NVHOST_TASK_BEGIN = 1,

	/* struct nvhost_task_end */
	NVHOST_TASK_END = 2,

	/* struct nvhost_task_fence */
	NVHOST_TASK_FENCE = 3,

	NVHOST_VPU_PERF_COUNTER_BEGIN = 4,
	NVHOST_VPU_PERF_COUNTER_END = 5,

	/* struct nvhost_pva_task_state */
	NVHOST_PVA_QUEUE_BEGIN = 6,
	NVHOST_PVA_QUEUE_END = 7,
	NVHOST_PVA_PREPARE_BEGIN = 8,
	NVHOST_PVA_PREPARE_END = 9,
	NVHOST_PVA_VPU0_BEGIN = 10,
	NVHOST_PVA_VPU0_END = 11,
	NVHOST_PVA_VPU1_BEGIN = 12,
	NVHOST_PVA_VPU1_END = 13,
	NVHOST_PVA_POST_BEGIN = 14,
	NVHOST_PVA_POST_END = 15,

	/* struct nv_camera_vi_capture_setup */
	NVHOST_CAMERA_VI_CAPTURE_SETUP = 16,

	/* struct nv_camera_vi_capture_reset */
	NVHOST_CAMERA_VI_CAPTURE_RESET = 17,

	/* struct nv_camera_vi_capture_release */
	NVHOST_CAMERA_VI_CAPTURE_RELEASE = 18,

	/* struct nv_camera_vi_capture_get_info */
	NVHOST_CAMERA_VI_CAPTURE_GET_INFO = 19,

	/* struct nv_camera_vi_capture_set_config */
	NVHOST_CAMERA_VI_CAPTURE_SET_CONFIG = 20,

	/* struct nv_camera_vi_capture_request */
	NVHOST_CAMERA_VI_CAPTURE_REQUEST = 21,

	/* struct nv_camera_vi_capture_status */
	NVHOST_CAMERA_VI_CAPTURE_STATUS = 22,

	/* note: event ID 23 is not assigned */

	/* struct nv_camera_vi_capture_set_progress_status */
	NVHOST_CAMERA_VI_CAPTURE_SET_PROGRESS_STATUS = 24,

	/* struct nv_camera_isp_capture_setup */
	NVHOST_CAMERA_ISP_CAPTURE_SETUP = 25,

	/* struct nv_camera_isp_capture_reset */
	NVHOST_CAMERA_ISP_CAPTURE_RESET = 26,

	/* struct nv_camera_isp_capture_release */
	NVHOST_CAMERA_ISP_CAPTURE_RELEASE = 27,

	/* struct nv_camera_isp_capture_get_info */
	NVHOST_CAMERA_ISP_CAPTURE_GET_INFO = 28,

	/* struct nv_camera_isp_capture_request */
	NVHOST_CAMERA_ISP_CAPTURE_REQUEST = 29,

	/* struct nv_camera_isp_capture_status */
	NVHOST_CAMERA_ISP_CAPTURE_STATUS = 30,

	/* struct nv_camera_isp_capture_program_request */
	NVHOST_CAMERA_ISP_CAPTURE_PROGRAM_REQUEST = 31,

	/* struct nv_camera_isp_capture_program_status */
	NVHOST_CAMERA_ISP_CAPTURE_PROGRAM_STATUS = 32,

	/* struct nv_camera_isp_capture_request_ex */
	NVHOST_CAMERA_ISP_CAPTURE_REQUEST_EX = 33,

	/* struct nv_camera_isp_capture_set_progress_status */
	NVHOST_CAMERA_ISP_CAPTURE_SET_PROGRESS_STATUS = 34,

	/* struct nv_camera_task_log */
	NVHOST_CAMERA_TASK_LOG = 35,

	NVHOST_NUM_EVENT_TYPES = 36
};

enum {
	NVHOST_NUM_CUSTOM_FILTER_FLAGS = 0
};

#endif /* NVHOST_EVENTS_H */

View File

@@ -0,0 +1,71 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Tegra NVCSI Driver
 *
 * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
 */

#ifndef __UAPI_LINUX_NVHOST_NVCSI_IOCTL_H
#define __UAPI_LINUX_NVHOST_NVCSI_IOCTL_H

#include <linux/ioctl.h>
#include <linux/types.h>

#if !defined(__KERNEL__)
#define __user
#endif

/* Bitmap
 *
 * | PHY_2 | PHY_1 | PHY_0 |
 * | 11 10 | 9 8 | 7 6 | 5 4 | 3 2 | 1 0 |
 * | CILB | CILA | CILB | CILA | CILB | CILA |
 *
 * NOTE: PHY_3 extends this map to bits 15:12 with the same CILA/CILB
 * layout (see NVCSI_PHY_NUM_BRICKS = 4 below); the diagram above only
 * shows the first three bricks.
 */
#define PHY_0_CIL_A_IO0 0
#define PHY_0_CIL_A_IO1 1
#define PHY_0_CIL_B_IO0 2
#define PHY_0_CIL_B_IO1 3
#define PHY_1_CIL_A_IO0 4
#define PHY_1_CIL_A_IO1 5
#define PHY_1_CIL_B_IO0 6
#define PHY_1_CIL_B_IO1 7
#define PHY_2_CIL_A_IO0 8
#define PHY_2_CIL_A_IO1 9
#define PHY_2_CIL_B_IO0 10
#define PHY_2_CIL_B_IO1 11
#define PHY_3_CIL_A_IO0 12
#define PHY_3_CIL_A_IO1 13
#define PHY_3_CIL_B_IO0 14
#define PHY_3_CIL_B_IO1 15
#define NVCSI_PHY_CIL_NUM_LANE 16

/* Single-bit lane masks derived from the bit positions above */
#define NVCSI_PHY_0_NVCSI_CIL_A_IO0 (0x1 << PHY_0_CIL_A_IO0)
#define NVCSI_PHY_0_NVCSI_CIL_A_IO1 (0x1 << PHY_0_CIL_A_IO1)
#define NVCSI_PHY_0_NVCSI_CIL_B_IO0 (0x1 << PHY_0_CIL_B_IO0)
#define NVCSI_PHY_0_NVCSI_CIL_B_IO1 (0x1 << PHY_0_CIL_B_IO1)
#define NVCSI_PHY_1_NVCSI_CIL_A_IO0 (0x1 << PHY_1_CIL_A_IO0)
#define NVCSI_PHY_1_NVCSI_CIL_A_IO1 (0x1 << PHY_1_CIL_A_IO1)
#define NVCSI_PHY_1_NVCSI_CIL_B_IO0 (0x1 << PHY_1_CIL_B_IO0)
#define NVCSI_PHY_1_NVCSI_CIL_B_IO1 (0x1 << PHY_1_CIL_B_IO1)
#define NVCSI_PHY_2_NVCSI_CIL_A_IO0 (0x1 << PHY_2_CIL_A_IO0)
#define NVCSI_PHY_2_NVCSI_CIL_A_IO1 (0x1 << PHY_2_CIL_A_IO1)
#define NVCSI_PHY_2_NVCSI_CIL_B_IO0 (0x1 << PHY_2_CIL_B_IO0)
#define NVCSI_PHY_2_NVCSI_CIL_B_IO1 (0x1 << PHY_2_CIL_B_IO1)
#define NVCSI_PHY_3_NVCSI_CIL_A_IO0 (0x1 << PHY_3_CIL_A_IO0)
#define NVCSI_PHY_3_NVCSI_CIL_A_IO1 (0x1 << PHY_3_CIL_A_IO1)
#define NVCSI_PHY_3_NVCSI_CIL_B_IO0 (0x1 << PHY_3_CIL_B_IO0)
#define NVCSI_PHY_3_NVCSI_CIL_B_IO1 (0x1 << PHY_3_CIL_B_IO1)

#define NVCSI_PHY_NUM_BRICKS 4

#define NVHOST_NVCSI_IOCTL_MAGIC 'N'

/*
 * NOTE(review): the 'long' payload type makes the encoded ioctl numbers
 * differ between 32-bit and 64-bit userspace (sizeof(long) differs), so
 * 32-bit compat callers get a different cmd value.  A fixed-width type
 * (__u64) would be ABI-stable, but changing it now would break the
 * established ABI -- confirm with compat handling before touching.
 */
#define NVHOST_NVCSI_IOCTL_DESKEW_SETUP _IOW(NVHOST_NVCSI_IOCTL_MAGIC, 1, long)
#define NVHOST_NVCSI_IOCTL_DESKEW_APPLY _IOW(NVHOST_NVCSI_IOCTL_MAGIC, 2, long)

#endif

View File

@@ -0,0 +1,33 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Tegra VI Driver
 *
 * Copyright (c) 2013-2022, NVIDIA CORPORATION. All rights reserved.
 */

#ifndef __UAPI_LINUX_NVHOST_VI_IOCTL_H
#define __UAPI_LINUX_NVHOST_VI_IOCTL_H

#include <linux/ioctl.h>
#include <linux/types.h>

#if !defined(__KERNEL__)
#define __user
#endif

#define NVHOST_VI_IOCTL_MAGIC 'V'

/*
 * /dev/nvhost-ctrl-vi devices
 *
 * Opening a '/dev/nvhost-ctrl-vi' device node creates a way to send
 * ctrl ioctl to vi driver.
 *
 * /dev/nvhost-vi is for channel (context specific) operations. We use
 * /dev/nvhost-ctrl-vi for global (context independent) operations on
 * vi device.
 */

/*
 * Payload type changed from the non-standard 'uint' to the fixed-width
 * UAPI type __u32: the header no longer depends on a BSD typedef that
 * plain userspace builds may not have, and the encoded ioctl number is
 * unchanged (sizeof(__u32) == sizeof(unsigned int)).
 */
#define NVHOST_VI_IOCTL_ENABLE_TPG _IOW(NVHOST_VI_IOCTL_MAGIC, 1, __u32)

#endif

View File

@@ -0,0 +1,119 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * camera device driver header
 *
 * Copyright (c) 2018-2022 NVIDIA Corporation. All rights reserved.
 */

#ifndef __UAPI_CAMERA_DEVICE_H_
#define __UAPI_CAMERA_DEVICE_H_

#include <linux/types.h>
#include <linux/ioctl.h>

#define __CAMERA_DEVICE_ALIGN __aligned(8)

/* Sensor, focuser, iris etc., */
#define MAX_DEVICES_PER_CHANNEL 4

/*
 * Increasing below values must validate
 * copy_from or copy_to works properly
 */
#define MAX_COMMANDS 256
#define MAX_BLOB_SIZE 2048

struct i2c_bus {
	__u32 reg_base;
	__u32 clk_rate;
	__u32 flags;
	__u8 reserved[4];
};

struct i2c_mux {
	/*
	 * NOTE(review): 'bool' in a UAPI struct has a compiler-dependent
	 * size; __u8 would be ABI-stable.  Changing it now would alter
	 * layout expectations of existing userspace -- confirm first.
	 */
	bool is_mux_valid;
	__u8 mux_channel;
	__u16 mux_addr;
	__u8 reserved[4];
};

struct i2c_dev {
	__u16 addr;
	__u8 pad[2]; /* for alignment */
	__u32 flags;
};

struct spi_bus {
	__u32 reg_base;
	__u32 clk_rate;
	__u32 flags;
	__u8 reserved[4];
};

struct spi_dev {
	__u8 port;
	__u16 addr;
	__u8 pad; /* for alignment */
	__u32 flags;
	__u8 pad1[4];
};

struct i2c_sensor_cfg {
	__u32 num_devs;
	struct i2c_bus bus;
	struct i2c_mux mux;
	struct i2c_dev sd[MAX_DEVICES_PER_CHANNEL];
};

struct spi_sensor_cfg {
	__u32 num_devs;
	struct spi_bus bus;
	struct spi_dev sd[MAX_DEVICES_PER_CHANNEL];
};

struct sensor_cfg {
	__u8 type; /* SPI or I2C */
	__u8 pad[3]; /* for alignment */
	union {
		struct i2c_sensor_cfg i2c_sensor;
		struct spi_sensor_cfg spi_sensor;
	} u;
} __CAMERA_DEVICE_ALIGN;

struct sensor_cmd {
	__u32 opcode;
	__u32 addr;
};

/* Command list plus a shared data buffer the commands index into */
struct sensor_blob {
	__u32 num_cmds; /* number of valid entries in cmds[], <= MAX_COMMANDS */
	__u32 buf_size; /* number of valid bytes in buf[], <= MAX_BLOB_SIZE */
	struct sensor_cmd cmds[MAX_COMMANDS];
	__u8 buf[MAX_BLOB_SIZE];
} __CAMERA_DEVICE_ALIGN;

struct sensor_blob_cfg {
	__u32 nlines;
	/*
	 * NOTE(review): a raw pointer in a UAPI struct is not 32/64-bit
	 * compat safe (pointer size differs); user pointers are normally
	 * carried as __u64 -- verify compat handling at the ioctl.
	 */
	struct sensor_blob *blob;
} __CAMERA_DEVICE_ALIGN;

/* Channel device-type bits for camdev_chan_cfg.type */
#define CAMERA_DEVICE_NONE 0
#define CAMERA_DEVICE_I2C_SENSOR (0x1 << 1)
#define CAMERA_DEVICE_SPI_SENSOR (0x1 << 2)
/* Future extensions - if necessary */
#define CAMERA_DEVICE_VI (0x1 << 8)
#define CAMERA_DEVICE_CSI (0x1 << 9)
#define CAMERA_DEVICE_ISP (0x1 << 16)

struct camdev_chan_cfg {
	__u32 type;
	struct sensor_cfg scfg;
} __CAMERA_DEVICE_ALIGN;

/* common functionality */
#define CAMERA_DEVICE_REGISTER _IOW('C', 1, struct camdev_chan_cfg)
#define CAMERA_DEVICE_UNREGISTER _IOW('C', 2, __u32)

/* sensor functionality */
#define SENSOR_BLOB_EXECUTE _IOW('C', 10, struct sensor_blob_cfg)

#endif

View File

@@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * UAPI for the tegra camera platform driver: bandwidth (BW) request
 * and query ioctls.
 *
 * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
 */

#ifndef _UAPI_TEGRA_CAMERA_PLATFORM_H_
#define _UAPI_TEGRA_CAMERA_PLATFORM_H_

#include <linux/ioctl.h>
#include <linux/types.h>

#define TEGRA_CAMERA_IOCTL_SET_BW _IOW('o', 1, struct bw_info)
#define TEGRA_CAMERA_IOCTL_GET_BW _IOR('o', 2, __u64)
#define TEGRA_CAMERA_IOCTL_GET_CURR_REQ_ISO_BW _IOR('o', 3, __u64)

/*
 * NOTE(review): the __u8/__u64 pairing leaves 7 bytes of implicit
 * compiler padding between the fields; explicit reserved bytes would
 * make the UAPI layout unambiguous -- changing it now would break the
 * existing ABI, so this is flagged only.
 */
struct bw_info {
	__u8 is_iso;  /* non-zero selects ISO accounting -- presumably; see driver */
	__u64 bw;     /* bandwidth value; units not visible here -- TODO confirm */
};

#endif

45
include/video/vi4.h Normal file
View File

@@ -0,0 +1,45 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Tegra Graphics Host VI
 *
 * Copyright (c) 2015-2022, NVIDIA Corporation. All rights reserved.
 */

#ifndef __TEGRA_VI4_H__
#define __TEGRA_VI4_H__

#include <media/mc_common.h>

struct reset_control;

extern struct vi_notify_driver nvhost_vi_notify_driver;

void nvhost_vi_notify_error(struct platform_device *);

/* Per-device state of the VI4 host driver */
struct nvhost_vi_dev {
	struct nvhost_vi_notify_dev *hvnd;
	struct reset_control *vi_reset;     /* VI block reset line */
	struct reset_control *vi_tsc_reset; /* VI TSC reset line */
	struct dentry *debug_dir;           /* debugfs directory */
	int error_irq;                      /* error interrupt line -- presumably; see probe */
	bool busy;
	/* overflow event counters (see the VI4 error registers) */
	atomic_t overflow;
	atomic_t notify_overflow;
	atomic_t fmlite_overflow;
	struct tegra_mc_vi mc_vi;           /* media-controller VI state */
	unsigned int vi_bypass_bw;          /* see vi4_v4l2_set_la() below */
};

/* Power-management hooks -- presumably invoked by nvhost around
 * power gating; confirm against the nvhost client driver. */
int nvhost_vi4_prepare_poweroff(struct platform_device *);
int nvhost_vi4_finalize_poweron(struct platform_device *);
void nvhost_vi4_idle(struct platform_device *);
void nvhost_vi4_busy(struct platform_device *);
void nvhost_vi4_reset(struct platform_device *);

int nvhost_vi4_aggregate_constraints(struct platform_device *dev,
				int clk_index,
				unsigned long floor_rate,
				unsigned long pixelrate,
				unsigned long bw_constraint);

int vi4_v4l2_set_la(struct platform_device *pdev,
				u32 vi_bypass_bw, u32 is_ioctl);

#endif