mirror of
git://nv-tegra.nvidia.com/linux-nv-oot.git
synced 2025-12-22 09:11:26 +03:00
video: host: Build nvhost camera as OOT modules
Port the nvhost driver below from /kernel/nvidia to /kernel/nvidia-oot as OOT modules and switching from using downstream nvhost driver to upstream host1x driver: - capture support driver - nvcsi driver - vi driver - isp driver Change-Id: I31814f202816230029bd8454b8ff58530a96f436 Signed-off-by: Frank Chen <frankc@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2797339 Reviewed-by: Ankur Pawar <ankurp@nvidia.com> Reviewed-by: Shiva Dubey <sdubey@nvidia.com> GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
This commit is contained in:
committed by
mobile promotions
parent
92ac7bc35a
commit
5f087cb0ed
@@ -7,3 +7,7 @@ obj-m += host/pva/
|
||||
obj-m += tsec/
|
||||
obj-m += dc/bridge/
|
||||
obj-m += camera/
|
||||
obj-m += host/capture/
|
||||
obj-m += host/nvcsi/
|
||||
obj-m += host/vi/
|
||||
obj-m += host/isp/
|
||||
|
||||
10
drivers/video/tegra/host/capture/Makefile
Normal file
10
drivers/video/tegra/host/capture/Makefile
Normal file
@@ -0,0 +1,10 @@
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
|
||||
|
||||
LINUXINCLUDE += -I$(srctree.nvidia)/drivers/gpu/host1x/hw/
|
||||
LINUXINCLUDE += -I$(srctree.nvidia)/drivers/gpu/host1x/include
|
||||
LINUXINCLUDE += -I$(srctree.nvidia)/include
|
||||
|
||||
nvhost-capture-objs = capture-support.o
|
||||
|
||||
obj-m += nvhost-capture.o
|
||||
202
drivers/video/tegra/host/capture/capture-support.c
Normal file
202
drivers/video/tegra/host/capture/capture-support.c
Normal file
@@ -0,0 +1,202 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Capture support for syncpoint and GoS management
|
||||
*
|
||||
* Copyright (c) 2017-2022, NVIDIA Corporation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include "capture-support.h"
|
||||
#include <linux/device.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <soc/tegra/camrtc-capture.h>
|
||||
#include <linux/version.h>
|
||||
#include <soc/tegra/fuse.h>
|
||||
#include <linux/nvhost.h>
|
||||
|
||||
int capture_alloc_syncpt(struct platform_device *pdev,
|
||||
const char *name,
|
||||
uint32_t *syncpt_id)
|
||||
{
|
||||
uint32_t id;
|
||||
|
||||
if (syncpt_id == NULL) {
|
||||
dev_err(&pdev->dev, "%s: null argument\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
id = nvhost_get_syncpt_client_managed(pdev, name);
|
||||
if (id == 0) {
|
||||
dev_err(&pdev->dev, "%s: syncpt allocation failed\n", __func__);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
*syncpt_id = id;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(capture_alloc_syncpt);
|
||||
|
||||
/**
 * capture_release_syncpt() - drop the reference on syncpoint @id
 *
 * Thin wrapper over nvhost_syncpt_put_ref_ext() with a debug trace.
 */
void capture_release_syncpt(struct platform_device *pdev, uint32_t id)
{
	dev_dbg(&pdev->dev, "%s: id=%u\n", __func__, id);

	nvhost_syncpt_put_ref_ext(pdev, id);
}
EXPORT_SYMBOL_GPL(capture_release_syncpt);
|
||||
|
||||
void capture_get_gos_table(struct platform_device *pdev,
|
||||
int *gos_count,
|
||||
const dma_addr_t **gos_table)
|
||||
{
|
||||
int count = 0;
|
||||
dma_addr_t *table = NULL;
|
||||
|
||||
*gos_count = count;
|
||||
*gos_table = table;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(capture_get_gos_table);
|
||||
|
||||
int capture_get_syncpt_gos_backing(struct platform_device *pdev,
|
||||
uint32_t id,
|
||||
dma_addr_t *syncpt_addr,
|
||||
uint32_t *gos_index,
|
||||
uint32_t *gos_offset)
|
||||
{
|
||||
uint32_t index = GOS_INDEX_INVALID;
|
||||
uint32_t offset = 0;
|
||||
dma_addr_t addr;
|
||||
|
||||
if (id == 0) {
|
||||
dev_err(&pdev->dev, "%s: syncpt id is invalid\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (syncpt_addr == NULL || gos_index == NULL || gos_offset == NULL) {
|
||||
dev_err(&pdev->dev, "%s: null arguments\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
addr = nvhost_syncpt_address(pdev, id);
|
||||
|
||||
*syncpt_addr = addr;
|
||||
*gos_index = index;
|
||||
*gos_offset = offset;
|
||||
|
||||
dev_dbg(&pdev->dev, "%s: id=%u addr=0x%llx gos_idx=%u gos_offset=%u\n",
|
||||
__func__, id, addr, index, offset);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(capture_get_syncpt_gos_backing);
|
||||
|
||||
/*
 * Probe for the capture-support (falcon/THI) platform device.
 *
 * Sequence: fetch per-compatible nvhost platform data, register it as
 * drvdata, then bring up nvhost resources -> module -> client device ->
 * syncpoint unit interface.  Errors unwind only what needs explicit
 * release (client device); nvhost_module_deinit() undoes module init
 * on the client-device-init failure path.
 */
static int capture_support_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct nvhost_device_data *info;
	int err = 0;

	/* Platform data comes from the matched of_device_id .data pointer */
	info = (void *)of_device_get_match_data(dev);
	if (WARN_ON(info == NULL))
		return -ENODATA;

	info->pdev = pdev;
	mutex_init(&info->lock);
	platform_set_drvdata(pdev, info);

	/* 39-bit DMA mask; failure deliberately ignored (best effort) */
	(void) dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));

	err = nvhost_client_device_get_resources(pdev);
	if (err)
		goto error;

	err = nvhost_module_init(pdev);
	if (err)
		goto error;

	err = nvhost_client_device_init(pdev);
	if (err) {
		/* undo nvhost_module_init() before bailing out */
		nvhost_module_deinit(pdev);
		goto error;
	}

	err = nvhost_syncpt_unit_interface_init(pdev);
	if (err)
		goto device_release;

	return 0;

device_release:
	nvhost_client_device_release(pdev);
error:
	/* -EPROBE_DEFER is routine; do not spam the log for it */
	if (err != -EPROBE_DEFER)
		dev_err(dev, "probe failed: %d\n", err);
	return err;
}
|
||||
|
||||
/*
 * Remove callback: intentionally a no-op.
 *
 * NOTE(review): probe performs nvhost_module_init()/client_device_init()
 * but nothing is torn down here — presumably the device is never unbound
 * in practice; confirm whether matching deinit calls are required.
 */
static int capture_support_remove(struct platform_device *pdev)
{
	return 0;
}
|
||||
|
||||
/*
 * Per-SoC nvhost platform data for the ISP/VI "THI" falcon devices.
 * Only the devfs name and the legacy numeric module id are populated;
 * the magic moduleid values carry their symbolic names in comments.
 */
struct nvhost_device_data t19_isp_thi_info = {
	.devfs_name = "isp-thi",
	.moduleid = 4, //NVHOST_MODULE_ISP,
};

struct nvhost_device_data t19_vi_thi_info = {
	.devfs_name = "vi-thi",
	.moduleid = 2, //NVHOST_MODULE_VI,
};

struct nvhost_device_data t23x_vi0_thi_info = {
	.devfs_name = "vi0-thi",
	.moduleid = 2, //NVHOST_MODULE_VI,
};

struct nvhost_device_data t23x_vi1_thi_info = {
	.devfs_name = "vi1-thi",
	.moduleid = 3, //NVHOST_MODULE_VI2,
};
|
||||
|
||||
/*
 * OF match table.  The two tegra234 VI-THI instances share one compatible
 * and are disambiguated by the .name field, so each instance gets its own
 * nvhost_device_data.
 */
static const struct of_device_id capture_support_match[] = {
	{
		.compatible = "nvidia,tegra194-isp-thi",
		.data = &t19_isp_thi_info,
	},
	{
		.compatible = "nvidia,tegra194-vi-thi",
		.data = &t19_vi_thi_info,
	},
	{
		.name = "vi0-thi",
		.compatible = "nvidia,tegra234-vi-thi",
		.data = &t23x_vi0_thi_info,
	},
	{
		.name = "vi1-thi",
		.compatible = "nvidia,tegra234-vi-thi",
		.data = &t23x_vi1_thi_info,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, capture_support_match);
|
||||
|
||||
/*
 * Platform driver for the dummy falcon ("scare-pigeon") device that backs
 * the capture syncpoint helpers.  PM is delegated to the shared nvhost
 * module PM ops.
 */
static struct platform_driver capture_support_driver = {
	.probe = capture_support_probe,
	.remove = capture_support_remove,
	.driver = {
		/* Only suitable name for dummy falcon driver */
		.name = "scare-pigeon",
		.of_match_table = capture_support_match,
		.pm = &nvhost_module_pm_ops,
	},
};

module_platform_driver(capture_support_driver);
MODULE_LICENSE("GPL");
|
||||
30
drivers/video/tegra/host/capture/capture-support.h
Normal file
30
drivers/video/tegra/host/capture/capture-support.h
Normal file
@@ -0,0 +1,30 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Capture support for syncpoint and GoS management
 *
 * Copyright (c) 2017-2022, NVIDIA Corporation. All rights reserved.
 */

#ifndef _CAPTURE_SUPPORT_H_
#define _CAPTURE_SUPPORT_H_

#include <linux/types.h>
#include <linux/platform_device.h>

/*
 * Allocate a client-managed syncpoint named @name on @pdev; the id is
 * stored in *@syncpt_id.  Returns 0 or a negative errno.
 */
int capture_alloc_syncpt(struct platform_device *pdev,
			const char *name,
			uint32_t *syncpt_id);

/* Release a syncpoint previously obtained via capture_alloc_syncpt(). */
void capture_release_syncpt(struct platform_device *pdev, uint32_t id);

/* Report the GoS table (count/base) for @pdev; may report an empty table. */
void capture_get_gos_table(struct platform_device *pdev,
			int *gos_count,
			const dma_addr_t **gos_table);

/*
 * Resolve syncpoint @id to its DMA address and GoS slot.  All out
 * pointers are mandatory.  Returns 0 or a negative errno.
 */
int capture_get_syncpt_gos_backing(struct platform_device *pdev,
			uint32_t id,
			dma_addr_t *syncpt_addr,
			uint32_t *gos_index,
			uint32_t *gos_offset);

#endif /* _CAPTURE_SUPPORT_H_ */
||||
12
drivers/video/tegra/host/isp/Makefile
Normal file
12
drivers/video/tegra/host/isp/Makefile
Normal file
@@ -0,0 +1,12 @@
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
|
||||
|
||||
LINUXINCLUDE += -I$(srctree.nvidia)/drivers/gpu/host1x/hw/
|
||||
LINUXINCLUDE += -I$(srctree.nvidia)/drivers/gpu/host1x/include
|
||||
LINUXINCLUDE += -I$(srctree.nvidia)/include
|
||||
LINUXINCLUDE += -I$(srctree.nvidia-oot)/drivers/video/tegra/host
|
||||
LINUXINCLUDE += -I$(srctree.nvidia-oot)/include
|
||||
LINUXINCLUDE += -DCONFIG_TEGRA_HOST1X
|
||||
|
||||
nvhost-isp5-objs = isp5.o
|
||||
obj-m += nvhost-isp5.o
|
||||
354
drivers/video/tegra/host/isp/isp5.c
Normal file
354
drivers/video/tegra/host/isp/isp5.c
Normal file
@@ -0,0 +1,354 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* ISP5 driver
|
||||
*
|
||||
* Copyright (c) 2017-2022, NVIDIA Corporation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <asm/ioctls.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_graph.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/regulator/consumer.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/version.h>
|
||||
#include "nvhost.h"
|
||||
#include <media/fusa-capture/capture-isp-channel.h>
|
||||
#include <media/tegra_camera_platform.h>
|
||||
#include <soc/tegra/camrtc-capture.h>
|
||||
#include <soc/tegra/fuse-helper.h>
|
||||
|
||||
#include "isp5.h"
|
||||
#include "capture/capture-support.h"
|
||||
#include <uapi/linux/nvhost_isp_ioctl.h>
|
||||
|
||||
#define ISP_PPC 2
|
||||
/* 20% overhead */
|
||||
#define ISP_OVERHEAD 20
|
||||
|
||||
struct host_isp5 {
|
||||
struct platform_device *pdev;
|
||||
struct platform_device *isp_thi;
|
||||
};
|
||||
|
||||
static int isp5_alloc_syncpt(struct platform_device *pdev,
|
||||
const char *name,
|
||||
uint32_t *syncpt_id)
|
||||
{
|
||||
struct host_isp5 *isp5 = nvhost_get_private_data(pdev);
|
||||
|
||||
return capture_alloc_syncpt(isp5->isp_thi, name, syncpt_id);
|
||||
}
|
||||
|
||||
/* Channel-driver hook: release a syncpoint via the paired ISP-THI device. */
static void isp5_release_syncpt(struct platform_device *pdev, uint32_t id)
{
	struct host_isp5 *host = nvhost_get_private_data(pdev);

	capture_release_syncpt(host->isp_thi, id);
}
|
||||
|
||||
static int isp5_get_syncpt_gos_backing(struct platform_device *pdev,
|
||||
uint32_t id,
|
||||
dma_addr_t *syncpt_addr,
|
||||
uint32_t *gos_index,
|
||||
uint32_t *gos_offset)
|
||||
{
|
||||
struct host_isp5 *isp5 = nvhost_get_private_data(pdev);
|
||||
|
||||
return capture_get_syncpt_gos_backing(isp5->isp_thi, id,
|
||||
syncpt_addr, gos_index, gos_offset);
|
||||
|
||||
}
|
||||
|
||||
static uint32_t isp5_get_gos_table(struct platform_device *pdev,
|
||||
const dma_addr_t **table)
|
||||
{
|
||||
struct host_isp5 *isp5 = nvhost_get_private_data(pdev);
|
||||
uint32_t count;
|
||||
|
||||
capture_get_gos_table(isp5->isp_thi, &count, table);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
/*
 * Ops vector handed to the fusa-capture ISP channel driver; every hook
 * forwards to the capture-support helpers through the ISP-THI device.
 */
static struct isp_channel_drv_ops isp5_channel_drv_ops = {
	.alloc_syncpt = isp5_alloc_syncpt,
	.release_syncpt = isp5_release_syncpt,
	.get_gos_table = isp5_get_gos_table,
	.get_syncpt_gos_backing = isp5_get_syncpt_gos_backing,
};
|
||||
|
||||
int isp5_priv_early_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct nvhost_device_data *info;
|
||||
struct device_node *thi_np;
|
||||
struct platform_device *thi = NULL;
|
||||
struct host_isp5 *isp5;
|
||||
int err = 0;
|
||||
|
||||
info = (void *)of_device_get_match_data(dev);
|
||||
if (unlikely(info == NULL)) {
|
||||
dev_WARN(dev, "no platform data\n");
|
||||
return -ENODATA;
|
||||
}
|
||||
|
||||
thi_np = of_parse_phandle(dev->of_node, "nvidia,isp-falcon-device", 0);
|
||||
if (thi_np == NULL) {
|
||||
dev_WARN(dev, "missing %s handle\n",
|
||||
"nvidia,isp-falcon-device");
|
||||
err = -ENODEV;
|
||||
goto error;
|
||||
}
|
||||
|
||||
thi = of_find_device_by_node(thi_np);
|
||||
of_node_put(thi_np);
|
||||
|
||||
if (thi == NULL) {
|
||||
err = -ENODEV;
|
||||
goto error;
|
||||
}
|
||||
|
||||
if (thi->dev.driver == NULL) {
|
||||
platform_device_put(thi);
|
||||
return -EPROBE_DEFER;
|
||||
}
|
||||
|
||||
isp5 = devm_kzalloc(dev, sizeof(*isp5), GFP_KERNEL);
|
||||
if (!isp5)
|
||||
return -ENOMEM;
|
||||
|
||||
isp5->isp_thi = thi;
|
||||
isp5->pdev = pdev;
|
||||
info->pdev = pdev;
|
||||
mutex_init(&info->lock);
|
||||
platform_set_drvdata(pdev, info);
|
||||
info->private_data = isp5;
|
||||
|
||||
/* A bit was stolen */
|
||||
(void) dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
|
||||
|
||||
return 0;
|
||||
|
||||
error:
|
||||
info->private_data = NULL;
|
||||
if (err != -EPROBE_DEFER)
|
||||
dev_err(&pdev->dev, "probe failed: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
 * Late probe: register the device with the tegra camera platform layer
 * and hook up the fusa-capture ISP channel driver.
 *
 * NOTE(review): both failure paths release the nvhost client device even
 * though it was initialized by the caller (isp5_probe), and a failing
 * isp_channel_drv_register() does not undo tegra_camera_device_register()
 * — verify this asymmetric unwind is intentional.
 */
int isp5_priv_late_probe(struct platform_device *pdev)
{
	struct tegra_camera_dev_info isp_info;
	struct nvhost_device_data *info = platform_get_drvdata(pdev);
	struct host_isp5 *isp5 = info->private_data;
	int err;

	memset(&isp_info, 0, sizeof(isp_info));
	isp_info.overhead = ISP_OVERHEAD;
	isp_info.ppc = ISP_PPC;
	isp_info.hw_type = HWTYPE_ISPA;
	isp_info.pdev = pdev;
	err = tegra_camera_device_register(&isp_info, isp5);
	if (err)
		goto device_release;

	err = isp_channel_drv_register(pdev, &isp5_channel_drv_ops);
	if (err)
		goto device_release;

	return 0;

device_release:
	nvhost_client_device_release(pdev);

	return err;
}
|
||||
|
||||
/*
 * Full probe path: early probe (THI lookup + private data), then the
 * standard nvhost bring-up (resources -> module -> client device), then
 * late probe (camera platform + channel driver registration).
 *
 * Error handling: any failure after early probe drops the THI device
 * reference taken there; nvhost_module_deinit() additionally undoes
 * module init when client device init fails.
 */
static int isp5_probe(struct platform_device *pdev)
{
	struct nvhost_device_data *pdata;
	struct host_isp5 *isp5;
	int err = 0;

	err = isp5_priv_early_probe(pdev);
	if (err)
		goto error;

	/* early probe installed pdata/private_data via platform_set_drvdata */
	pdata = platform_get_drvdata(pdev);
	isp5 = pdata->private_data;

	err = nvhost_client_device_get_resources(pdev);
	if (err)
		goto put_thi;

	err = nvhost_module_init(pdev);
	if (err)
		goto put_thi;

	err = nvhost_client_device_init(pdev);
	if (err) {
		nvhost_module_deinit(pdev);
		goto put_thi;
	}

	err = isp5_priv_late_probe(pdev);
	if (err)
		goto put_thi;

	return 0;

put_thi:
	/* drop the reference taken by isp5_priv_early_probe() */
	platform_device_put(isp5->isp_thi);
error:
	/* -EPROBE_DEFER is routine; do not spam the log for it */
	if (err != -EPROBE_DEFER)
		dev_err(&pdev->dev, "probe failed: %d\n", err);
	return err;
}
|
||||
|
||||
/*
 * ioctl handler for the ISP control node.
 *
 * Only SET_ISP_LA_BW is recognized, and it is accepted as a no-op since
 * no bandwidth control is needed here.
 *
 * NOTE(review): the ioctl convention for an unrecognized magic is
 * usually -ENOTTY rather than -EFAULT — confirm userspace does not
 * depend on the current value.  The trailing `return 0;` is unreachable
 * (every switch arm returns).
 */
static long isp_ioctl(struct file *file,
		unsigned int cmd, unsigned long arg)
{
	struct t194_isp5_file_private *filepriv = file->private_data;
	struct platform_device *pdev = filepriv->pdev;

	if (_IOC_TYPE(cmd) != NVHOST_ISP_IOCTL_MAGIC)
		return -EFAULT;

	switch (_IOC_NR(cmd)) {
	case _IOC_NR(NVHOST_ISP_IOCTL_SET_ISP_LA_BW): {
		/* No BW control needed. Return without error. */
		return 0;
	}
	default:
		dev_err(&pdev->dev,
			"%s: Unknown ISP ioctl.\n", __func__);
		return -EINVAL;
	}
	return 0;
}
|
||||
|
||||
static int isp_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct nvhost_device_data *pdata = container_of(inode->i_cdev,
|
||||
struct nvhost_device_data, ctrl_cdev);
|
||||
struct platform_device *pdev = pdata->pdev;
|
||||
struct t194_isp5_file_private *filepriv;
|
||||
|
||||
filepriv = kzalloc(sizeof(*filepriv), GFP_KERNEL);
|
||||
if (unlikely(filepriv == NULL))
|
||||
return -ENOMEM;
|
||||
|
||||
filepriv->pdev = pdev;
|
||||
|
||||
file->private_data = filepriv;
|
||||
|
||||
return nonseekable_open(inode, file);
|
||||
}
|
||||
|
||||
static int isp_release(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct t194_isp5_file_private *filepriv = file->private_data;
|
||||
|
||||
kfree(filepriv);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * file_operations for the ISP control character device; the same ioctl
 * entry serves both native and compat callers (no pointer-size issues
 * in the commands handled).
 */
const struct file_operations tegra194_isp5_ctrl_ops = {
	.owner = THIS_MODULE,
	.open = isp_open,
	.unlocked_ioctl = isp_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = isp_ioctl,
#endif
	.release = isp_release,
};
|
||||
|
||||
/*
 * Remove: unwind isp5_priv_late_probe() registrations, then drop the
 * ISP-THI device reference taken in isp5_priv_early_probe().
 */
static int isp5_remove(struct platform_device *pdev)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
	struct host_isp5 *isp5 = (struct host_isp5 *)pdata->private_data;

	tegra_camera_device_unregister(isp5);

	isp_channel_drv_unregister(&pdev->dev);

	/* paired with of_find_device_by_node() in early probe */
	platform_device_put(isp5->isp_thi);

	return 0;
}
|
||||
|
||||
/*
 * nvhost platform data for the Tegra194 ISP5 engine.  The virtualized
 * probe path reuses the split early/late probe hooks; runtime PM
 * autosuspends after 500 ms.
 */
struct nvhost_device_data t19_isp5_info = {
	.devfs_name = "isp",
	.moduleid = 4, //NVHOST_MODULE_ISP,
	.clocks = {
		{"isp", UINT_MAX},
	},
	.ctrl_ops = &tegra194_isp5_ctrl_ops,
	.pre_virt_init = isp5_priv_early_probe,
	.post_virt_init = isp5_priv_late_probe,
	.autosuspend_delay = 500,
	.can_powergate = true,
};
|
||||
|
||||
/* OF match table: a single Tegra194 ISP compatible entry. */
static const struct of_device_id tegra_isp5_of_match[] = {
	{
		.compatible = "nvidia,tegra194-isp",
		.data = &t19_isp5_info,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_isp5_of_match);
|
||||
|
||||
/*
 * ISP5 platform driver; PM delegated to the shared nvhost module PM ops
 * when CONFIG_PM is set.
 */
static struct platform_driver isp5_driver = {
	.probe = isp5_probe,
	.remove = isp5_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "tegra194-isp5",
#ifdef CONFIG_OF
		.of_match_table = tegra_isp5_of_match,
#endif
#ifdef CONFIG_PM
		.pm = &nvhost_module_pm_ops,
#endif
	},
};
|
||||
|
||||
/*
 * Module init: bring up the ISP channel driver core first, then register
 * the platform driver; unwind the former if the latter fails.
 */
static int __init capture_isp_init(void)
{
	int ret = isp_channel_drv_init();

	if (ret)
		return ret;

	ret = platform_driver_register(&isp5_driver);
	if (ret)
		isp_channel_drv_exit();

	return ret;
}
|
||||
static void __exit capture_isp_exit(void)
|
||||
{
|
||||
isp_channel_drv_exit();
|
||||
platform_driver_unregister(&isp5_driver);
|
||||
}
|
||||
|
||||
module_init(capture_isp_init);
|
||||
module_exit(capture_isp_exit);
|
||||
MODULE_LICENSE("GPL");
|
||||
22
drivers/video/tegra/host/isp/isp5.h
Normal file
22
drivers/video/tegra/host/isp/isp5.h
Normal file
@@ -0,0 +1,22 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Tegra ISP5
 *
 * Copyright (c) 2017-2022 NVIDIA Corporation. All rights reserved.
 */

#ifndef __NVHOST_ISP5_H__
#define __NVHOST_ISP5_H__

#include <linux/platform_device.h>

/* fops for the ISP control node (defined in isp5.c) */
extern const struct file_operations tegra194_isp5_ctrl_ops;

/* Per-open-file state: just the owning platform device. */
struct t194_isp5_file_private {
	struct platform_device *pdev;
};

/* Split probe hooks, also used as pre/post virt-init callbacks. */
int isp5_priv_early_probe(struct platform_device *pdev);
int isp5_priv_late_probe(struct platform_device *pdev);

#endif
|
||||
978
drivers/video/tegra/host/isp/nvhost.h
Normal file
978
drivers/video/tegra/host/isp/nvhost.h
Normal file
@@ -0,0 +1,978 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Tegra graphics host driver
|
||||
*
|
||||
* Copyright (c) 2009-2022, NVIDIA Corporation. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef __LINUX_NVHOST_H
|
||||
#define __LINUX_NVHOST_H
|
||||
|
||||
#include <linux/cdev.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/devfreq.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_domain.h>
|
||||
#include <linux/pm_qos.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/version.h>
|
||||
|
||||
#include <uapi/linux/nvdev_fence.h>
|
||||
|
||||
#ifdef CONFIG_TEGRA_HOST1X
|
||||
#include <linux/host1x.h>
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_TEGRA_GRHOST) && IS_ENABLED(CONFIG_TEGRA_HOST1X)
|
||||
#error "Unable to enable TEGRA_GRHOST or TEGRA_HOST1X at the same time!"
|
||||
#endif
|
||||
|
||||
struct tegra_bwmgr_client;
|
||||
|
||||
struct nvhost_channel;
|
||||
struct nvhost_master;
|
||||
struct nvhost_cdma;
|
||||
struct nvhost_hwctx;
|
||||
struct nvhost_device_power_attr;
|
||||
struct nvhost_device_profile;
|
||||
struct mem_mgr;
|
||||
struct nvhost_as_moduleops;
|
||||
struct nvhost_ctrl_sync_fence_info;
|
||||
struct nvhost_sync_timeline;
|
||||
struct nvhost_sync_pt;
|
||||
enum nvdev_fence_kind;
|
||||
struct nvdev_fence;
|
||||
struct sync_pt;
|
||||
struct dma_fence;
|
||||
struct nvhost_fence;
|
||||
|
||||
#define NVHOST_MODULE_MAX_CLOCKS 8
|
||||
#define NVHOST_MODULE_MAX_SYNCPTS 16
|
||||
#define NVHOST_MODULE_MAX_WAITBASES 3
|
||||
#define NVHOST_MODULE_MAX_MODMUTEXES 5
|
||||
#define NVHOST_MODULE_MAX_IORESOURCE_MEM 5
|
||||
#define NVHOST_NAME_SIZE 24
|
||||
#define NVSYNCPT_INVALID (-1)
|
||||
|
||||
#define NVSYNCPT_AVP_0 (10) /* t20, t30, t114, t148 */
|
||||
#define NVSYNCPT_3D (22) /* t20, t30, t114, t148 */
|
||||
#define NVSYNCPT_VBLANK0 (26) /* t20, t30, t114, t148 */
|
||||
#define NVSYNCPT_VBLANK1 (27) /* t20, t30, t114, t148 */
|
||||
|
||||
#define NVMODMUTEX_ISP_0 (1) /* t124, t132, t210 */
|
||||
#define NVMODMUTEX_ISP_1 (2) /* t124, t132, t210 */
|
||||
#define NVMODMUTEX_NVJPG (3) /* t210 */
|
||||
#define NVMODMUTEX_NVDEC (4) /* t210 */
|
||||
#define NVMODMUTEX_MSENC (5) /* t124, t132, t210 */
|
||||
#define NVMODMUTEX_TSECA (6) /* t124, t132, t210 */
|
||||
#define NVMODMUTEX_TSECB (7) /* t124, t132, t210 */
|
||||
#define NVMODMUTEX_VI (8) /* t124, t132, t210 */
|
||||
#define NVMODMUTEX_VI_0 (8) /* t148 */
|
||||
#define NVMODMUTEX_VIC (10) /* t124, t132, t210 */
|
||||
#define NVMODMUTEX_VI_1 (11) /* t124, t132, t210 */
|
||||
|
||||
/* Indices of the per-device power sysfs attributes. */
enum nvhost_power_sysfs_attributes {
	NVHOST_POWER_SYSFS_ATTRIB_AUTOSUSPEND_DELAY,
	NVHOST_POWER_SYSFS_ATTRIB_FORCE_ON,
	NVHOST_POWER_SYSFS_ATTRIB_MAX	/* attribute count, keep last */
};
|
||||
|
||||
/*
 * Error/event notification record shared with userspace; the hex offsets
 * in the field comments describe the record's fixed byte layout.
 */
struct nvhost_notification {
	struct {			/* 0000- */
		__u32 nanoseconds[2];	/* nanoseconds since Jan. 1, 1970 */
	} time_stamp;			/* -0007 */
	__u32 info32;	/* info returned depends on method 0008-000b */
#define NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT	8
#define NVHOST_CHANNEL_GR_ERROR_SW_NOTIFY	13
#define NVHOST_CHANNEL_GR_SEMAPHORE_TIMEOUT	24
#define NVHOST_CHANNEL_GR_ILLEGAL_NOTIFY	25
#define NVHOST_CHANNEL_FIFO_ERROR_MMU_ERR_FLT	31
#define NVHOST_CHANNEL_PBDMA_ERROR		32
#define NVHOST_CHANNEL_RESETCHANNEL_VERIF_ERROR	43
	__u16 info16;	/* info returned depends on method 000c-000d */
	__u16 status;	/* user sets bit 15, NV sets status 000e-000f */
#define NVHOST_CHANNEL_SUBMIT_TIMEOUT		1
};
|
||||
|
||||
/* One clock-gating register: its address plus prod and gating-disable values. */
struct nvhost_gating_register {
	u64 addr;
	u32 prod;
	u32 disable;
};
|
||||
|
||||
/* One actmon setup register: address/value pair. */
struct nvhost_actmon_register {
	u32 addr;
	u32 val;
};
|
||||
|
||||
/* Kinds of EMC (external memory controller) rate requests a client can make. */
enum tegra_emc_request_type {
	TEGRA_SET_EMC_FLOOR, /* lower bound */
	TEGRA_SET_EMC_CAP, /* upper bound */
	TEGRA_SET_EMC_ISO_CAP, /* upper bound that affects ISO Bw */
	TEGRA_SET_EMC_SHARED_BW, /* shared bw request */
	TEGRA_SET_EMC_SHARED_BW_ISO, /* for use by ISO Mgr only */
	TEGRA_SET_EMC_REQ_COUNT /* Should always be last */
};
|
||||
|
||||
/* Description of one clock consumed by an nvhost device. */
struct nvhost_clock {
	char *name;			/* clock name as looked up from DT/CCF */
	unsigned long default_rate;	/* initial rate request */
	u32 moduleid;			/* user-space module id this clock maps to */
	enum tegra_emc_request_type request_type; /* how EMC requests are made */
	bool disable_scaling;		/* exclude this clock from devfreq scaling */
	unsigned long devfreq_rate;	/* rate used while devfreq is active */
};
|
||||
|
||||
/* Location of a VM/stream-id programming register for this device. */
struct nvhost_vm_hwid {
	u64 addr;	/* register address */
	bool dynamic;	/* presumably: reprogrammed at runtime — TODO confirm */
	u32 shift;	/* bit position of the id field within the register */
};
|
||||
|
||||
/*
|
||||
* Defines HW and SW class identifiers.
|
||||
*
|
||||
* This is module ID mapping between userspace and kernelspace.
|
||||
* The values of enum entries' are referred from NvRmModuleID enum defined
|
||||
* in below userspace file:
|
||||
* $TOP/vendor/nvidia/tegra/core/include/nvrm_module.h
|
||||
* Please make sure each entry below has same value as set in above file.
|
||||
*/
|
||||
enum nvhost_module_identifier {

	/* Specifies external memory (DDR RAM, etc) */
	NVHOST_MODULE_ID_EXTERNAL_MEMORY_CONTROLLER = 75,

	/* Specifies CBUS floor client module */
	NVHOST_MODULE_ID_CBUS_FLOOR = 119,

	/* Specifies shared EMC client module */
	NVHOST_MODULE_ID_EMC_SHARED,
	/* Upper bound; not a real module id */
	NVHOST_MODULE_ID_MAX
};
|
||||
|
||||
/* Whether channel resources are owned per device or per channel instance. */
enum nvhost_resource_policy {
	RESOURCE_PER_DEVICE = 0,
	RESOURCE_PER_CHANNEL_INSTANCE,
};
|
||||
|
||||
struct nvhost_device_data {
|
||||
int version; /* ip version number of device */
|
||||
int id; /* Separates clients of same hw */
|
||||
void __iomem *aperture[NVHOST_MODULE_MAX_IORESOURCE_MEM];
|
||||
struct device_dma_parameters dma_parms;
|
||||
|
||||
u32 modulemutexes[NVHOST_MODULE_MAX_MODMUTEXES];
|
||||
u32 moduleid; /* Module id for user space API */
|
||||
|
||||
/* interrupt ISR routine for falcon based engines */
|
||||
int (*flcn_isr)(struct platform_device *dev);
|
||||
int irq;
|
||||
int module_irq; /* IRQ bit from general intr reg for module intr */
|
||||
spinlock_t mirq_lock; /* spin lock for module irq */
|
||||
bool self_config_flcn_isr; /* skip setting up falcon interrupts */
|
||||
|
||||
/* Should we toggle the engine SLCG when we turn on the domain? */
|
||||
bool poweron_toggle_slcg;
|
||||
|
||||
/* Flag to set SLCG notifier (for the modules other than VIC) */
|
||||
bool slcg_notifier_enable;
|
||||
|
||||
/* Used to serialize channel when map-at-submit is used w/o mlocks */
|
||||
u32 last_submit_syncpt_id;
|
||||
u32 last_submit_syncpt_value;
|
||||
|
||||
bool power_on; /* If module is powered on */
|
||||
|
||||
u32 class; /* Device class */
|
||||
bool exclusive; /* True if only one user at a time */
|
||||
bool keepalive; /* Do not power gate when opened */
|
||||
bool serialize; /* Serialize submits in the channel */
|
||||
bool push_work_done; /* Push_op done into push buffer */
|
||||
bool poweron_reset; /* Reset the engine before powerup */
|
||||
bool virtual_dev; /* True if virtualized device */
|
||||
char *devfs_name; /* Name in devfs */
|
||||
char *devfs_name_family; /* Core of devfs name */
|
||||
|
||||
/* Support aborting the channel with close(channel_fd) */
|
||||
bool support_abort_on_close;
|
||||
|
||||
char *firmware_name; /* Name of firmware */
|
||||
bool firmware_not_in_subdir; /*
|
||||
* Firmware is not located in
|
||||
* chip subdirectory
|
||||
*/
|
||||
|
||||
bool engine_can_cg; /* True if CG is enabled */
|
||||
bool can_powergate; /* True if module can be power gated */
|
||||
int autosuspend_delay;/* Delay before power gated */
|
||||
struct nvhost_clock clocks[NVHOST_MODULE_MAX_CLOCKS];/* Clock names */
|
||||
|
||||
/* Clock gating registers */
|
||||
struct nvhost_gating_register *engine_cg_regs;
|
||||
|
||||
int num_clks; /* Number of clocks opened for dev */
|
||||
#if IS_ENABLED(CONFIG_TEGRA_GRHOST)
|
||||
struct clk *clk[NVHOST_MODULE_MAX_CLOCKS];
|
||||
#else
|
||||
struct clk_bulk_data *clks;
|
||||
#endif
|
||||
struct mutex lock; /* Power management lock */
|
||||
struct list_head client_list; /* List of clients and rate requests */
|
||||
|
||||
int num_channels; /* Max num of channel supported */
|
||||
int num_mapped_chs; /* Num of channel mapped to device */
|
||||
int num_ppc; /* Number of pixels per clock cycle */
|
||||
|
||||
/* device node for channel operations */
|
||||
dev_t cdev_region;
|
||||
struct device *node;
|
||||
struct cdev cdev;
|
||||
|
||||
/* Address space device node */
|
||||
struct device *as_node;
|
||||
struct cdev as_cdev;
|
||||
|
||||
/* device node for ctrl block */
|
||||
struct class *nvhost_class;
|
||||
struct device *ctrl_node;
|
||||
struct cdev ctrl_cdev;
|
||||
const struct file_operations *ctrl_ops; /* ctrl ops for the module */
|
||||
|
||||
/* address space operations */
|
||||
const struct nvhost_as_moduleops *as_ops;
|
||||
|
||||
struct kobject *power_kobj; /* kobject to hold power sysfs entries */
|
||||
struct nvhost_device_power_attr *power_attrib; /* sysfs attributes */
|
||||
/* kobject to hold clk_cap sysfs entries */
|
||||
struct kobject clk_cap_kobj;
|
||||
struct kobj_attribute *clk_cap_attrs;
|
||||
struct dentry *debugfs; /* debugfs directory */
|
||||
|
||||
u32 nvhost_timeout_default;
|
||||
|
||||
/* Data for devfreq usage */
|
||||
struct devfreq *power_manager;
|
||||
/* Private device profile data */
|
||||
struct nvhost_device_profile *power_profile;
|
||||
/* Should we read load estimate from hardware? */
|
||||
bool actmon_enabled;
|
||||
/* Should we do linear emc scaling? */
|
||||
bool linear_emc;
|
||||
/* Offset to actmon registers */
|
||||
u32 actmon_regs;
|
||||
/* WEIGHT_COUNT of actmon */
|
||||
u32 actmon_weight_count;
|
||||
struct nvhost_actmon_register *actmon_setting_regs;
|
||||
/* Devfreq governor name */
|
||||
const char *devfreq_governor;
|
||||
unsigned long *freq_table;
|
||||
|
||||
/* Marks if the device is booted when pm runtime is disabled */
|
||||
bool booted;
|
||||
|
||||
/* Should be marked as true if nvhost shouldn't create device nodes */
|
||||
bool kernel_only;
|
||||
|
||||
void *private_data; /* private platform data */
|
||||
void *falcon_data; /* store the falcon info */
|
||||
struct platform_device *pdev; /* owner platform_device */
|
||||
void *virt_priv; /* private data for virtualized dev */
|
||||
#if IS_ENABLED(CONFIG_TEGRA_HOST1X)
|
||||
struct host1x *host1x; /* host1x device */
|
||||
#endif
|
||||
|
||||
struct mutex no_poweroff_req_mutex;
|
||||
struct dev_pm_qos_request no_poweroff_req;
|
||||
int no_poweroff_req_count;
|
||||
|
||||
struct notifier_block toggle_slcg_notifier;
|
||||
|
||||
struct rw_semaphore busy_lock;
|
||||
bool forced_idle;
|
||||
|
||||
/* Finalize power on. Can be used for context restore. */
|
||||
int (*finalize_poweron)(struct platform_device *dev);
|
||||
|
||||
/* Called each time we enter the class */
|
||||
int (*init_class_context)(struct platform_device *dev,
|
||||
struct nvhost_cdma *cdma);
|
||||
|
||||
/*
|
||||
* Reset the unit. Used for timeout recovery, resetting the unit on
|
||||
* probe and when un-powergating.
|
||||
*/
|
||||
void (*reset)(struct platform_device *dev);
|
||||
|
||||
/* Device is busy. */
|
||||
void (*busy)(struct platform_device *dev);
|
||||
|
||||
/* Device is idle. */
|
||||
void (*idle)(struct platform_device *dev);
|
||||
|
||||
/* Scaling init is run on device registration */
|
||||
void (*scaling_init)(struct platform_device *dev);
|
||||
|
||||
/* Scaling deinit is called on device unregistration */
|
||||
void (*scaling_deinit)(struct platform_device *dev);
|
||||
|
||||
/* Postscale callback is called after frequency change */
|
||||
void (*scaling_post_cb)(struct nvhost_device_profile *profile,
|
||||
unsigned long freq);
|
||||
|
||||
/* Preparing for power off. Used for context save. */
|
||||
int (*prepare_poweroff)(struct platform_device *dev);
|
||||
|
||||
/* paring for power off. Used for context save. */
|
||||
int (*aggregate_constraints)(struct platform_device *dev,
|
||||
int clk_index,
|
||||
unsigned long floor_rate,
|
||||
unsigned long pixel_rate,
|
||||
unsigned long bw_rate);
|
||||
|
||||
/*
|
||||
* Called after successful client device init. This can
|
||||
* be used in cases where the hardware specifics differ
|
||||
* between hardware revisions
|
||||
*/
|
||||
int (*hw_init)(struct platform_device *dev);
|
||||
|
||||
/* Used to add platform specific masks on reloc address */
|
||||
dma_addr_t (*get_reloc_phys_addr)(dma_addr_t phys_addr, u32 reloc_type);
|
||||
|
||||
/* Allocates a context handler for the device */
|
||||
struct nvhost_hwctx_handler *(*alloc_hwctx_handler)(u32 syncpt,
|
||||
struct nvhost_channel *ch);
|
||||
|
||||
/* engine specific init functions */
|
||||
int (*pre_virt_init)(struct platform_device *pdev);
|
||||
int (*post_virt_init)(struct platform_device *pdev);
|
||||
|
||||
/* engine specific functions */
|
||||
int (*memory_init)(struct platform_device *pdev);
|
||||
|
||||
phys_addr_t carveout_addr;
|
||||
phys_addr_t carveout_size;
|
||||
|
||||
/* Information related to engine-side synchronization */
|
||||
void *syncpt_unit_interface;
|
||||
|
||||
u64 transcfg_addr;
|
||||
u32 transcfg_val;
|
||||
u64 mamask_addr;
|
||||
u32 mamask_val;
|
||||
u64 borps_addr;
|
||||
u32 borps_val;
|
||||
struct nvhost_vm_hwid vm_regs[13];
|
||||
|
||||
/* Actmon IRQ from hintstatus_r */
|
||||
unsigned int actmon_irq;
|
||||
|
||||
/* Is the device already forced on? */
|
||||
bool forced_on;
|
||||
|
||||
/* Should we map channel at submit time? */
|
||||
bool resource_policy;
|
||||
|
||||
/* Should we enable context isolation for this device? */
|
||||
bool isolate_contexts;
|
||||
|
||||
/* channel user context list */
|
||||
struct mutex userctx_list_lock;
|
||||
struct list_head userctx_list;
|
||||
|
||||
/* reset control for this device */
|
||||
struct reset_control *reset_control;
|
||||
|
||||
/*
|
||||
* For loadable nvgpu module, we dynamically assign function
|
||||
* pointer of gk20a_debug_dump_device once the module loads
|
||||
*/
|
||||
void *debug_dump_data;
|
||||
void (*debug_dump_device)(void *dev);
|
||||
|
||||
/* icc client id for emc requests */
|
||||
int icc_id;
|
||||
|
||||
/* icc_path handle */
|
||||
struct icc_path *icc_path_handle;
|
||||
|
||||
/* bandwidth manager client id for emc requests */
|
||||
int bwmgr_client_id;
|
||||
|
||||
/* bandwidth manager handle */
|
||||
struct tegra_bwmgr_client *bwmgr_handle;
|
||||
|
||||
/* number of frames mlock can be locked for */
|
||||
u32 mlock_timeout_factor;
|
||||
|
||||
/* eventlib id for the device */
|
||||
int eventlib_id;
|
||||
|
||||
/* deliver task timestamps for falcon */
|
||||
void (*enable_timestamps)(struct platform_device *pdev,
|
||||
struct nvhost_cdma *cdma, dma_addr_t timestamp_addr);
|
||||
|
||||
/* enable risc-v boot */
|
||||
bool enable_riscv_boot;
|
||||
|
||||
/* store the risc-v info */
|
||||
void *riscv_data;
|
||||
|
||||
/* name of riscv descriptor binary */
|
||||
char *riscv_desc_bin;
|
||||
|
||||
/* name of riscv image binary */
|
||||
char *riscv_image_bin;
|
||||
|
||||
/* Flag to enable the debugfs to query module usage */
|
||||
bool enable_usage_debugfs;
|
||||
|
||||
/* Module clock cycles per actmon sample */
|
||||
u32 cycles_per_actmon_sample;
|
||||
};
|
||||
|
||||
|
||||
static inline
|
||||
struct nvhost_device_data *nvhost_get_devdata(struct platform_device *pdev)
|
||||
{
|
||||
return (struct nvhost_device_data *)platform_get_drvdata(pdev);
|
||||
}
|
||||
|
||||
static inline bool nvhost_dev_is_virtual(struct platform_device *pdev)
|
||||
{
|
||||
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
|
||||
|
||||
return pdata->virtual_dev;
|
||||
}
|
||||
|
||||
struct nvhost_device_power_attr {
|
||||
struct platform_device *ndev;
|
||||
struct kobj_attribute power_attr[NVHOST_POWER_SYSFS_ATTRIB_MAX];
|
||||
};
|
||||
|
||||
int flcn_intr_init(struct platform_device *pdev);
|
||||
int flcn_reload_fw(struct platform_device *pdev);
|
||||
int nvhost_flcn_prepare_poweroff(struct platform_device *pdev);
|
||||
int nvhost_flcn_finalize_poweron(struct platform_device *dev);
|
||||
|
||||
/* common runtime pm and power domain APIs */
|
||||
int nvhost_module_init(struct platform_device *ndev);
|
||||
void nvhost_module_deinit(struct platform_device *dev);
|
||||
void nvhost_module_reset(struct platform_device *dev, bool reboot);
|
||||
void nvhost_module_idle(struct platform_device *dev);
|
||||
void nvhost_module_idle_mult(struct platform_device *pdev, int refs);
|
||||
int nvhost_module_busy(struct platform_device *dev);
|
||||
extern const struct dev_pm_ops nvhost_module_pm_ops;
|
||||
|
||||
void host1x_writel(struct platform_device *dev, u32 r, u32 v);
|
||||
u32 host1x_readl(struct platform_device *dev, u32 r);
|
||||
|
||||
/* common device management APIs */
|
||||
int nvhost_client_device_get_resources(struct platform_device *dev);
|
||||
int nvhost_client_device_release(struct platform_device *dev);
|
||||
int nvhost_client_device_init(struct platform_device *dev);
|
||||
|
||||
/* public host1x sync-point management APIs */
|
||||
u32 nvhost_get_syncpt_host_managed(struct platform_device *pdev,
|
||||
u32 param, const char *syncpt_name);
|
||||
u32 nvhost_get_syncpt_client_managed(struct platform_device *pdev,
|
||||
const char *syncpt_name);
|
||||
void nvhost_syncpt_put_ref_ext(struct platform_device *pdev, u32 id);
|
||||
bool nvhost_syncpt_is_valid_pt_ext(struct platform_device *dev, u32 id);
|
||||
void nvhost_syncpt_set_minval(struct platform_device *dev, u32 id, u32 val);
|
||||
void nvhost_syncpt_set_min_update(struct platform_device *pdev, u32 id, u32 val);
|
||||
int nvhost_syncpt_read_ext_check(struct platform_device *dev, u32 id, u32 *val);
|
||||
u32 nvhost_syncpt_read_maxval(struct platform_device *dev, u32 id);
|
||||
u32 nvhost_syncpt_incr_max_ext(struct platform_device *dev, u32 id, u32 incrs);
|
||||
int nvhost_syncpt_is_expired_ext(struct platform_device *dev, u32 id,
|
||||
u32 thresh);
|
||||
dma_addr_t nvhost_syncpt_address(struct platform_device *engine_pdev, u32 id);
|
||||
int nvhost_syncpt_unit_interface_init(struct platform_device *pdev);
|
||||
|
||||
/* public host1x interrupt management APIs */
|
||||
int nvhost_intr_register_notifier(struct platform_device *pdev,
|
||||
u32 id, u32 thresh,
|
||||
void (*callback)(void *, int),
|
||||
void *private_data);
|
||||
|
||||
/* public host1x sync-point management APIs */
|
||||
#ifdef CONFIG_TEGRA_HOST1X
|
||||
|
||||
static inline struct flcn *get_flcn(struct platform_device *pdev)
|
||||
{
|
||||
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
|
||||
|
||||
return pdata ? pdata->falcon_data : NULL;
|
||||
}
|
||||
|
||||
static inline int nvhost_module_set_rate(struct platform_device *dev, void *priv,
|
||||
unsigned long constraint, int index,
|
||||
unsigned long attr)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int nvhost_module_add_client(struct platform_device *dev, void *priv)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void nvhost_module_remove_client(struct platform_device *dev, void *priv) { }
|
||||
|
||||
static inline int nvhost_syncpt_get_cv_dev_address_table(struct platform_device *engine_pdev,
|
||||
int *count, dma_addr_t **table)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static inline const struct firmware *
|
||||
nvhost_client_request_firmware(struct platform_device *dev,
|
||||
const char *fw_name, bool warn)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void nvhost_debug_dump_device(struct platform_device *pdev)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int nvhost_fence_create_fd(
|
||||
struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts,
|
||||
const char *name,
|
||||
s32 *fence_fd)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline int nvhost_fence_foreach_pt(
|
||||
struct nvhost_fence *fence,
|
||||
int (*iter)(struct nvhost_ctrl_sync_fence_info, void *),
|
||||
void *data)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline struct nvhost_job *nvhost_job_alloc(struct nvhost_channel *ch,
|
||||
int num_cmdbufs, int num_relocs, int num_waitchks,
|
||||
int num_syncpts)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void nvhost_job_put(struct nvhost_job *job) {}
|
||||
|
||||
static inline int nvhost_job_add_client_gather_address(struct nvhost_job *job,
|
||||
u32 num_words, u32 class_id, dma_addr_t gather_address)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline int nvhost_channel_map(struct nvhost_device_data *pdata,
|
||||
struct nvhost_channel **ch,
|
||||
void *identifier)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline void *nvhost_get_private_data(struct platform_device *_dev)
|
||||
{
|
||||
struct nvhost_device_data *pdata =
|
||||
(struct nvhost_device_data *)platform_get_drvdata(_dev);
|
||||
WARN_ON(!pdata);
|
||||
return pdata ? pdata->private_data : NULL;
|
||||
}
|
||||
|
||||
static inline struct nvhost_master *nvhost_get_host(
|
||||
struct platform_device *_dev)
|
||||
{
|
||||
struct device *parent = _dev->dev.parent;
|
||||
struct device *dev = &_dev->dev;
|
||||
|
||||
/*
|
||||
* host1x has no parent dev on non-DT configuration or has
|
||||
* platform_bus on DT configuration. So search for a device
|
||||
* whose parent is NULL or platform_bus
|
||||
*/
|
||||
while (parent && parent != &platform_bus) {
|
||||
dev = parent;
|
||||
parent = parent->parent;
|
||||
}
|
||||
|
||||
return nvhost_get_private_data(to_platform_device(dev));
|
||||
}
|
||||
|
||||
static inline int nvhost_channel_submit(struct nvhost_job *job)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline void nvhost_putchannel(struct nvhost_channel *ch, int cnt) {}
|
||||
|
||||
static inline struct nvhost_fence *nvhost_fence_get(int fd)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void nvhost_fence_put(struct nvhost_fence *fence) {}
|
||||
|
||||
static inline int nvhost_fence_num_pts(struct nvhost_fence *fence)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline dma_addr_t nvhost_t194_get_reloc_phys_addr(dma_addr_t phys_addr,
|
||||
u32 reloc_type)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline dma_addr_t nvhost_t23x_get_reloc_phys_addr(dma_addr_t phys_addr,
|
||||
u32 reloc_type)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void nvhost_eventlib_log_task(struct platform_device *pdev,
|
||||
u32 syncpt_id,
|
||||
u32 syncpt_thres,
|
||||
u64 timestamp_start,
|
||||
u64 timestamp_end)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void nvhost_eventlib_log_submit(struct platform_device *pdev,
|
||||
u32 syncpt_id,
|
||||
u32 syncpt_thresh,
|
||||
u64 timestamp)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void nvhost_eventlib_log_fences(struct platform_device *pdev,
|
||||
u32 task_syncpt_id,
|
||||
u32 task_syncpt_thresh,
|
||||
struct nvdev_fence *fences,
|
||||
u8 num_fences,
|
||||
enum nvdev_fence_kind kind,
|
||||
u64 timestamp)
|
||||
{
|
||||
}
|
||||
#else
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
void nvhost_register_dump_device(
|
||||
struct platform_device *dev,
|
||||
void (*nvgpu_debug_dump_device)(void *),
|
||||
void *data);
|
||||
void nvhost_unregister_dump_device(struct platform_device *dev);
|
||||
#else
|
||||
static inline void nvhost_register_dump_device(
|
||||
struct platform_device *dev,
|
||||
void (*nvgpu_debug_dump_device)(void *),
|
||||
void *data)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void nvhost_unregister_dump_device(struct platform_device *dev)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
void host1x_channel_writel(struct nvhost_channel *ch, u32 r, u32 v);
|
||||
u32 host1x_channel_readl(struct nvhost_channel *ch, u32 r);
|
||||
|
||||
void host1x_sync_writel(struct nvhost_master *dev, u32 r, u32 v);
|
||||
u32 host1x_sync_readl(struct nvhost_master *dev, u32 r);
|
||||
|
||||
/* public host1x power management APIs */
|
||||
bool nvhost_module_powered_ext(struct platform_device *dev);
|
||||
/* This power ON only host1x and doesn't power ON module */
|
||||
int nvhost_module_busy_ext(struct platform_device *dev);
|
||||
/* This power OFF only host1x and doesn't power OFF module */
|
||||
void nvhost_module_idle_ext(struct platform_device *dev);
|
||||
|
||||
/* public api to return platform_device ptr to the default host1x instance */
|
||||
struct platform_device *nvhost_get_default_device(void);
|
||||
|
||||
/* Public PM nvhost APIs. */
|
||||
/* This power ON both host1x and module */
|
||||
int nvhost_module_busy(struct platform_device *dev);
|
||||
/* This power OFF both host1x and module */
|
||||
void nvhost_module_idle(struct platform_device *dev);
|
||||
|
||||
/* public api to register/unregister a subdomain */
|
||||
void nvhost_register_client_domain(struct generic_pm_domain *domain);
|
||||
void nvhost_unregister_client_domain(struct generic_pm_domain *domain);
|
||||
|
||||
int nvhost_module_add_client(struct platform_device *dev,
|
||||
void *priv);
|
||||
void nvhost_module_remove_client(struct platform_device *dev,
|
||||
void *priv);
|
||||
|
||||
int nvhost_module_set_rate(struct platform_device *dev, void *priv,
|
||||
unsigned long constraint, int index, unsigned long attr);
|
||||
|
||||
/* public APIs required to submit in-kernel work */
|
||||
int nvhost_channel_map(struct nvhost_device_data *pdata,
|
||||
struct nvhost_channel **ch,
|
||||
void *identifier);
|
||||
void nvhost_putchannel(struct nvhost_channel *ch, int cnt);
|
||||
/* Allocate memory for a job. Just enough memory will be allocated to
|
||||
* accommodate the submit announced in submit header.
|
||||
*/
|
||||
struct nvhost_job *nvhost_job_alloc(struct nvhost_channel *ch,
|
||||
int num_cmdbufs, int num_relocs, int num_waitchks,
|
||||
int num_syncpts);
|
||||
/* Decrement reference job, free if goes to zero. */
|
||||
void nvhost_job_put(struct nvhost_job *job);
|
||||
|
||||
/* Add a gather with IOVA address to job */
|
||||
int nvhost_job_add_client_gather_address(struct nvhost_job *job,
|
||||
u32 num_words, u32 class_id, dma_addr_t gather_address);
|
||||
int nvhost_channel_submit(struct nvhost_job *job);
|
||||
|
||||
/* public host1x sync-point management APIs */
|
||||
u32 nvhost_get_syncpt_client_managed(struct platform_device *pdev,
|
||||
const char *syncpt_name);
|
||||
void nvhost_syncpt_get_ref_ext(struct platform_device *pdev, u32 id);
|
||||
const char *nvhost_syncpt_get_name(struct platform_device *dev, int id);
|
||||
void nvhost_syncpt_cpu_incr_ext(struct platform_device *dev, u32 id);
|
||||
int nvhost_syncpt_read_ext_check(struct platform_device *dev, u32 id, u32 *val);
|
||||
int nvhost_syncpt_wait_timeout_ext(struct platform_device *dev, u32 id, u32 thresh,
|
||||
u32 timeout, u32 *value, struct timespec64 *ts);
|
||||
int nvhost_syncpt_create_fence_single_ext(struct platform_device *dev,
|
||||
u32 id, u32 thresh, const char *name, int *fence_fd);
|
||||
void nvhost_syncpt_set_min_eq_max_ext(struct platform_device *dev, u32 id);
|
||||
int nvhost_syncpt_nb_pts_ext(struct platform_device *dev);
|
||||
bool nvhost_syncpt_is_valid_pt_ext(struct platform_device *dev, u32 id);
|
||||
u32 nvhost_syncpt_read_minval(struct platform_device *dev, u32 id);
|
||||
void nvhost_syncpt_set_maxval(struct platform_device *dev, u32 id, u32 val);
|
||||
int nvhost_syncpt_fd_get_ext(int fd, struct platform_device *pdev, u32 *id);
|
||||
|
||||
void nvhost_eventlib_log_task(struct platform_device *pdev,
|
||||
u32 syncpt_id,
|
||||
u32 syncpt_thres,
|
||||
u64 timestamp_start,
|
||||
u64 timestamp_end);
|
||||
|
||||
void nvhost_eventlib_log_submit(struct platform_device *pdev,
|
||||
u32 syncpt_id,
|
||||
u32 syncpt_thresh,
|
||||
u64 timestamp);
|
||||
|
||||
void nvhost_eventlib_log_fences(struct platform_device *pdev,
|
||||
u32 task_syncpt_id,
|
||||
u32 task_syncpt_thresh,
|
||||
struct nvdev_fence *fences,
|
||||
u8 num_fences,
|
||||
enum nvdev_fence_kind kind,
|
||||
u64 timestamp);
|
||||
|
||||
dma_addr_t nvhost_t194_get_reloc_phys_addr(dma_addr_t phys_addr,
|
||||
u32 reloc_type);
|
||||
dma_addr_t nvhost_t23x_get_reloc_phys_addr(dma_addr_t phys_addr,
|
||||
u32 reloc_type);
|
||||
|
||||
/* public host1x interrupt management APIs */
|
||||
int nvhost_intr_register_fast_notifier(struct platform_device *pdev,
|
||||
u32 id, u32 thresh,
|
||||
void (*callback)(void *, int),
|
||||
void *private_data);
|
||||
|
||||
#if IS_ENABLED(CONFIG_TEGRA_GRHOST) && defined(CONFIG_DEBUG_FS)
|
||||
void nvhost_debug_dump_device(struct platform_device *pdev);
|
||||
#else
|
||||
static inline void nvhost_debug_dump_device(struct platform_device *pdev)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_TEGRA_GRHOST)
|
||||
const struct firmware *
|
||||
nvhost_client_request_firmware(struct platform_device *dev,
|
||||
const char *fw_name, bool warn);
|
||||
#else
|
||||
static inline const struct firmware *
|
||||
nvhost_client_request_firmware(struct platform_device *dev,
|
||||
const char *fw_name, bool warn)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_TEGRA_GRHOST_SYNC)
|
||||
|
||||
int nvhost_fence_foreach_pt(
|
||||
struct nvhost_fence *fence,
|
||||
int (*iter)(struct nvhost_ctrl_sync_fence_info, void *),
|
||||
void *data);
|
||||
|
||||
int nvhost_fence_get_pt(
|
||||
struct nvhost_fence *fence, size_t i,
|
||||
u32 *id, u32 *threshold);
|
||||
|
||||
struct nvhost_fence *nvhost_fence_create(
|
||||
struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts,
|
||||
const char *name);
|
||||
int nvhost_fence_create_fd(
|
||||
struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts,
|
||||
const char *name,
|
||||
s32 *fence_fd);
|
||||
|
||||
struct nvhost_fence *nvhost_fence_get(int fd);
|
||||
struct nvhost_fence *nvhost_fence_dup(struct nvhost_fence *fence);
|
||||
int nvhost_fence_num_pts(struct nvhost_fence *fence);
|
||||
int nvhost_fence_install(struct nvhost_fence *fence, int fence_fd);
|
||||
void nvhost_fence_put(struct nvhost_fence *fence);
|
||||
void nvhost_fence_wait(struct nvhost_fence *fence, u32 timeout_in_ms);
|
||||
|
||||
#else
|
||||
|
||||
static inline int nvhost_fence_foreach_pt(
|
||||
struct nvhost_fence *fence,
|
||||
int (*iter)(struct nvhost_ctrl_sync_fence_info, void *d),
|
||||
void *d)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline struct nvhost_fence *nvhost_create_fence(
|
||||
struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts,
|
||||
const char *name)
|
||||
{
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
static inline int nvhost_fence_create_fd(
|
||||
struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts,
|
||||
const char *name,
|
||||
s32 *fence_fd)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline struct nvhost_fence *nvhost_fence_get(int fd)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline int nvhost_fence_num_pts(struct nvhost_fence *fence)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void nvhost_fence_put(struct nvhost_fence *fence)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void nvhost_fence_wait(struct nvhost_fence *fence, u32 timeout_in_ms)
|
||||
{
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_TEGRA_GRHOST_SYNC) && !defined(CONFIG_SYNC)
|
||||
int nvhost_dma_fence_unpack(struct dma_fence *fence, u32 *id, u32 *threshold);
|
||||
bool nvhost_dma_fence_is_waitable(struct dma_fence *fence);
|
||||
#else
|
||||
static inline int nvhost_dma_fence_unpack(struct dma_fence *fence, u32 *id,
|
||||
u32 *threshold)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
static inline bool nvhost_dma_fence_is_waitable(struct dma_fence *fence)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_TEGRA_GRHOST_SYNC) && defined(CONFIG_SYNC)
|
||||
struct sync_fence *nvhost_sync_fdget(int fd);
|
||||
int nvhost_sync_num_pts(struct sync_fence *fence);
|
||||
struct sync_fence *nvhost_sync_create_fence(struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts, const char *name);
|
||||
int nvhost_sync_create_fence_fd(
|
||||
struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts,
|
||||
const char *name,
|
||||
s32 *fence_fd);
|
||||
int nvhost_sync_fence_set_name(int fence_fd, const char *name);
|
||||
u32 nvhost_sync_pt_id(struct sync_pt *__pt);
|
||||
u32 nvhost_sync_pt_thresh(struct sync_pt *__pt);
|
||||
struct sync_pt *nvhost_sync_pt_from_fence_index(struct sync_fence *fence,
|
||||
u32 sync_pt_index);
|
||||
#else
|
||||
static inline struct sync_fence *nvhost_sync_fdget(int fd)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline int nvhost_sync_num_pts(struct sync_fence *fence)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline struct sync_fence *nvhost_sync_create_fence(struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts, const char *name)
|
||||
{
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
static inline int nvhost_sync_create_fence_fd(
|
||||
struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts,
|
||||
const char *name,
|
||||
s32 *fence_fd)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline int nvhost_sync_fence_set_name(int fence_fd, const char *name)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline u32 nvhost_sync_pt_id(struct sync_pt *__pt)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline u32 nvhost_sync_pt_thresh(struct sync_pt *__pt)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline struct sync_pt *nvhost_sync_pt_from_fence_index(
|
||||
struct sync_fence *fence, u32 sync_pt_index)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Hacky way to get access to struct nvhost_device_data for VI device. */
|
||||
extern struct nvhost_device_data t20_vi_info;
|
||||
extern struct nvhost_device_data t30_vi_info;
|
||||
extern struct nvhost_device_data t11_vi_info;
|
||||
extern struct nvhost_device_data t14_vi_info;
|
||||
|
||||
int nvdec_do_idle(void);
|
||||
int nvdec_do_unidle(void);
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
16
drivers/video/tegra/host/nvcsi/Makefile
Normal file
16
drivers/video/tegra/host/nvcsi/Makefile
Normal file
@@ -0,0 +1,16 @@
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
|
||||
|
||||
LINUXINCLUDE += -I$(srctree.nvidia)/drivers/gpu/host1x/hw/
|
||||
LINUXINCLUDE += -I$(srctree.nvidia)/drivers/gpu/host1x/include
|
||||
LINUXINCLUDE += -I$(srctree.nvidia)/include
|
||||
LINUXINCLUDE += -I$(srctree.nvidia-oot)/drivers/media/platform/tegra
|
||||
|
||||
nvhost-nvcsi-objs = \
|
||||
nvcsi.o \
|
||||
deskew.o
|
||||
|
||||
obj-m += nvhost-nvcsi.o
|
||||
|
||||
nvhost-nvcsi-t194-objs = nvcsi-t194.o
|
||||
obj-m += nvhost-nvcsi-t194.o
|
||||
745
drivers/video/tegra/host/nvcsi/deskew.c
Normal file
745
drivers/video/tegra/host/nvcsi/deskew.c
Normal file
@@ -0,0 +1,745 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Deskew driver
|
||||
*
|
||||
* Copyright (c) 2014-2022, NVIDIA Corporation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include "deskew.h"
|
||||
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/fs.h>
|
||||
#include <asm/ioctls.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_graph.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <uapi/linux/nvhost_nvcsi_ioctl.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/regulator/consumer.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/nvhost.h>
|
||||
|
||||
#include <media/mc_common.h>
|
||||
|
||||
//#include "camera/nvcsi/csi5_fops.h"
|
||||
|
||||
static struct tegra_csi_device *mc_csi;
|
||||
static struct mutex deskew_lock;
|
||||
|
||||
static int debugfs_deskew_clk_stats_low[NVCSI_PHY_CIL_NUM_LANE];
|
||||
static int debugfs_deskew_clk_stats_high[NVCSI_PHY_CIL_NUM_LANE];
|
||||
static int debugfs_deskew_data_stats_low[NVCSI_PHY_CIL_NUM_LANE];
|
||||
static int debugfs_deskew_data_stats_high[NVCSI_PHY_CIL_NUM_LANE];
|
||||
|
||||
static unsigned int enabled_deskew_lanes;
|
||||
static unsigned int done_deskew_lanes;
|
||||
|
||||
static int nvcsi_deskew_apply_helper(unsigned int active_lanes);
|
||||
|
||||
static bool is_t19x_or_greater;
|
||||
// a regmap for address changes between chips
|
||||
static uint32_t regs[REGS_COUNT];
|
||||
|
||||
static const uint32_t t194_regs[REGS_COUNT] = {
|
||||
0x101e4, //< NVCSI_STREAM_0_ERROR_STATUS2VI_MASK regs[0]
|
||||
0x181e4, //< NVCSI_STREAM_1_ERROR_STATUS2VI_MASK regs[1]
|
||||
0x0ffff, //< CFG_ERR_STATUS2VI_MASK_ALL regs[2]
|
||||
0x10400, //< NVCSI_PHY_0_CILA_INTR_STATUS regs[3]
|
||||
0x10408, //< NVCSI_PHY_0_CILA_INTR_MASK regs[4]
|
||||
0x10800, //< NVCSI_PHY_0_CILB_INTR_STATUS regs[5]
|
||||
0x10808, //< NVCSI_PHY_0_CILB_INTR_MASK regs[6]
|
||||
0x11000, //< NVCSI_PHY_0_NVCSI_CIL_PHY_CTRL_0 regs[7]
|
||||
0x24, //< NVCSI_CIL_A_SW_RESET_0_OFFSET regs[8]
|
||||
0x38, //< NVCSI_CIL_A_CLK_DESKEW_CTRL_0_OFFSET regs[9]
|
||||
0x30, //< NVCSI_CIL_A_DPHY_INADJ_CTRL_0_OFFSET regs[10]
|
||||
0x3c, //< NVCSI_CIL_A_DATA_DESKEW_CTRL_0_OFFSET regs[11]
|
||||
0x40, //< NVCSI_CIL_A_DPHY_DESKEW_STATUS_0_OFFSET regs[12]
|
||||
0x44, //< NVCSI_CIL_A_DPHY_DESKEW_DATA_CALIB_STATUS_LOW_0_0_OFFSET regs[13]
|
||||
0x48, //< NVCSI_CIL_A_DPHY_DESKEW_DATA_CALIB_STATUS_HIGH_0_0_OFFSET regs[14]
|
||||
0x54, //< NVCSI_CIL_A_DPHY_DESKEW_CLK_CALIB_STATUS_LOW_0_0_OFFSET regs[15]
|
||||
0x58, //< NVCSI_CIL_A_DPHY_DESKEW_CLK_CALIB_STATUS_HIGH_0_0_OFFSET regs[16]
|
||||
0x6c, //< NVCSI_CIL_A_DESKEW_CONTROL_0_OFFSET regs[17]
|
||||
0x70, //< NVCSI_CIL_A_CONTROL_0_OFFSET regs[18]
|
||||
0xf << 4, //< DESKEW_COMPARE regs[19]
|
||||
4, //< DESKEW_COMPARE_SHIFT regs[20]
|
||||
0xf << 0, //< DESKEW_SETTLE regs[21]
|
||||
0, //< DESKEW_SETTLE_SHIFT regs[22]
|
||||
0x7f << 17, //< CLK_SETTLE regs[23]
|
||||
17, //< CLK_SETTLE_SHIFT0 regs[24]
|
||||
0xff << 1, //< THS_SETTLE0 regs[25]
|
||||
0xff << 9, //< THS_SETTLE1 regs[26]
|
||||
1, //< THS_SETTLE0_SHIFT regs[27]
|
||||
9, //< THS_SETTLE1_SHIFT regs[28]
|
||||
0xbc, //< NVCSI_CIL_B_DPHY_INADJ_CTRL_0_OFFSET regs[29]
|
||||
0xc4, //< NVCSI_CIL_B_CLK_DESKEW_CTRL_0_OFFSET regs[30]
|
||||
0xc8, //< NVCSI_CIL_B_DATA_DESKEW_CTRL_0_OFFSET regs[31]
|
||||
0xcc, //< NVCSI_CIL_B_DPHY_DESKEW_STATUS_0_OFFSET regs[32]
|
||||
0xf8, //< NVCSI_CIL_B_DESKEW_CONTROL_0_OFFSET regs[33]
|
||||
0xfc, //< NVCSI_CIL_B_CONTROL_0_OFFSET regs[34]
|
||||
0x8c, //< NVCSI_CIL_B_OFFSET regs[35]
|
||||
};
|
||||
|
||||
void nvcsi_deskew_platform_setup(struct tegra_csi_device *dev, bool t19x)
|
||||
{
|
||||
int i;
|
||||
|
||||
mc_csi = dev;
|
||||
is_t19x_or_greater = t19x;
|
||||
mutex_init(&deskew_lock);
|
||||
enabled_deskew_lanes = 0;
|
||||
done_deskew_lanes = 0;
|
||||
if (is_t19x_or_greater)
|
||||
for (i = 0; i < REGS_COUNT; ++i)
|
||||
regs[i] = t194_regs[i];
|
||||
}
|
||||
|
||||
static inline void set_enabled_with_lock(unsigned int active_lanes)
|
||||
{
|
||||
mutex_lock(&deskew_lock);
|
||||
enabled_deskew_lanes |= active_lanes;
|
||||
mutex_unlock(&deskew_lock);
|
||||
}
|
||||
|
||||
static inline void unset_enabled_with_lock(unsigned int active_lanes)
|
||||
{
|
||||
mutex_lock(&deskew_lock);
|
||||
enabled_deskew_lanes &= ~active_lanes;
|
||||
mutex_unlock(&deskew_lock);
|
||||
}
|
||||
|
||||
static inline void set_done_with_lock(unsigned int done_lanes)
|
||||
{
|
||||
mutex_lock(&deskew_lock);
|
||||
done_deskew_lanes |= done_lanes;
|
||||
enabled_deskew_lanes &= ~done_lanes;
|
||||
mutex_unlock(&deskew_lock);
|
||||
}
|
||||
|
||||
|
||||
static inline void nvcsi_phy_write(unsigned int phy_num,
|
||||
unsigned int addr_offset, unsigned int val)
|
||||
{
|
||||
unsigned int addr;
|
||||
|
||||
addr = NVCSI_PHY_0_NVCSI_CIL_PHY_CTRL_0 + NVCSI_PHY_OFFSET * phy_num
|
||||
+addr_offset;
|
||||
dev_dbg(mc_csi->dev, "%s: addr %x val %x\n", __func__, addr,
|
||||
val);
|
||||
host1x_writel(mc_csi->pdev, addr, val);
|
||||
}
|
||||
static inline unsigned int nvcsi_phy_readl(unsigned int phy_num,
|
||||
unsigned int addr_offset)
|
||||
{
|
||||
unsigned int addr;
|
||||
int val;
|
||||
|
||||
addr = NVCSI_PHY_0_NVCSI_CIL_PHY_CTRL_0 + NVCSI_PHY_OFFSET * phy_num
|
||||
+ addr_offset;
|
||||
val = host1x_readl(mc_csi->pdev, addr);
|
||||
dev_dbg(mc_csi->dev, "%s: addr %x val %x\n", __func__, addr,
|
||||
val);
|
||||
return val;
|
||||
}
|
||||
|
||||
static void nvcsi_deskew_setup_start(unsigned int active_lanes)
|
||||
{
|
||||
unsigned int phy_num = 0;
|
||||
unsigned int cil_lanes = 0, cila_io_lanes = 0, cilb_io_lanes = 0;
|
||||
unsigned int remaining_lanes = active_lanes;
|
||||
unsigned int val = 0, newval = 0;
|
||||
|
||||
dev_dbg(mc_csi->dev, "%s: active_lanes: %x\n", __func__, active_lanes);
|
||||
while (remaining_lanes) {
|
||||
cil_lanes = (active_lanes & (0x000f << (phy_num * 4)))
|
||||
>> (phy_num * 4);
|
||||
cila_io_lanes = cil_lanes & (NVCSI_PHY_0_NVCSI_CIL_A_IO0
|
||||
| NVCSI_PHY_0_NVCSI_CIL_A_IO1);
|
||||
cilb_io_lanes = cil_lanes & (NVCSI_PHY_0_NVCSI_CIL_B_IO0
|
||||
| NVCSI_PHY_0_NVCSI_CIL_B_IO1);
|
||||
remaining_lanes &= ~(0xf << (phy_num * 4));
|
||||
if (cila_io_lanes) {
|
||||
/*
|
||||
* Disable single bit err when detecting leader
|
||||
* pattern
|
||||
*/
|
||||
newval = CFG_ERR_STATUS2VI_MASK_ALL;
|
||||
host1x_writel(mc_csi->pdev,
|
||||
NVCSI_STREAM_0_ERROR_STATUS2VI_MASK +
|
||||
NVCSI_PHY_OFFSET * phy_num,
|
||||
newval);
|
||||
val = nvcsi_phy_readl(phy_num,
|
||||
NVCSI_CIL_A_DESKEW_CONTROL_0_OFFSET);
|
||||
val = (val & (~(DESKEW_COMPARE | DESKEW_SETTLE)
|
||||
))
|
||||
| (0x4 << DESKEW_COMPARE_SHIFT)
|
||||
| (0X6 << DESKEW_SETTLE_SHIFT);
|
||||
nvcsi_phy_write(phy_num,
|
||||
NVCSI_CIL_A_DESKEW_CONTROL_0_OFFSET,
|
||||
val);
|
||||
val = nvcsi_phy_readl(phy_num,
|
||||
NVCSI_CIL_A_CONTROL_0_OFFSET);
|
||||
val = (val & (~(CLK_SETTLE | THS_SETTLE0 |
|
||||
THS_SETTLE1)))
|
||||
| (0x19 << CLK_SETTLE_SHIFT0)
|
||||
| (0x16 << THS_SETTLE0_SHIFT)
|
||||
| (0x16 << THS_SETTLE1_SHIFT);
|
||||
nvcsi_phy_write(phy_num, NVCSI_CIL_A_CONTROL_0_OFFSET,
|
||||
val);
|
||||
|
||||
if (is_t19x_or_greater)
|
||||
nvcsi_phy_write(phy_num,
|
||||
NVCSI_CIL_A_DPHY_DESKEW_RESULT_STATUS_OFFSET,
|
||||
0
|
||||
);
|
||||
|
||||
}
|
||||
if (cilb_io_lanes) {
|
||||
/*
|
||||
* Disable single bit err when detecting leader
|
||||
* pattern
|
||||
*/
|
||||
newval = CFG_ERR_STATUS2VI_MASK_ALL;
|
||||
host1x_writel(mc_csi->pdev,
|
||||
NVCSI_STREAM_1_ERROR_STATUS2VI_MASK +
|
||||
NVCSI_PHY_OFFSET * phy_num,
|
||||
newval);
|
||||
val = nvcsi_phy_readl(phy_num,
|
||||
NVCSI_CIL_B_DESKEW_CONTROL_0_OFFSET);
|
||||
val = (val & (~(DESKEW_COMPARE | DESKEW_SETTLE)
|
||||
))
|
||||
| (0x4 << DESKEW_COMPARE_SHIFT)
|
||||
| (0X6 << DESKEW_SETTLE_SHIFT);
|
||||
nvcsi_phy_write(phy_num,
|
||||
NVCSI_CIL_B_DESKEW_CONTROL_0_OFFSET,
|
||||
val);
|
||||
val = nvcsi_phy_readl(phy_num,
|
||||
NVCSI_CIL_B_CONTROL_0_OFFSET);
|
||||
val = (val & (~(CLK_SETTLE | THS_SETTLE0 |
|
||||
THS_SETTLE1)))
|
||||
| (0x19 << CLK_SETTLE_SHIFT0)
|
||||
| (0x16 << THS_SETTLE0_SHIFT)
|
||||
| (0x16 << THS_SETTLE1_SHIFT);
|
||||
nvcsi_phy_write(phy_num, NVCSI_CIL_B_CONTROL_0_OFFSET,
|
||||
val);
|
||||
|
||||
if (is_t19x_or_greater)
|
||||
nvcsi_phy_write(phy_num,
|
||||
NVCSI_CIL_B_DPHY_DESKEW_RESULT_STATUS_OFFSET,
|
||||
0
|
||||
);
|
||||
|
||||
}
|
||||
|
||||
// HW sometimes throws an error during the deskew operation
|
||||
// without a delay here
|
||||
// let the nvcsi writes for deskew settings propagate properly
|
||||
// before enabling deskew
|
||||
usleep_range(40, 50);
|
||||
|
||||
if (cila_io_lanes) {
|
||||
val = CLK_INADJ_LIMIT_HIGH;
|
||||
nvcsi_phy_write(phy_num,
|
||||
NVCSI_CIL_A_CLK_DESKEW_CTRL_0_OFFSET,
|
||||
val | CLK_INADJ_SWEEP_CTRL
|
||||
);
|
||||
val = nvcsi_phy_readl(phy_num,
|
||||
NVCSI_CIL_A_DATA_DESKEW_CTRL_0_OFFSET);
|
||||
newval =
|
||||
((cila_io_lanes & NVCSI_PHY_0_NVCSI_CIL_A_IO0) != 0 ?
|
||||
(DATA_INADJ_SWEEP_CTRL0 | DATA_INADJ_LIMIT_HIGH0) : 0)
|
||||
|
|
||||
((cila_io_lanes & NVCSI_PHY_0_NVCSI_CIL_A_IO1) != 0 ?
|
||||
(DATA_INADJ_SWEEP_CTRL1 | DATA_INADJ_LIMIT_HIGH1) : 0);
|
||||
|
||||
nvcsi_phy_write(phy_num,
|
||||
NVCSI_CIL_A_DATA_DESKEW_CTRL_0_OFFSET,
|
||||
((val & ~(DATA_INADJ_SWEEP_CTRL0 |
|
||||
DATA_INADJ_SWEEP_CTRL1)) |
|
||||
newval));
|
||||
}
|
||||
|
||||
if (cilb_io_lanes) {
|
||||
val = CLK_INADJ_LIMIT_HIGH;
|
||||
nvcsi_phy_write(phy_num,
|
||||
NVCSI_CIL_B_CLK_DESKEW_CTRL_0_OFFSET,
|
||||
val | CLK_INADJ_SWEEP_CTRL
|
||||
);
|
||||
val = nvcsi_phy_readl(phy_num,
|
||||
NVCSI_CIL_B_DATA_DESKEW_CTRL_0_OFFSET);
|
||||
newval =
|
||||
((cilb_io_lanes & NVCSI_PHY_0_NVCSI_CIL_B_IO0) != 0 ?
|
||||
(DATA_INADJ_SWEEP_CTRL0 | DATA_INADJ_LIMIT_HIGH0) : 0)
|
||||
|
|
||||
((cilb_io_lanes & NVCSI_PHY_0_NVCSI_CIL_B_IO1) != 0 ?
|
||||
(DATA_INADJ_SWEEP_CTRL1 | DATA_INADJ_LIMIT_HIGH1) : 0);
|
||||
|
||||
nvcsi_phy_write(phy_num,
|
||||
NVCSI_CIL_B_DATA_DESKEW_CTRL_0_OFFSET,
|
||||
((val & ~(DATA_INADJ_SWEEP_CTRL0 |
|
||||
DATA_INADJ_SWEEP_CTRL1)) |
|
||||
newval));
|
||||
}
|
||||
phy_num++;
|
||||
}
|
||||
}
|
||||
|
||||
static int wait_cila_done(unsigned int phy_num, unsigned int cila_io_lanes,
|
||||
unsigned long timeout)
|
||||
{
|
||||
bool done;
|
||||
unsigned int val;
|
||||
|
||||
while (time_before(jiffies, timeout)) {
|
||||
done = true;
|
||||
val = nvcsi_phy_readl(phy_num,
|
||||
NVCSI_CIL_A_DPHY_DESKEW_STATUS_0_OFFSET);
|
||||
if (cila_io_lanes & NVCSI_PHY_0_NVCSI_CIL_A_IO0)
|
||||
done &= !!(val & DPHY_CALIB_DONE_IO0);
|
||||
if (cila_io_lanes & NVCSI_PHY_0_NVCSI_CIL_A_IO1)
|
||||
done &= !!(val & DPHY_CALIB_DONE_IO1);
|
||||
if (val & DPHY_CALIB_ERR_IO1 || val & DPHY_CALIB_ERR_IO0)
|
||||
return -EINVAL;
|
||||
if (done)
|
||||
return 0;
|
||||
usleep_range(5, 10);
|
||||
}
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
static int wait_cilb_done(unsigned int phy_num, unsigned int cilb_io_lanes,
|
||||
unsigned long timeout)
|
||||
{
|
||||
bool done;
|
||||
unsigned int val;
|
||||
|
||||
while (time_before(jiffies, timeout)) {
|
||||
done = true;
|
||||
val = nvcsi_phy_readl(phy_num,
|
||||
NVCSI_CIL_B_DPHY_DESKEW_STATUS_0_OFFSET);
|
||||
if (cilb_io_lanes & NVCSI_PHY_0_NVCSI_CIL_B_IO0)
|
||||
done &= !!(val & DPHY_CALIB_DONE_IO0);
|
||||
if (cilb_io_lanes & NVCSI_PHY_0_NVCSI_CIL_B_IO1)
|
||||
done &= !!(val & DPHY_CALIB_DONE_IO1);
|
||||
if (val & DPHY_CALIB_ERR_IO1 || val & DPHY_CALIB_ERR_IO0)
|
||||
return -EINVAL;
|
||||
if (done)
|
||||
return 0;
|
||||
usleep_range(5, 10);
|
||||
}
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
/*
 * Kthread worker for one deskew request.  For every PHY brick whose lanes
 * appear in ctx->deskew_lanes it waits for CIL-A/CIL-B calibration to
 * finish, then computes and applies the trimmer settings.  On success the
 * lanes are marked done; on any failure the enabled-mask is rolled back.
 * ctx->thread_done is completed on every exit path so waiters in
 * nvcsi_deskew_apply_check() are never left blocked.
 */
static int nvcsi_deskew_thread(void *data)
{
	int ret = 0;
	unsigned int phy_num = 0;
	unsigned int cil_lanes = 0, cila_io_lanes = 0, cilb_io_lanes = 0;
	struct nvcsi_deskew_context *ctx = data;
	unsigned int remaining_lanes = ctx->deskew_lanes;
	unsigned long timeout = 0;

	/* one shared deadline for all bricks, not per-brick */
	timeout = jiffies + msecs_to_jiffies(DESKEW_TIMEOUT_MSEC);

	while (remaining_lanes) {
		/* this PHY's 4-bit nibble: 2 CIL-A lanes, 2 CIL-B lanes */
		cil_lanes = (ctx->deskew_lanes & (0x000f << (phy_num * 4)))
				>> (phy_num * 4);
		cila_io_lanes = cil_lanes & (NVCSI_PHY_0_NVCSI_CIL_A_IO0
				| NVCSI_PHY_0_NVCSI_CIL_A_IO1);
		cilb_io_lanes = cil_lanes & (NVCSI_PHY_0_NVCSI_CIL_B_IO0
				| NVCSI_PHY_0_NVCSI_CIL_B_IO1);
		remaining_lanes &= ~(0xf << (phy_num * 4));
		if (cila_io_lanes) {
			ret = wait_cila_done(phy_num, cila_io_lanes, timeout);
			if (ret)
				goto err;
		}
		if (cilb_io_lanes) {
			ret = wait_cilb_done(phy_num, cilb_io_lanes, timeout);
			if (ret)
				goto err;
		}
		phy_num++;
	}

	/* all lanes calibrated: program trimmer values */
	ret = nvcsi_deskew_apply_helper(ctx->deskew_lanes);
	if (!ret) {
		dev_info(mc_csi->dev, "deskew finished for lanes 0x%04x",
			 ctx->deskew_lanes);
		set_done_with_lock(ctx->deskew_lanes);
	} else {
		dev_info(mc_csi->dev,
			 "deskew apply helper failed for lanes 0x%04x",
			 ctx->deskew_lanes);
		goto err;
	}

	complete(&ctx->thread_done);
	return 0;

err:
	if (ret == -ETIMEDOUT)
		dev_info(mc_csi->dev, "deskew timed out for lanes 0x%04x",
			 ctx->deskew_lanes);
	else if (ret == -EINVAL)
		dev_info(mc_csi->dev, "deskew calib err for lanes 0x%04x",
			 ctx->deskew_lanes);
	/* allow a later retry on these lanes */
	unset_enabled_with_lock(ctx->deskew_lanes);
	complete(&ctx->thread_done);

	return ret;
}
|
||||
|
||||
/*
 * Start deskew calibration for the lane mask in @ctx->deskew_lanes.
 * Lanes that are already being calibrated are skipped; for the remaining
 * lanes the hardware sweep is started and a kthread is spawned to poll
 * for completion and apply trimmer settings.
 * Returns 0 on success or a negative errno (invalid mask / kthread
 * creation failure).
 */
int nvcsi_deskew_setup(struct nvcsi_deskew_context *ctx)
{
	int ret = 0;
	unsigned int new_lanes;

	if (!ctx || !ctx->deskew_lanes)
		return -EINVAL;

	/* reject bits beyond the supported lane count */
	if (ctx->deskew_lanes >> NVCSI_PHY_CIL_NUM_LANE) {
		dev_err(mc_csi->dev, "%s Invalid lanes for deskew\n",
			__func__);
		return -EINVAL;
	}

	/* a fresh request invalidates previous "done" state for its lanes */
	mutex_lock(&deskew_lock);
	done_deskew_lanes &= ~(ctx->deskew_lanes);
	mutex_unlock(&deskew_lock);

	/* only start the sweep on lanes not already in flight */
	new_lanes = ctx->deskew_lanes & ~enabled_deskew_lanes;
	if (new_lanes) {
		set_enabled_with_lock(new_lanes);
		nvcsi_deskew_setup_start(new_lanes);
		init_completion(&ctx->thread_done);
		ctx->deskew_kthread = kthread_run(nvcsi_deskew_thread,
						  ctx, "deskew");
		if (IS_ERR(ctx->deskew_kthread)) {
			ret = PTR_ERR(ctx->deskew_kthread);
			/* no worker will run: complete so waiters don't block */
			complete(&ctx->thread_done);
		}
	}
	return ret;
}
EXPORT_SYMBOL(nvcsi_deskew_setup);
|
||||
|
||||
/*
 * Decide whether a 64-bit calibration-status word represents a "pass"
 * for the current trimmer setting.  Due to bug 200098288 the check uses
 * three bit masks (0b0101, 0b0111, 0b1010) rather than a single one;
 * the algorithm is described in NVCSI_CIL_IAS chapter 5.3.
 * Returns 1 on pass, 0 otherwise.
 */
static inline unsigned int checkpass(unsigned long long stat)
{
	const unsigned long long mask101 = 0x5;
	const unsigned long long mask111 = 0x7;
	const unsigned long long mask1010 = 0xa;

	if ((stat & mask101) == mask101)
		return 1;
	if ((stat & mask111) == mask111)
		return 1;
	return (stat & mask1010) == mask1010;
}
|
||||
|
||||
/* compute_boundary:
|
||||
* This function find the flipping point when the trimmer settings starts
|
||||
* to pass/fail.
|
||||
* Each graph represent the 64-bit status,trimmer setting 0~0x3F
|
||||
* from Right to Left.
|
||||
*
|
||||
* pf: pass to fail, fp: fail to pass
|
||||
*
|
||||
* pf fp
|
||||
* __|------|__
|
||||
* pf fp = 0
|
||||
* ____|-------
|
||||
* pf=0x3f fp
|
||||
* ----------|__
|
||||
*/
|
||||
static unsigned int compute_boundary(unsigned long long stat, unsigned int *x,
|
||||
unsigned int *w)
|
||||
{
|
||||
unsigned int was_pass, i = 0;
|
||||
int pf = -1, fp = -1;
|
||||
unsigned long long last_stat;
|
||||
|
||||
was_pass = checkpass(stat);
|
||||
fp = (was_pass == 1 ? 0 : -1);
|
||||
last_stat = stat;
|
||||
|
||||
while (i < 64) {
|
||||
if ((was_pass == 1) && (!(last_stat & 1))) {
|
||||
if (!checkpass(last_stat)) {
|
||||
pf = i;
|
||||
was_pass = 0;
|
||||
dev_dbg(mc_csi->dev, "pf %d\n", pf);
|
||||
}
|
||||
} else if ((was_pass == 0) && (last_stat & 1)) {
|
||||
if (checkpass(last_stat)) {
|
||||
fp = i;
|
||||
was_pass = 1;
|
||||
dev_dbg(mc_csi->dev, "fp %d\n", fp);
|
||||
}
|
||||
}
|
||||
i++;
|
||||
last_stat >>= 1;
|
||||
}
|
||||
|
||||
dev_dbg(mc_csi->dev, "fp %d pf %d\n", fp, pf);
|
||||
if (fp == -1 && pf == -1) {
|
||||
dev_dbg(mc_csi->dev, "No passing record found, please retry\n");
|
||||
return -EINVAL;
|
||||
} else if (pf == -1 && was_pass == 1)
|
||||
pf = 0x3f;
|
||||
|
||||
*x = pf;
|
||||
*w = fp;
|
||||
dev_dbg(mc_csi->dev, "%s: stats %llx, f2p %d, p2f %d",
|
||||
__func__, stat, fp, pf);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned int error_boundary(unsigned int phy_num, unsigned int cil_bit,
|
||||
unsigned int *x, unsigned int *w,
|
||||
unsigned int *y, unsigned int *z)
|
||||
{
|
||||
unsigned int stats_low = 0, stats_high = 0, stats_offset = 0;
|
||||
unsigned long long result = 0;
|
||||
unsigned int is_cilb = 0, is_io1 = 0;
|
||||
|
||||
is_cilb = (cil_bit > 1);
|
||||
is_io1 = (cil_bit % 2);
|
||||
dev_dbg(mc_csi->dev, "boundary for cilb?:%d io1?:%d\n",
|
||||
is_cilb, is_io1);
|
||||
stats_offset = is_cilb * NVCSI_CIL_B_OFFSET +
|
||||
is_io1 * NVCSI_DPHY_CALIB_STATUS_IO_OFFSET;
|
||||
/* step #1 clk lane */
|
||||
stats_low = nvcsi_phy_readl(phy_num, stats_offset +
|
||||
NVCSI_CIL_A_DPHY_DESKEW_CLK_CALIB_STATUS_LOW_0_0_OFFSET);
|
||||
stats_high = nvcsi_phy_readl(phy_num, stats_offset +
|
||||
NVCSI_CIL_A_DPHY_DESKEW_CLK_CALIB_STATUS_HIGH_0_0_OFFSET);
|
||||
result = ((unsigned long long)stats_high) << 32 | stats_low;
|
||||
|
||||
debugfs_deskew_clk_stats_low[cil_bit + phy_num * 4] = stats_low;
|
||||
debugfs_deskew_clk_stats_high[cil_bit + phy_num * 4] = stats_high;
|
||||
|
||||
dev_dbg(mc_csi->dev, "clk boundary: 0x%016llx\n", result);
|
||||
|
||||
if (compute_boundary(result, x, w))
|
||||
return -EINVAL;
|
||||
/* step #2 data lane */
|
||||
stats_low = nvcsi_phy_readl(phy_num, stats_offset +
|
||||
NVCSI_CIL_A_DPHY_DESKEW_DATA_CALIB_STATUS_LOW_0_0_OFFSET);
|
||||
stats_high = nvcsi_phy_readl(phy_num, stats_offset +
|
||||
NVCSI_CIL_A_DPHY_DESKEW_DATA_CALIB_STATUS_HIGH_0_0_OFFSET);
|
||||
result = ((unsigned long long)stats_high) << 32 | stats_low;
|
||||
|
||||
debugfs_deskew_data_stats_low[cil_bit + phy_num * 4] = stats_low;
|
||||
debugfs_deskew_data_stats_high[cil_bit + phy_num * 4] = stats_high;
|
||||
|
||||
dev_dbg(mc_csi->dev, "data boundary: 0x%016llx\n", result);
|
||||
if (compute_boundary(result, y, z))
|
||||
return -EINVAL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
/*
 * Translate per-lane error boundaries into trimmer programming values
 * (NVCSI_CIL_IAS Chapter 5.3).  Arrays @x/@w hold the clock-lane
 * boundaries and @y/@z the data-lane boundaries for the 4 IO lanes of a
 * brick; outputs are the per-IO data trimmers @d[0..3] and the shared
 * clock trimmer *c.
 */
static void compute_trimmer(unsigned int *x, unsigned int *w,
			    unsigned int *y, unsigned int *z,
			    unsigned int *d, unsigned int *c)
{
	int mid[4], base = 0;
	unsigned int i = 0;

	/* NVCSI_CIL_IAS Chapter 5.3 */
	for (i = 0; i < 4; i++) {
		/* keep only the dominant boundary pair per lane */
		if (w[i] < z[i]) {
			y[i] = 0;
			z[i] = 0;
		} else if (w[i] > z[i]) {
			x[i] = 0;
			w[i] = 0;
		}
		/* midpoint between data and clock boundary windows */
		mid[i] = ((y[i] + z[i]) - (x[i] + w[i])) >> 1;
		/* base tracks the most negative midpoint (base <= 0) */
		base = mid[i] < base ? mid[i] : base;
	}
	/* shift everything so all trimmer values are non-negative */
	*c = -base;
	for (i = 0; i < 4; i++)
		d[i] = mid[i] - base;

	/* debug prints */
	for (i = 0; i < 4; i++)
		dev_dbg(mc_csi->dev, "x %u w %u y %u z %u d %u\n",
			x[i], w[i], y[i], z[i], d[i]);
	dev_dbg(mc_csi->dev, "clk %u\n", *c);
}
|
||||
/*
 * Program the computed trimmer values into the INADJ control registers
 * of @phy_num.  @cila/@cilb are the IO-lane masks for the two CIL
 * halves, @d holds the per-IO data trimmers (indexed by PHY_0_CIL_*_IO*)
 * and @c the clock trimmer.  In the 4-lane case both halves are written
 * unconditionally; otherwise only the requested half is read-modified-
 * written so untouched lanes keep their settings.
 */
static void set_trimmer(unsigned int phy_num, unsigned int cila,
			unsigned int cilb,
			unsigned int *d, unsigned int c)
{
	unsigned int val = 0, val1 = 0;

	if (cila && cilb) {
		/* 4-lane */
		val = SW_SET_DPHY_INADJ_CLK |
			SW_SET_DPHY_INADJ_IO0 |
			SW_SET_DPHY_INADJ_IO1 |
			(c << DPHY_INADJ_CLK_SHIFT) |
			(d[PHY_0_CIL_A_IO0] << DPHY_INADJ_IO0_SHIFT) |
			(d[PHY_0_CIL_A_IO1] << DPHY_INADJ_IO1_SHIFT);
		val1 = SW_SET_DPHY_INADJ_IO0 |
			SW_SET_DPHY_INADJ_IO1 |
			(d[PHY_0_CIL_B_IO0] << DPHY_INADJ_IO0_SHIFT)|
			(d[PHY_0_CIL_B_IO1] << DPHY_INADJ_IO1_SHIFT);
		nvcsi_phy_write(phy_num, NVCSI_CIL_A_DPHY_INADJ_CTRL_0_OFFSET,
				val);
		nvcsi_phy_write(phy_num, NVCSI_CIL_B_DPHY_INADJ_CTRL_0_OFFSET,
				val1);
		dev_dbg(mc_csi->dev, "cila %x cilb %x\n", val, val1);
		return;
	}
	/* TODO:
	 * 2-lane and 1-lane cases cannot be verified since there
	 * is no such sensor supported yet
	 */
	if (cila) {
		/* 2-lane and 1-lane*/
		val1 = nvcsi_phy_readl(phy_num,
				NVCSI_CIL_A_DPHY_INADJ_CTRL_0_OFFSET);
		if (cila & NVCSI_PHY_0_NVCSI_CIL_A_IO0) {
			/* clear old IO0 bits, then merge the new value */
			val1 &= ~(SW_SET_DPHY_INADJ_IO0 | DPHY_INADJ_IO0);
			val |= SW_SET_DPHY_INADJ_IO0 |
				(d[PHY_0_CIL_A_IO0] << DPHY_INADJ_IO0_SHIFT);

		}
		if (cila & NVCSI_PHY_0_NVCSI_CIL_A_IO1) {
			val1 &= ~(SW_SET_DPHY_INADJ_IO1 | DPHY_INADJ_IO1);
			val |= SW_SET_DPHY_INADJ_IO1 |
				(d[PHY_0_CIL_A_IO1] << DPHY_INADJ_IO1_SHIFT);
		}
		/* clock trimmer is always rewritten for the half in use */
		val1 &= ~(SW_SET_DPHY_INADJ_CLK | DPHY_INADJ_CLK);
		val |= SW_SET_DPHY_INADJ_CLK | (c << DPHY_INADJ_CLK_SHIFT);

		nvcsi_phy_write(phy_num, NVCSI_CIL_A_DPHY_INADJ_CTRL_0_OFFSET,
				val | val1);
		dev_dbg(mc_csi->dev, "cila %x\n", val | val1);
	} else {
		/* 2-lane and 1-lane*/
		/* NOTE(review): this branch assumes cilb != 0 — confirm
		 * callers never invoke set_trimmer with both masks empty. */
		val1 = nvcsi_phy_readl(phy_num,
				NVCSI_CIL_B_DPHY_INADJ_CTRL_0_OFFSET);
		if (cilb & NVCSI_PHY_0_NVCSI_CIL_B_IO0) {
			val1 &= ~(SW_SET_DPHY_INADJ_IO0 | DPHY_INADJ_IO0);
			val |= SW_SET_DPHY_INADJ_IO0 |
				(d[PHY_0_CIL_B_IO0] << DPHY_INADJ_IO0_SHIFT);
		}
		if (cilb & NVCSI_PHY_0_NVCSI_CIL_B_IO1) {
			val1 &= ~(SW_SET_DPHY_INADJ_IO1 | DPHY_INADJ_IO1);
			val |= SW_SET_DPHY_INADJ_IO1 |
				(d[PHY_0_CIL_B_IO1] << DPHY_INADJ_IO1_SHIFT);
		}
		val1 &= ~(SW_SET_DPHY_INADJ_CLK | DPHY_INADJ_CLK);
		val |= SW_SET_DPHY_INADJ_CLK | (c << DPHY_INADJ_CLK_SHIFT);
		nvcsi_phy_write(phy_num, NVCSI_CIL_B_DPHY_INADJ_CTRL_0_OFFSET,
				val | val1);
		dev_dbg(mc_csi->dev, "cilb %x\n", val | val1);
	}
}
|
||||
|
||||
/*
 * Wait for the deskew worker spawned by nvcsi_deskew_setup() to finish
 * (bounded by DESKEW_TIMEOUT_MSEC) and verify that every lane in
 * @ctx->deskew_lanes reached the "done" state.
 * Returns 0 on success, -ETIMEDOUT if the worker did not finish in
 * time, -EINVAL if some requested lanes did not complete deskew.
 */
int nvcsi_deskew_apply_check(struct nvcsi_deskew_context *ctx)
{
	/* timeleft starts non-zero so an already-completed thread passes */
	unsigned long timeout = 0, timeleft = 1;

	if (!completion_done(&ctx->thread_done)) {
		timeout = msecs_to_jiffies(DESKEW_TIMEOUT_MSEC);
		timeleft = wait_for_completion_timeout(&ctx->thread_done,
						       timeout);
	}
	if (!timeleft)
		return -ETIMEDOUT;
	if (ctx->deskew_lanes ==
			(done_deskew_lanes & ctx->deskew_lanes)) {
		// sleep for a frame to make sure deskew result is reflected
		usleep_range(35*1000, 36*1000);
		return 0;
	} else
		return -EINVAL;
}
EXPORT_SYMBOL(nvcsi_deskew_apply_check);
|
||||
|
||||
/*
 * For every PHY brick with lanes in @active_lanes: read the calibration
 * status registers, compute the error boundaries per IO lane, derive the
 * trimmer values and program them into the hardware.
 * Returns 0 on success or -EINVAL when a boundary cannot be computed.
 */
static int nvcsi_deskew_apply_helper(unsigned int active_lanes)
{
	/* starts at -1; the unsigned increment below wraps it to 0 */
	unsigned int phy_num = -1;
	unsigned int cil_lanes = 0, cila_io_lanes = 0, cilb_io_lanes = 0;
	unsigned int remaining_lanes = active_lanes;
	unsigned int i, j;

	dev_dbg(mc_csi->dev, "%s: interrupt lane: %x\n",
		__func__, active_lanes);
	while (remaining_lanes) {
		/* per-brick boundary scratch: x/w clock, y/z data lanes */
		unsigned int x[4] = {0, 0, 0, 0};
		unsigned int w[4] = {0, 0, 0, 0};
		unsigned int y[4] = {0, 0, 0, 0};
		unsigned int z[4] = {0, 0, 0, 0};
		unsigned int d_trimmer[4] = {0, 0, 0, 0};
		unsigned int clk_trimmer = 0;

		phy_num++;
		/* this PHY's 4-bit nibble of the lane mask */
		cil_lanes = (active_lanes & (0xf << (phy_num * 4)))
				>> (phy_num * 4);
		remaining_lanes &= ~(0xf << (phy_num * 4));
		if (!cil_lanes)
			continue;
		cila_io_lanes = cil_lanes & (NVCSI_PHY_0_NVCSI_CIL_A_IO0
				| NVCSI_PHY_0_NVCSI_CIL_A_IO1);
		cilb_io_lanes = cil_lanes & (NVCSI_PHY_0_NVCSI_CIL_B_IO0
				| NVCSI_PHY_0_NVCSI_CIL_B_IO1);
		/* Step 1: Read status registers and compute error boundaries */
		for (i = NVCSI_PHY_0_NVCSI_CIL_A_IO0, j = 0;
		     i <= NVCSI_PHY_0_NVCSI_CIL_B_IO1;
		     i <<= 1, j++) {
			if ((cil_lanes & i) == 0)
				continue;
			if (error_boundary(phy_num, j,
					   &x[j], &w[j],
					   &y[j], &z[j]))
				return -EINVAL;
		}
		/*step 2: compute trimmer value based on error boundaries */
		compute_trimmer(x, w, y, z, d_trimmer, &clk_trimmer);
		/*step 3: Apply trimmer settings */
		set_trimmer(phy_num, cila_io_lanes, cilb_io_lanes,
			    d_trimmer, clk_trimmer);
	}
	return 0;
}
|
||||
|
||||
/*
 * Debugfs helper: run the boundary computation on a user-supplied
 * 64-bit status word and print the resulting pass/fail points.
 *
 * Fix: the original printed x/w even when compute_boundary() failed,
 * leaking two uninitialized stack values into the debugfs output.
 */
void deskew_dbgfs_calc_bound(struct seq_file *s, long long input_stats)
{
	unsigned int x = 0, w = 0;

	seq_printf(s, "input: %llx\n", input_stats);
	if (compute_boundary(input_stats, &x, &w)) {
		seq_puts(s, "no boundary found\n");
		return;
	}
	seq_printf(s, "setting: x %u w %u\n", x, w);
}
|
||||
|
||||
|
||||
void deskew_dbgfs_deskew_stats(struct seq_file *s)
|
||||
{
|
||||
unsigned int i = 0;
|
||||
|
||||
seq_puts(s, "clk stats\n");
|
||||
for (i = 0; i < NVCSI_PHY_CIL_NUM_LANE; i++) {
|
||||
seq_printf(s, "0x%08x 0x%08x\n",
|
||||
debugfs_deskew_clk_stats_high[i],
|
||||
debugfs_deskew_clk_stats_low[i]);
|
||||
}
|
||||
seq_puts(s, "data stats\n");
|
||||
for (i = 0; i < NVCSI_PHY_CIL_NUM_LANE; i++) {
|
||||
seq_printf(s, "0x%08x 0x%08x\n",
|
||||
debugfs_deskew_data_stats_high[i],
|
||||
debugfs_deskew_data_stats_low[i]);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -138,19 +138,9 @@ struct nvcsi_deskew_context {
|
||||
struct completion thread_done;
|
||||
};
|
||||
|
||||
#if IS_ENABLED(CONFIG_TEGRA_GRHOST_NVCSI)
|
||||
|
||||
int nvcsi_deskew_apply_check(struct nvcsi_deskew_context *ctx);
|
||||
int nvcsi_deskew_setup(struct nvcsi_deskew_context *ctx);
|
||||
#else
|
||||
static int inline nvcsi_deskew_apply_check(struct nvcsi_deskew_context *ctx)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static int inline nvcsi_deskew_setup(struct nvcsi_deskew_context *ctx)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
void nvcsi_deskew_platform_setup(struct tegra_csi_device *dev, bool is_t19x);
|
||||
|
||||
|
||||
242
drivers/video/tegra/host/nvcsi/nvcsi-t194.c
Normal file
242
drivers/video/tegra/host/nvcsi/nvcsi-t194.c
Normal file
@@ -0,0 +1,242 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* NVCSI driver for T194
|
||||
*
|
||||
* Copyright (c) 2017-2022, NVIDIA Corporation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include "nvcsi-t194.h"
|
||||
#include <uapi/linux/nvhost_nvcsi_ioctl.h>
|
||||
#include <linux/tegra-camera-rtcpu.h>
|
||||
|
||||
#include <asm/ioctls.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_graph.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/regulator/consumer.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/nvhost.h>
|
||||
|
||||
#include <media/mc_common.h>
|
||||
#include <media/tegra_camera_platform.h>
|
||||
#include "camera/nvcsi/csi5_fops.h"
|
||||
|
||||
#include "media/csi.h"
|
||||
|
||||
/* PG rate based on max ISP throughput */
|
||||
#define PG_CLK_RATE 102000000
|
||||
/* width of interface between VI and CSI */
|
||||
#define CSI_BUS_WIDTH 64
|
||||
/* number of lanes per brick */
|
||||
#define NUM_LANES 4
|
||||
|
||||
#define PHY_OFFSET 0x10000U
|
||||
#define CIL_A_SW_RESET 0x11024U
|
||||
#define CIL_B_SW_RESET 0x110b0U
|
||||
#define CSIA (1 << 20)
|
||||
#define CSIH (1 << 27)
|
||||
|
||||
static struct tegra_csi_device *mc_csi;
|
||||
struct t194_nvcsi {
|
||||
struct platform_device *pdev;
|
||||
struct tegra_csi_device csi;
|
||||
struct dentry *dir;
|
||||
};
|
||||
|
||||
struct nvhost_device_data t19_nvcsi_info = {
|
||||
.moduleid = 14, //NVHOST_MODULE_NVCSI,
|
||||
.clocks = {
|
||||
{"nvcsi", 400000000},
|
||||
},
|
||||
.devfs_name = "nvcsi",
|
||||
.autosuspend_delay = 500,
|
||||
.can_powergate = true,
|
||||
};
|
||||
|
||||
static const struct of_device_id tegra194_nvcsi_of_match[] = {
|
||||
{
|
||||
.compatible = "nvidia,tegra194-nvcsi",
|
||||
.data = &t19_nvcsi_info,
|
||||
},
|
||||
{ },
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, tegra194_nvcsi_of_match);
|
||||
|
||||
struct t194_nvcsi_file_private {
|
||||
struct platform_device *pdev;
|
||||
};
|
||||
|
||||
/*
 * Control-node ioctl handler for T194: no ioctls are implemented; every
 * command is reported as unsupported.
 */
static long t194_nvcsi_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	return -ENOIOCTLCMD;
}
|
||||
|
||||
/*
 * Open handler for the T194 nvcsi control node: allocate zeroed per-fd
 * state that records the owning platform device.
 */
static int t194_nvcsi_open(struct inode *inode, struct file *file)
{
	/* the control cdev is embedded in nvhost_device_data */
	struct nvhost_device_data *pdata = container_of(inode->i_cdev,
					struct nvhost_device_data, ctrl_cdev);
	struct platform_device *pdev = pdata->pdev;
	struct t194_nvcsi_file_private *filepriv;

	filepriv = kzalloc(sizeof(*filepriv), GFP_KERNEL);
	if (unlikely(filepriv == NULL))
		return -ENOMEM;

	filepriv->pdev = pdev;

	file->private_data = filepriv;

	return nonseekable_open(inode, file);
}
|
||||
|
||||
static int t194_nvcsi_release(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct t194_nvcsi_file_private *filepriv = file->private_data;
|
||||
|
||||
kfree(filepriv);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct file_operations tegra194_nvcsi_ctrl_ops = {
|
||||
.owner = THIS_MODULE,
|
||||
.llseek = no_llseek,
|
||||
.unlocked_ioctl = t194_nvcsi_ioctl,
|
||||
#ifdef CONFIG_COMPAT
|
||||
.compat_ioctl = t194_nvcsi_ioctl,
|
||||
#endif
|
||||
.open = t194_nvcsi_open,
|
||||
.release = t194_nvcsi_release,
|
||||
};
|
||||
|
||||
/*
 * First-stage probe: fetch the nvhost platform data from the OF match
 * table, allocate the device context, wire drvdata/private_data and
 * publish the embedded tegra_csi_device through the file-scope mc_csi
 * pointer.
 * Returns 0 on success, -ENODATA without match data, -ENOMEM on
 * allocation failure.
 */
int t194_nvcsi_early_probe(struct platform_device *pdev)
{
	struct nvhost_device_data *pdata;
	struct t194_nvcsi *nvcsi;

	pdata = (void *)of_device_get_match_data(&pdev->dev);
	if (unlikely(pdata == NULL)) {
		dev_WARN(&pdev->dev, "no platform data\n");
		return -ENODATA;
	}

	/* devm allocation: freed automatically on probe failure/unbind */
	nvcsi = devm_kzalloc(&pdev->dev, sizeof(*nvcsi), GFP_KERNEL);
	if (!nvcsi)
		return -ENOMEM;

	pdata->pdev = pdev;
	nvcsi->pdev = pdev;
	mutex_init(&pdata->lock);
	platform_set_drvdata(pdev, pdata);
	/* publish the CSI device for file-scope users */
	mc_csi = &nvcsi->csi;

	pdata->private_data = nvcsi;

	return 0;
}
|
||||
|
||||
int t194_nvcsi_late_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
|
||||
struct t194_nvcsi *nvcsi = pdata->private_data;
|
||||
struct tegra_camera_dev_info csi_info;
|
||||
int err;
|
||||
|
||||
memset(&csi_info, 0, sizeof(csi_info));
|
||||
csi_info.pdev = pdev;
|
||||
csi_info.hw_type = HWTYPE_CSI;
|
||||
csi_info.use_max = true;
|
||||
csi_info.bus_width = CSI_BUS_WIDTH;
|
||||
csi_info.lane_num = NUM_LANES;
|
||||
csi_info.pg_clk_rate = PG_CLK_RATE;
|
||||
err = tegra_camera_device_register(&csi_info, nvcsi);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
nvcsi->pdev = pdev;
|
||||
nvcsi->csi.fops = &csi5_fops;
|
||||
err = tegra_csi_media_controller_init(&nvcsi->csi, pdev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int t194_nvcsi_probe(struct platform_device *pdev)
|
||||
{
|
||||
int err;
|
||||
struct nvhost_device_data *pdata;
|
||||
struct t194_nvcsi *nvcsi;
|
||||
|
||||
err = t194_nvcsi_early_probe(pdev);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
pdata = platform_get_drvdata(pdev);
|
||||
|
||||
nvcsi = pdata->private_data;
|
||||
|
||||
err = nvhost_client_device_get_resources(pdev);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = nvhost_module_init(pdev);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = nvhost_client_device_init(pdev);
|
||||
if (err) {
|
||||
nvhost_module_deinit(pdev);
|
||||
goto err_client_device_init;
|
||||
}
|
||||
|
||||
err = t194_nvcsi_late_probe(pdev);
|
||||
if (err)
|
||||
goto err_mediacontroller_init;
|
||||
|
||||
return 0;
|
||||
|
||||
err_mediacontroller_init:
|
||||
nvhost_client_device_release(pdev);
|
||||
err_client_device_init:
|
||||
pdata->private_data = NULL;
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
 * Driver teardown: unregister from the camera platform core, clear the
 * file-scope mc_csi pointer (it refers into this device's context), and
 * remove the media-controller entities.
 */
static int __exit t194_nvcsi_remove(struct platform_device *dev)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);
	struct t194_nvcsi *nvcsi = pdata->private_data;

	tegra_camera_device_unregister(nvcsi);
	mc_csi = NULL;
	tegra_csi_media_controller_remove(&nvcsi->csi);

	return 0;
}
|
||||
|
||||
static struct platform_driver t194_nvcsi_driver = {
|
||||
.probe = t194_nvcsi_probe,
|
||||
.remove = __exit_p(t194_nvcsi_remove),
|
||||
.driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.name = "t194-nvcsi",
|
||||
#ifdef CONFIG_OF
|
||||
.of_match_table = tegra194_nvcsi_of_match,
|
||||
#endif
|
||||
#ifdef CONFIG_PM
|
||||
.pm = &nvhost_module_pm_ops,
|
||||
#endif
|
||||
},
|
||||
};
|
||||
|
||||
module_platform_driver(t194_nvcsi_driver);
|
||||
MODULE_LICENSE("GPL");
|
||||
19
drivers/video/tegra/host/nvcsi/nvcsi-t194.h
Normal file
19
drivers/video/tegra/host/nvcsi/nvcsi-t194.h
Normal file
@@ -0,0 +1,19 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Tegra T194 Graphics Host NVCSI 2
|
||||
*
|
||||
* Copyright (c) 2017-2022 NVIDIA Corporation. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef __NVHOST_NVCSI_T194_H__
|
||||
#define __NVHOST_NVCSI_T194_H__
|
||||
|
||||
struct file_operations;
|
||||
struct platform_device;
|
||||
|
||||
extern const struct file_operations tegra194_nvcsi_ctrl_ops;
|
||||
|
||||
int t194_nvcsi_early_probe(struct platform_device *pdev);
|
||||
int t194_nvcsi_late_probe(struct platform_device *pdev);
|
||||
|
||||
#endif
|
||||
140
drivers/video/tegra/host/nvcsi/nvcsi.c
Normal file
140
drivers/video/tegra/host/nvcsi/nvcsi.c
Normal file
@@ -0,0 +1,140 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* NVCSI driver
|
||||
*
|
||||
* Copyright (c) 2014-2022, NVIDIA Corporation. All rights reserved.
|
||||
*/
|
||||
#include <linux/device.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/fs.h>
|
||||
#include <asm/ioctls.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_graph.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <uapi/linux/nvhost_nvcsi_ioctl.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/regulator/consumer.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/nvhost.h>
|
||||
|
||||
#include <media/mc_common.h>
|
||||
#include <media/csi.h>
|
||||
#include <media/tegra_camera_platform.h>
|
||||
|
||||
//#include "camera/nvcsi/csi5_fops.h"
|
||||
|
||||
#include "deskew.h"
|
||||
|
||||
#define PG_CLK_RATE 102000000
|
||||
/* width of interface between VI and CSI */
|
||||
#define CSI_BUS_WIDTH 64
|
||||
/* number of lanes per brick */
|
||||
#define NUM_LANES 4
|
||||
|
||||
#define CSIA (1 << 20)
|
||||
#define CSIF (1 << 25)
|
||||
|
||||
struct nvcsi {
|
||||
struct platform_device *pdev;
|
||||
struct regulator *regulator;
|
||||
struct tegra_csi_device csi;
|
||||
struct dentry *dir;
|
||||
};
|
||||
|
||||
static struct tegra_csi_device *mc_csi;
|
||||
|
||||
struct nvcsi_private {
|
||||
struct platform_device *pdev;
|
||||
struct nvcsi_deskew_context deskew_ctx;
|
||||
};
|
||||
|
||||
/*
 * Assert (@enable != 0) or deassert the CIL software resets for every
 * PHY brick whose lane bits appear in @lanes.  Lane bits are walked in
 * pairs starting at CSIA: bit i selects CIL-A and bit (i << 1) CIL-B of
 * the same brick.
 * Always returns 0.
 */
int nvcsi_cil_sw_reset(int lanes, int enable)
{
	unsigned int phy_num = 0U;
	unsigned int val = enable ? (SW_RESET1_EN | SW_RESET0_EN) : 0U;
	unsigned int addr, i;

	/* step by two lane bits (one PHY brick) per iteration */
	for (i = CSIA; i < CSIF; i = i << 2U) {
		if (lanes & i) {
			addr = CSI4_BASE_ADDRESS + NVCSI_CIL_A_SW_RESET +
				(CSI4_PHY_OFFSET * phy_num);
			host1x_writel(mc_csi->pdev, addr, val);
		}
		if (lanes & (i << 1U)) {
			addr = CSI4_BASE_ADDRESS + NVCSI_CIL_B_SW_RESET +
				(CSI4_PHY_OFFSET * phy_num);
			host1x_writel(mc_csi->pdev, addr, val);
		}
		phy_num++;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(nvcsi_cil_sw_reset);
|
||||
|
||||
static long nvcsi_ioctl(struct file *file, unsigned int cmd,
|
||||
unsigned long arg)
|
||||
{
|
||||
struct nvcsi_private *priv = file->private_data;
|
||||
int ret;
|
||||
|
||||
switch (cmd) {
|
||||
// sensor must be turned on before calling this ioctl, and streaming
|
||||
// should be started shortly after.
|
||||
case NVHOST_NVCSI_IOCTL_DESKEW_SETUP: {
|
||||
unsigned int active_lanes;
|
||||
|
||||
dev_dbg(mc_csi->dev, "ioctl: deskew_setup\n");
|
||||
priv->deskew_ctx.deskew_lanes = get_user(active_lanes,
|
||||
(long __user *)arg);
|
||||
ret = nvcsi_deskew_setup(&priv->deskew_ctx);
|
||||
return ret;
|
||||
}
|
||||
case NVHOST_NVCSI_IOCTL_DESKEW_APPLY: {
|
||||
dev_dbg(mc_csi->dev, "ioctl: deskew_apply\n");
|
||||
ret = nvcsi_deskew_apply_check(&priv->deskew_ctx);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
return -ENOIOCTLCMD;
|
||||
}
|
||||
|
||||
static int nvcsi_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct nvhost_device_data *pdata = container_of(inode->i_cdev,
|
||||
struct nvhost_device_data, ctrl_cdev);
|
||||
struct platform_device *pdev = pdata->pdev;
|
||||
struct nvcsi_private *priv;
|
||||
|
||||
priv = kmalloc(sizeof(*priv), GFP_KERNEL);
|
||||
if (unlikely(priv == NULL))
|
||||
return -ENOMEM;
|
||||
|
||||
priv->pdev = pdev;
|
||||
|
||||
file->private_data = priv;
|
||||
return nonseekable_open(inode, file);
|
||||
}
|
||||
|
||||
static int nvcsi_release(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct nvcsi_private *priv = file->private_data;
|
||||
|
||||
kfree(priv);
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct file_operations tegra_nvcsi_ctrl_ops = {
|
||||
.owner = THIS_MODULE,
|
||||
.llseek = no_llseek,
|
||||
.unlocked_ioctl = nvcsi_ioctl,
|
||||
#ifdef CONFIG_COMPAT
|
||||
.compat_ioctl = nvcsi_ioctl,
|
||||
#endif
|
||||
.open = nvcsi_open,
|
||||
.release = nvcsi_release,
|
||||
};
|
||||
MODULE_LICENSE("GPL");
|
||||
@@ -18,14 +18,6 @@ extern const struct file_operations tegra_nvcsi_ctrl_ops;
|
||||
int nvcsi_finalize_poweron(struct platform_device *pdev);
|
||||
int nvcsi_prepare_poweroff(struct platform_device *pdev);
|
||||
|
||||
#if IS_ENABLED(CONFIG_TEGRA_GRHOST_NVCSI)
|
||||
int nvcsi_cil_sw_reset(int lanes, int enable);
|
||||
#else
|
||||
static int inline nvcsi_cil_sw_reset(int lanes, int enable)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
struct tegra_csi_device *tegra_get_mc_csi(void);
|
||||
#endif
|
||||
|
||||
12
drivers/video/tegra/host/vi/Makefile
Normal file
12
drivers/video/tegra/host/vi/Makefile
Normal file
@@ -0,0 +1,12 @@
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
|
||||
|
||||
LINUXINCLUDE += -I$(srctree.nvidia)/drivers/gpu/host1x/hw/
|
||||
LINUXINCLUDE += -I$(srctree.nvidia)/drivers/gpu/host1x/include
|
||||
LINUXINCLUDE += -I$(srctree.nvidia)/include
|
||||
LINUXINCLUDE += -I$(srctree.nvidia-oot)/drivers/video/tegra/host
|
||||
LINUXINCLUDE += -I$(srctree.nvidia-oot)/include
|
||||
LINUXINCLUDE += -DCONFIG_TEGRA_HOST1X
|
||||
|
||||
nvhost-vi5-objs := vi5.o
|
||||
obj-m += nvhost-vi5.o
|
||||
978
drivers/video/tegra/host/vi/nvhost.h
Normal file
978
drivers/video/tegra/host/vi/nvhost.h
Normal file
@@ -0,0 +1,978 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Tegra graphics host driver
|
||||
*
|
||||
* Copyright (c) 2009-2022, NVIDIA Corporation. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef __LINUX_NVHOST_H
|
||||
#define __LINUX_NVHOST_H
|
||||
|
||||
#include <linux/cdev.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/devfreq.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_domain.h>
|
||||
#include <linux/pm_qos.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/version.h>
|
||||
|
||||
#include <uapi/linux/nvdev_fence.h>
|
||||
|
||||
#ifdef CONFIG_TEGRA_HOST1X
|
||||
#include <linux/host1x.h>
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_TEGRA_GRHOST) && IS_ENABLED(CONFIG_TEGRA_HOST1X)
|
||||
#error "Unable to enable TEGRA_GRHOST or TEGRA_HOST1X at the same time!"
|
||||
#endif
|
||||
|
||||
struct tegra_bwmgr_client;
|
||||
|
||||
struct nvhost_channel;
|
||||
struct nvhost_master;
|
||||
struct nvhost_cdma;
|
||||
struct nvhost_hwctx;
|
||||
struct nvhost_device_power_attr;
|
||||
struct nvhost_device_profile;
|
||||
struct mem_mgr;
|
||||
struct nvhost_as_moduleops;
|
||||
struct nvhost_ctrl_sync_fence_info;
|
||||
struct nvhost_sync_timeline;
|
||||
struct nvhost_sync_pt;
|
||||
enum nvdev_fence_kind;
|
||||
struct nvdev_fence;
|
||||
struct sync_pt;
|
||||
struct dma_fence;
|
||||
struct nvhost_fence;
|
||||
|
||||
#define NVHOST_MODULE_MAX_CLOCKS 8
|
||||
#define NVHOST_MODULE_MAX_SYNCPTS 16
|
||||
#define NVHOST_MODULE_MAX_WAITBASES 3
|
||||
#define NVHOST_MODULE_MAX_MODMUTEXES 5
|
||||
#define NVHOST_MODULE_MAX_IORESOURCE_MEM 5
|
||||
#define NVHOST_NAME_SIZE 24
|
||||
#define NVSYNCPT_INVALID (-1)
|
||||
|
||||
#define NVSYNCPT_AVP_0 (10) /* t20, t30, t114, t148 */
|
||||
#define NVSYNCPT_3D (22) /* t20, t30, t114, t148 */
|
||||
#define NVSYNCPT_VBLANK0 (26) /* t20, t30, t114, t148 */
|
||||
#define NVSYNCPT_VBLANK1 (27) /* t20, t30, t114, t148 */
|
||||
|
||||
#define NVMODMUTEX_ISP_0 (1) /* t124, t132, t210 */
|
||||
#define NVMODMUTEX_ISP_1 (2) /* t124, t132, t210 */
|
||||
#define NVMODMUTEX_NVJPG (3) /* t210 */
|
||||
#define NVMODMUTEX_NVDEC (4) /* t210 */
|
||||
#define NVMODMUTEX_MSENC (5) /* t124, t132, t210 */
|
||||
#define NVMODMUTEX_TSECA (6) /* t124, t132, t210 */
|
||||
#define NVMODMUTEX_TSECB (7) /* t124, t132, t210 */
|
||||
#define NVMODMUTEX_VI (8) /* t124, t132, t210 */
|
||||
#define NVMODMUTEX_VI_0 (8) /* t148 */
|
||||
#define NVMODMUTEX_VIC (10) /* t124, t132, t210 */
|
||||
#define NVMODMUTEX_VI_1 (11) /* t124, t132, t210 */
|
||||
|
||||
enum nvhost_power_sysfs_attributes {
|
||||
NVHOST_POWER_SYSFS_ATTRIB_AUTOSUSPEND_DELAY,
|
||||
NVHOST_POWER_SYSFS_ATTRIB_FORCE_ON,
|
||||
NVHOST_POWER_SYSFS_ATTRIB_MAX
|
||||
};
|
||||
|
||||
struct nvhost_notification {
|
||||
struct { /* 0000- */
|
||||
__u32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 */
|
||||
} time_stamp; /* -0007 */
|
||||
__u32 info32; /* info returned depends on method 0008-000b */
|
||||
#define NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT 8
|
||||
#define NVHOST_CHANNEL_GR_ERROR_SW_NOTIFY 13
|
||||
#define NVHOST_CHANNEL_GR_SEMAPHORE_TIMEOUT 24
|
||||
#define NVHOST_CHANNEL_GR_ILLEGAL_NOTIFY 25
|
||||
#define NVHOST_CHANNEL_FIFO_ERROR_MMU_ERR_FLT 31
|
||||
#define NVHOST_CHANNEL_PBDMA_ERROR 32
|
||||
#define NVHOST_CHANNEL_RESETCHANNEL_VERIF_ERROR 43
|
||||
__u16 info16; /* info returned depends on method 000c-000d */
|
||||
__u16 status; /* user sets bit 15, NV sets status 000e-000f */
|
||||
#define NVHOST_CHANNEL_SUBMIT_TIMEOUT 1
|
||||
};
|
||||
|
||||
struct nvhost_gating_register {
|
||||
u64 addr;
|
||||
u32 prod;
|
||||
u32 disable;
|
||||
};
|
||||
|
||||
struct nvhost_actmon_register {
|
||||
u32 addr;
|
||||
u32 val;
|
||||
};
|
||||
|
||||
enum tegra_emc_request_type {
|
||||
TEGRA_SET_EMC_FLOOR, /* lower bound */
|
||||
TEGRA_SET_EMC_CAP, /* upper bound */
|
||||
TEGRA_SET_EMC_ISO_CAP, /* upper bound that affects ISO Bw */
|
||||
TEGRA_SET_EMC_SHARED_BW, /* shared bw request */
|
||||
TEGRA_SET_EMC_SHARED_BW_ISO, /* for use by ISO Mgr only */
|
||||
TEGRA_SET_EMC_REQ_COUNT /* Should always be last */
|
||||
};
|
||||
|
||||
struct nvhost_clock {
|
||||
char *name;
|
||||
unsigned long default_rate;
|
||||
u32 moduleid;
|
||||
enum tegra_emc_request_type request_type;
|
||||
bool disable_scaling;
|
||||
unsigned long devfreq_rate;
|
||||
};
|
||||
|
||||
struct nvhost_vm_hwid {
|
||||
u64 addr;
|
||||
bool dynamic;
|
||||
u32 shift;
|
||||
};
|
||||
|
||||
/*
|
||||
* Defines HW and SW class identifiers.
|
||||
*
|
||||
* This is module ID mapping between userspace and kernelspace.
|
||||
* The values of enum entries' are referred from NvRmModuleID enum defined
|
||||
* in below userspace file:
|
||||
* $TOP/vendor/nvidia/tegra/core/include/nvrm_module.h
|
||||
* Please make sure each entry below has same value as set in above file.
|
||||
*/
|
||||
enum nvhost_module_identifier {
|
||||
|
||||
/* Specifies external memory (DDR RAM, etc) */
|
||||
NVHOST_MODULE_ID_EXTERNAL_MEMORY_CONTROLLER = 75,
|
||||
|
||||
/* Specifies CBUS floor client module */
|
||||
NVHOST_MODULE_ID_CBUS_FLOOR = 119,
|
||||
|
||||
/* Specifies shared EMC client module */
|
||||
NVHOST_MODULE_ID_EMC_SHARED,
|
||||
NVHOST_MODULE_ID_MAX
|
||||
};
|
||||
|
||||
enum nvhost_resource_policy {
|
||||
RESOURCE_PER_DEVICE = 0,
|
||||
RESOURCE_PER_CHANNEL_INSTANCE,
|
||||
};
|
||||
|
||||
struct nvhost_device_data {
|
||||
int version; /* ip version number of device */
|
||||
int id; /* Separates clients of same hw */
|
||||
void __iomem *aperture[NVHOST_MODULE_MAX_IORESOURCE_MEM];
|
||||
struct device_dma_parameters dma_parms;
|
||||
|
||||
u32 modulemutexes[NVHOST_MODULE_MAX_MODMUTEXES];
|
||||
u32 moduleid; /* Module id for user space API */
|
||||
|
||||
/* interrupt ISR routine for falcon based engines */
|
||||
int (*flcn_isr)(struct platform_device *dev);
|
||||
int irq;
|
||||
int module_irq; /* IRQ bit from general intr reg for module intr */
|
||||
spinlock_t mirq_lock; /* spin lock for module irq */
|
||||
bool self_config_flcn_isr; /* skip setting up falcon interrupts */
|
||||
|
||||
/* Should we toggle the engine SLCG when we turn on the domain? */
|
||||
bool poweron_toggle_slcg;
|
||||
|
||||
/* Flag to set SLCG notifier (for the modules other than VIC) */
|
||||
bool slcg_notifier_enable;
|
||||
|
||||
/* Used to serialize channel when map-at-submit is used w/o mlocks */
|
||||
u32 last_submit_syncpt_id;
|
||||
u32 last_submit_syncpt_value;
|
||||
|
||||
bool power_on; /* If module is powered on */
|
||||
|
||||
u32 class; /* Device class */
|
||||
bool exclusive; /* True if only one user at a time */
|
||||
bool keepalive; /* Do not power gate when opened */
|
||||
bool serialize; /* Serialize submits in the channel */
|
||||
bool push_work_done; /* Push_op done into push buffer */
|
||||
bool poweron_reset; /* Reset the engine before powerup */
|
||||
bool virtual_dev; /* True if virtualized device */
|
||||
char *devfs_name; /* Name in devfs */
|
||||
char *devfs_name_family; /* Core of devfs name */
|
||||
|
||||
/* Support aborting the channel with close(channel_fd) */
|
||||
bool support_abort_on_close;
|
||||
|
||||
char *firmware_name; /* Name of firmware */
|
||||
bool firmware_not_in_subdir; /*
|
||||
* Firmware is not located in
|
||||
* chip subdirectory
|
||||
*/
|
||||
|
||||
bool engine_can_cg; /* True if CG is enabled */
|
||||
bool can_powergate; /* True if module can be power gated */
|
||||
int autosuspend_delay;/* Delay before power gated */
|
||||
struct nvhost_clock clocks[NVHOST_MODULE_MAX_CLOCKS];/* Clock names */
|
||||
|
||||
/* Clock gating registers */
|
||||
struct nvhost_gating_register *engine_cg_regs;
|
||||
|
||||
int num_clks; /* Number of clocks opened for dev */
|
||||
#if IS_ENABLED(CONFIG_TEGRA_GRHOST)
|
||||
struct clk *clk[NVHOST_MODULE_MAX_CLOCKS];
|
||||
#else
|
||||
struct clk_bulk_data *clks;
|
||||
#endif
|
||||
struct mutex lock; /* Power management lock */
|
||||
struct list_head client_list; /* List of clients and rate requests */
|
||||
|
||||
int num_channels; /* Max num of channel supported */
|
||||
int num_mapped_chs; /* Num of channel mapped to device */
|
||||
int num_ppc; /* Number of pixels per clock cycle */
|
||||
|
||||
/* device node for channel operations */
|
||||
dev_t cdev_region;
|
||||
struct device *node;
|
||||
struct cdev cdev;
|
||||
|
||||
/* Address space device node */
|
||||
struct device *as_node;
|
||||
struct cdev as_cdev;
|
||||
|
||||
/* device node for ctrl block */
|
||||
struct class *nvhost_class;
|
||||
struct device *ctrl_node;
|
||||
struct cdev ctrl_cdev;
|
||||
const struct file_operations *ctrl_ops; /* ctrl ops for the module */
|
||||
|
||||
/* address space operations */
|
||||
const struct nvhost_as_moduleops *as_ops;
|
||||
|
||||
struct kobject *power_kobj; /* kobject to hold power sysfs entries */
|
||||
struct nvhost_device_power_attr *power_attrib; /* sysfs attributes */
|
||||
/* kobject to hold clk_cap sysfs entries */
|
||||
struct kobject clk_cap_kobj;
|
||||
struct kobj_attribute *clk_cap_attrs;
|
||||
struct dentry *debugfs; /* debugfs directory */
|
||||
|
||||
u32 nvhost_timeout_default;
|
||||
|
||||
/* Data for devfreq usage */
|
||||
struct devfreq *power_manager;
|
||||
/* Private device profile data */
|
||||
struct nvhost_device_profile *power_profile;
|
||||
/* Should we read load estimate from hardware? */
|
||||
bool actmon_enabled;
|
||||
/* Should we do linear emc scaling? */
|
||||
bool linear_emc;
|
||||
/* Offset to actmon registers */
|
||||
u32 actmon_regs;
|
||||
/* WEIGHT_COUNT of actmon */
|
||||
u32 actmon_weight_count;
|
||||
struct nvhost_actmon_register *actmon_setting_regs;
|
||||
/* Devfreq governor name */
|
||||
const char *devfreq_governor;
|
||||
unsigned long *freq_table;
|
||||
|
||||
/* Marks if the device is booted when pm runtime is disabled */
|
||||
bool booted;
|
||||
|
||||
/* Should be marked as true if nvhost shouldn't create device nodes */
|
||||
bool kernel_only;
|
||||
|
||||
void *private_data; /* private platform data */
|
||||
void *falcon_data; /* store the falcon info */
|
||||
struct platform_device *pdev; /* owner platform_device */
|
||||
void *virt_priv; /* private data for virtualized dev */
|
||||
#if IS_ENABLED(CONFIG_TEGRA_HOST1X)
|
||||
struct host1x *host1x; /* host1x device */
|
||||
#endif
|
||||
|
||||
struct mutex no_poweroff_req_mutex;
|
||||
struct dev_pm_qos_request no_poweroff_req;
|
||||
int no_poweroff_req_count;
|
||||
|
||||
struct notifier_block toggle_slcg_notifier;
|
||||
|
||||
struct rw_semaphore busy_lock;
|
||||
bool forced_idle;
|
||||
|
||||
/* Finalize power on. Can be used for context restore. */
|
||||
int (*finalize_poweron)(struct platform_device *dev);
|
||||
|
||||
/* Called each time we enter the class */
|
||||
int (*init_class_context)(struct platform_device *dev,
|
||||
struct nvhost_cdma *cdma);
|
||||
|
||||
/*
|
||||
* Reset the unit. Used for timeout recovery, resetting the unit on
|
||||
* probe and when un-powergating.
|
||||
*/
|
||||
void (*reset)(struct platform_device *dev);
|
||||
|
||||
/* Device is busy. */
|
||||
void (*busy)(struct platform_device *dev);
|
||||
|
||||
/* Device is idle. */
|
||||
void (*idle)(struct platform_device *dev);
|
||||
|
||||
/* Scaling init is run on device registration */
|
||||
void (*scaling_init)(struct platform_device *dev);
|
||||
|
||||
/* Scaling deinit is called on device unregistration */
|
||||
void (*scaling_deinit)(struct platform_device *dev);
|
||||
|
||||
/* Postscale callback is called after frequency change */
|
||||
void (*scaling_post_cb)(struct nvhost_device_profile *profile,
|
||||
unsigned long freq);
|
||||
|
||||
/* Preparing for power off. Used for context save. */
|
||||
int (*prepare_poweroff)(struct platform_device *dev);
|
||||
|
||||
/* paring for power off. Used for context save. */
|
||||
int (*aggregate_constraints)(struct platform_device *dev,
|
||||
int clk_index,
|
||||
unsigned long floor_rate,
|
||||
unsigned long pixel_rate,
|
||||
unsigned long bw_rate);
|
||||
|
||||
/*
|
||||
* Called after successful client device init. This can
|
||||
* be used in cases where the hardware specifics differ
|
||||
* between hardware revisions
|
||||
*/
|
||||
int (*hw_init)(struct platform_device *dev);
|
||||
|
||||
/* Used to add platform specific masks on reloc address */
|
||||
dma_addr_t (*get_reloc_phys_addr)(dma_addr_t phys_addr, u32 reloc_type);
|
||||
|
||||
/* Allocates a context handler for the device */
|
||||
struct nvhost_hwctx_handler *(*alloc_hwctx_handler)(u32 syncpt,
|
||||
struct nvhost_channel *ch);
|
||||
|
||||
/* engine specific init functions */
|
||||
int (*pre_virt_init)(struct platform_device *pdev);
|
||||
int (*post_virt_init)(struct platform_device *pdev);
|
||||
|
||||
/* engine specific functions */
|
||||
int (*memory_init)(struct platform_device *pdev);
|
||||
|
||||
phys_addr_t carveout_addr;
|
||||
phys_addr_t carveout_size;
|
||||
|
||||
/* Information related to engine-side synchronization */
|
||||
void *syncpt_unit_interface;
|
||||
|
||||
u64 transcfg_addr;
|
||||
u32 transcfg_val;
|
||||
u64 mamask_addr;
|
||||
u32 mamask_val;
|
||||
u64 borps_addr;
|
||||
u32 borps_val;
|
||||
struct nvhost_vm_hwid vm_regs[13];
|
||||
|
||||
/* Actmon IRQ from hintstatus_r */
|
||||
unsigned int actmon_irq;
|
||||
|
||||
/* Is the device already forced on? */
|
||||
bool forced_on;
|
||||
|
||||
/* Should we map channel at submit time? */
|
||||
bool resource_policy;
|
||||
|
||||
/* Should we enable context isolation for this device? */
|
||||
bool isolate_contexts;
|
||||
|
||||
/* channel user context list */
|
||||
struct mutex userctx_list_lock;
|
||||
struct list_head userctx_list;
|
||||
|
||||
/* reset control for this device */
|
||||
struct reset_control *reset_control;
|
||||
|
||||
/*
|
||||
* For loadable nvgpu module, we dynamically assign function
|
||||
* pointer of gk20a_debug_dump_device once the module loads
|
||||
*/
|
||||
void *debug_dump_data;
|
||||
void (*debug_dump_device)(void *dev);
|
||||
|
||||
/* icc client id for emc requests */
|
||||
int icc_id;
|
||||
|
||||
/* icc_path handle */
|
||||
struct icc_path *icc_path_handle;
|
||||
|
||||
/* bandwidth manager client id for emc requests */
|
||||
int bwmgr_client_id;
|
||||
|
||||
/* bandwidth manager handle */
|
||||
struct tegra_bwmgr_client *bwmgr_handle;
|
||||
|
||||
/* number of frames mlock can be locked for */
|
||||
u32 mlock_timeout_factor;
|
||||
|
||||
/* eventlib id for the device */
|
||||
int eventlib_id;
|
||||
|
||||
/* deliver task timestamps for falcon */
|
||||
void (*enable_timestamps)(struct platform_device *pdev,
|
||||
struct nvhost_cdma *cdma, dma_addr_t timestamp_addr);
|
||||
|
||||
/* enable risc-v boot */
|
||||
bool enable_riscv_boot;
|
||||
|
||||
/* store the risc-v info */
|
||||
void *riscv_data;
|
||||
|
||||
/* name of riscv descriptor binary */
|
||||
char *riscv_desc_bin;
|
||||
|
||||
/* name of riscv image binary */
|
||||
char *riscv_image_bin;
|
||||
|
||||
/* Flag to enable the debugfs to query module usage */
|
||||
bool enable_usage_debugfs;
|
||||
|
||||
/* Module clock cycles per actmon sample */
|
||||
u32 cycles_per_actmon_sample;
|
||||
};
|
||||
|
||||
|
||||
static inline
|
||||
struct nvhost_device_data *nvhost_get_devdata(struct platform_device *pdev)
|
||||
{
|
||||
return (struct nvhost_device_data *)platform_get_drvdata(pdev);
|
||||
}
|
||||
|
||||
static inline bool nvhost_dev_is_virtual(struct platform_device *pdev)
|
||||
{
|
||||
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
|
||||
|
||||
return pdata->virtual_dev;
|
||||
}
|
||||
|
||||
struct nvhost_device_power_attr {
|
||||
struct platform_device *ndev;
|
||||
struct kobj_attribute power_attr[NVHOST_POWER_SYSFS_ATTRIB_MAX];
|
||||
};
|
||||
|
||||
int flcn_intr_init(struct platform_device *pdev);
|
||||
int flcn_reload_fw(struct platform_device *pdev);
|
||||
int nvhost_flcn_prepare_poweroff(struct platform_device *pdev);
|
||||
int nvhost_flcn_finalize_poweron(struct platform_device *dev);
|
||||
|
||||
/* common runtime pm and power domain APIs */
|
||||
int nvhost_module_init(struct platform_device *ndev);
|
||||
void nvhost_module_deinit(struct platform_device *dev);
|
||||
void nvhost_module_reset(struct platform_device *dev, bool reboot);
|
||||
void nvhost_module_idle(struct platform_device *dev);
|
||||
void nvhost_module_idle_mult(struct platform_device *pdev, int refs);
|
||||
int nvhost_module_busy(struct platform_device *dev);
|
||||
extern const struct dev_pm_ops nvhost_module_pm_ops;
|
||||
|
||||
void host1x_writel(struct platform_device *dev, u32 r, u32 v);
|
||||
u32 host1x_readl(struct platform_device *dev, u32 r);
|
||||
|
||||
/* common device management APIs */
|
||||
int nvhost_client_device_get_resources(struct platform_device *dev);
|
||||
int nvhost_client_device_release(struct platform_device *dev);
|
||||
int nvhost_client_device_init(struct platform_device *dev);
|
||||
|
||||
/* public host1x sync-point management APIs */
|
||||
u32 nvhost_get_syncpt_host_managed(struct platform_device *pdev,
|
||||
u32 param, const char *syncpt_name);
|
||||
u32 nvhost_get_syncpt_client_managed(struct platform_device *pdev,
|
||||
const char *syncpt_name);
|
||||
void nvhost_syncpt_put_ref_ext(struct platform_device *pdev, u32 id);
|
||||
bool nvhost_syncpt_is_valid_pt_ext(struct platform_device *dev, u32 id);
|
||||
void nvhost_syncpt_set_minval(struct platform_device *dev, u32 id, u32 val);
|
||||
void nvhost_syncpt_set_min_update(struct platform_device *pdev, u32 id, u32 val);
|
||||
int nvhost_syncpt_read_ext_check(struct platform_device *dev, u32 id, u32 *val);
|
||||
u32 nvhost_syncpt_read_maxval(struct platform_device *dev, u32 id);
|
||||
u32 nvhost_syncpt_incr_max_ext(struct platform_device *dev, u32 id, u32 incrs);
|
||||
int nvhost_syncpt_is_expired_ext(struct platform_device *dev, u32 id,
|
||||
u32 thresh);
|
||||
dma_addr_t nvhost_syncpt_address(struct platform_device *engine_pdev, u32 id);
|
||||
int nvhost_syncpt_unit_interface_init(struct platform_device *pdev);
|
||||
|
||||
/* public host1x interrupt management APIs */
|
||||
int nvhost_intr_register_notifier(struct platform_device *pdev,
|
||||
u32 id, u32 thresh,
|
||||
void (*callback)(void *, int),
|
||||
void *private_data);
|
||||
|
||||
/* public host1x sync-point management APIs */
|
||||
#ifdef CONFIG_TEGRA_HOST1X
|
||||
|
||||
static inline struct flcn *get_flcn(struct platform_device *pdev)
|
||||
{
|
||||
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
|
||||
|
||||
return pdata ? pdata->falcon_data : NULL;
|
||||
}
|
||||
|
||||
static inline int nvhost_module_set_rate(struct platform_device *dev, void *priv,
|
||||
unsigned long constraint, int index,
|
||||
unsigned long attr)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int nvhost_module_add_client(struct platform_device *dev, void *priv)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void nvhost_module_remove_client(struct platform_device *dev, void *priv) { }
|
||||
|
||||
static inline int nvhost_syncpt_get_cv_dev_address_table(struct platform_device *engine_pdev,
|
||||
int *count, dma_addr_t **table)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static inline const struct firmware *
|
||||
nvhost_client_request_firmware(struct platform_device *dev,
|
||||
const char *fw_name, bool warn)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void nvhost_debug_dump_device(struct platform_device *pdev)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int nvhost_fence_create_fd(
|
||||
struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts,
|
||||
const char *name,
|
||||
s32 *fence_fd)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline int nvhost_fence_foreach_pt(
|
||||
struct nvhost_fence *fence,
|
||||
int (*iter)(struct nvhost_ctrl_sync_fence_info, void *),
|
||||
void *data)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline struct nvhost_job *nvhost_job_alloc(struct nvhost_channel *ch,
|
||||
int num_cmdbufs, int num_relocs, int num_waitchks,
|
||||
int num_syncpts)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void nvhost_job_put(struct nvhost_job *job) {}
|
||||
|
||||
static inline int nvhost_job_add_client_gather_address(struct nvhost_job *job,
|
||||
u32 num_words, u32 class_id, dma_addr_t gather_address)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline int nvhost_channel_map(struct nvhost_device_data *pdata,
|
||||
struct nvhost_channel **ch,
|
||||
void *identifier)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline void *nvhost_get_private_data(struct platform_device *_dev)
|
||||
{
|
||||
struct nvhost_device_data *pdata =
|
||||
(struct nvhost_device_data *)platform_get_drvdata(_dev);
|
||||
WARN_ON(!pdata);
|
||||
return pdata ? pdata->private_data : NULL;
|
||||
}
|
||||
|
||||
static inline struct nvhost_master *nvhost_get_host(
|
||||
struct platform_device *_dev)
|
||||
{
|
||||
struct device *parent = _dev->dev.parent;
|
||||
struct device *dev = &_dev->dev;
|
||||
|
||||
/*
|
||||
* host1x has no parent dev on non-DT configuration or has
|
||||
* platform_bus on DT configuration. So search for a device
|
||||
* whose parent is NULL or platform_bus
|
||||
*/
|
||||
while (parent && parent != &platform_bus) {
|
||||
dev = parent;
|
||||
parent = parent->parent;
|
||||
}
|
||||
|
||||
return nvhost_get_private_data(to_platform_device(dev));
|
||||
}
|
||||
|
||||
static inline int nvhost_channel_submit(struct nvhost_job *job)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline void nvhost_putchannel(struct nvhost_channel *ch, int cnt) {}
|
||||
|
||||
static inline struct nvhost_fence *nvhost_fence_get(int fd)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void nvhost_fence_put(struct nvhost_fence *fence) {}
|
||||
|
||||
static inline int nvhost_fence_num_pts(struct nvhost_fence *fence)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline dma_addr_t nvhost_t194_get_reloc_phys_addr(dma_addr_t phys_addr,
|
||||
u32 reloc_type)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline dma_addr_t nvhost_t23x_get_reloc_phys_addr(dma_addr_t phys_addr,
|
||||
u32 reloc_type)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void nvhost_eventlib_log_task(struct platform_device *pdev,
|
||||
u32 syncpt_id,
|
||||
u32 syncpt_thres,
|
||||
u64 timestamp_start,
|
||||
u64 timestamp_end)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void nvhost_eventlib_log_submit(struct platform_device *pdev,
|
||||
u32 syncpt_id,
|
||||
u32 syncpt_thresh,
|
||||
u64 timestamp)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void nvhost_eventlib_log_fences(struct platform_device *pdev,
|
||||
u32 task_syncpt_id,
|
||||
u32 task_syncpt_thresh,
|
||||
struct nvdev_fence *fences,
|
||||
u8 num_fences,
|
||||
enum nvdev_fence_kind kind,
|
||||
u64 timestamp)
|
||||
{
|
||||
}
|
||||
#else
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
void nvhost_register_dump_device(
|
||||
struct platform_device *dev,
|
||||
void (*nvgpu_debug_dump_device)(void *),
|
||||
void *data);
|
||||
void nvhost_unregister_dump_device(struct platform_device *dev);
|
||||
#else
|
||||
static inline void nvhost_register_dump_device(
|
||||
struct platform_device *dev,
|
||||
void (*nvgpu_debug_dump_device)(void *),
|
||||
void *data)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void nvhost_unregister_dump_device(struct platform_device *dev)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
void host1x_channel_writel(struct nvhost_channel *ch, u32 r, u32 v);
|
||||
u32 host1x_channel_readl(struct nvhost_channel *ch, u32 r);
|
||||
|
||||
void host1x_sync_writel(struct nvhost_master *dev, u32 r, u32 v);
|
||||
u32 host1x_sync_readl(struct nvhost_master *dev, u32 r);
|
||||
|
||||
/* public host1x power management APIs */
|
||||
bool nvhost_module_powered_ext(struct platform_device *dev);
|
||||
/* This power ON only host1x and doesn't power ON module */
|
||||
int nvhost_module_busy_ext(struct platform_device *dev);
|
||||
/* This power OFF only host1x and doesn't power OFF module */
|
||||
void nvhost_module_idle_ext(struct platform_device *dev);
|
||||
|
||||
/* public api to return platform_device ptr to the default host1x instance */
|
||||
struct platform_device *nvhost_get_default_device(void);
|
||||
|
||||
/* Public PM nvhost APIs. */
|
||||
/* This power ON both host1x and module */
|
||||
int nvhost_module_busy(struct platform_device *dev);
|
||||
/* This power OFF both host1x and module */
|
||||
void nvhost_module_idle(struct platform_device *dev);
|
||||
|
||||
/* public api to register/unregister a subdomain */
|
||||
void nvhost_register_client_domain(struct generic_pm_domain *domain);
|
||||
void nvhost_unregister_client_domain(struct generic_pm_domain *domain);
|
||||
|
||||
int nvhost_module_add_client(struct platform_device *dev,
|
||||
void *priv);
|
||||
void nvhost_module_remove_client(struct platform_device *dev,
|
||||
void *priv);
|
||||
|
||||
int nvhost_module_set_rate(struct platform_device *dev, void *priv,
|
||||
unsigned long constraint, int index, unsigned long attr);
|
||||
|
||||
/* public APIs required to submit in-kernel work */
|
||||
int nvhost_channel_map(struct nvhost_device_data *pdata,
|
||||
struct nvhost_channel **ch,
|
||||
void *identifier);
|
||||
void nvhost_putchannel(struct nvhost_channel *ch, int cnt);
|
||||
/* Allocate memory for a job. Just enough memory will be allocated to
|
||||
* accommodate the submit announced in submit header.
|
||||
*/
|
||||
struct nvhost_job *nvhost_job_alloc(struct nvhost_channel *ch,
|
||||
int num_cmdbufs, int num_relocs, int num_waitchks,
|
||||
int num_syncpts);
|
||||
/* Decrement reference job, free if goes to zero. */
|
||||
void nvhost_job_put(struct nvhost_job *job);
|
||||
|
||||
/* Add a gather with IOVA address to job */
|
||||
int nvhost_job_add_client_gather_address(struct nvhost_job *job,
|
||||
u32 num_words, u32 class_id, dma_addr_t gather_address);
|
||||
int nvhost_channel_submit(struct nvhost_job *job);
|
||||
|
||||
/* public host1x sync-point management APIs */
|
||||
u32 nvhost_get_syncpt_client_managed(struct platform_device *pdev,
|
||||
const char *syncpt_name);
|
||||
void nvhost_syncpt_get_ref_ext(struct platform_device *pdev, u32 id);
|
||||
const char *nvhost_syncpt_get_name(struct platform_device *dev, int id);
|
||||
void nvhost_syncpt_cpu_incr_ext(struct platform_device *dev, u32 id);
|
||||
int nvhost_syncpt_read_ext_check(struct platform_device *dev, u32 id, u32 *val);
|
||||
int nvhost_syncpt_wait_timeout_ext(struct platform_device *dev, u32 id, u32 thresh,
|
||||
u32 timeout, u32 *value, struct timespec64 *ts);
|
||||
int nvhost_syncpt_create_fence_single_ext(struct platform_device *dev,
|
||||
u32 id, u32 thresh, const char *name, int *fence_fd);
|
||||
void nvhost_syncpt_set_min_eq_max_ext(struct platform_device *dev, u32 id);
|
||||
int nvhost_syncpt_nb_pts_ext(struct platform_device *dev);
|
||||
bool nvhost_syncpt_is_valid_pt_ext(struct platform_device *dev, u32 id);
|
||||
u32 nvhost_syncpt_read_minval(struct platform_device *dev, u32 id);
|
||||
void nvhost_syncpt_set_maxval(struct platform_device *dev, u32 id, u32 val);
|
||||
int nvhost_syncpt_fd_get_ext(int fd, struct platform_device *pdev, u32 *id);
|
||||
|
||||
void nvhost_eventlib_log_task(struct platform_device *pdev,
|
||||
u32 syncpt_id,
|
||||
u32 syncpt_thres,
|
||||
u64 timestamp_start,
|
||||
u64 timestamp_end);
|
||||
|
||||
void nvhost_eventlib_log_submit(struct platform_device *pdev,
|
||||
u32 syncpt_id,
|
||||
u32 syncpt_thresh,
|
||||
u64 timestamp);
|
||||
|
||||
void nvhost_eventlib_log_fences(struct platform_device *pdev,
|
||||
u32 task_syncpt_id,
|
||||
u32 task_syncpt_thresh,
|
||||
struct nvdev_fence *fences,
|
||||
u8 num_fences,
|
||||
enum nvdev_fence_kind kind,
|
||||
u64 timestamp);
|
||||
|
||||
dma_addr_t nvhost_t194_get_reloc_phys_addr(dma_addr_t phys_addr,
|
||||
u32 reloc_type);
|
||||
dma_addr_t nvhost_t23x_get_reloc_phys_addr(dma_addr_t phys_addr,
|
||||
u32 reloc_type);
|
||||
|
||||
/* public host1x interrupt management APIs */
|
||||
int nvhost_intr_register_fast_notifier(struct platform_device *pdev,
|
||||
u32 id, u32 thresh,
|
||||
void (*callback)(void *, int),
|
||||
void *private_data);
|
||||
|
||||
#if IS_ENABLED(CONFIG_TEGRA_GRHOST) && defined(CONFIG_DEBUG_FS)
|
||||
void nvhost_debug_dump_device(struct platform_device *pdev);
|
||||
#else
|
||||
static inline void nvhost_debug_dump_device(struct platform_device *pdev)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_TEGRA_GRHOST)
|
||||
const struct firmware *
|
||||
nvhost_client_request_firmware(struct platform_device *dev,
|
||||
const char *fw_name, bool warn);
|
||||
#else
|
||||
static inline const struct firmware *
|
||||
nvhost_client_request_firmware(struct platform_device *dev,
|
||||
const char *fw_name, bool warn)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_TEGRA_GRHOST_SYNC)
|
||||
|
||||
int nvhost_fence_foreach_pt(
|
||||
struct nvhost_fence *fence,
|
||||
int (*iter)(struct nvhost_ctrl_sync_fence_info, void *),
|
||||
void *data);
|
||||
|
||||
int nvhost_fence_get_pt(
|
||||
struct nvhost_fence *fence, size_t i,
|
||||
u32 *id, u32 *threshold);
|
||||
|
||||
struct nvhost_fence *nvhost_fence_create(
|
||||
struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts,
|
||||
const char *name);
|
||||
int nvhost_fence_create_fd(
|
||||
struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts,
|
||||
const char *name,
|
||||
s32 *fence_fd);
|
||||
|
||||
struct nvhost_fence *nvhost_fence_get(int fd);
|
||||
struct nvhost_fence *nvhost_fence_dup(struct nvhost_fence *fence);
|
||||
int nvhost_fence_num_pts(struct nvhost_fence *fence);
|
||||
int nvhost_fence_install(struct nvhost_fence *fence, int fence_fd);
|
||||
void nvhost_fence_put(struct nvhost_fence *fence);
|
||||
void nvhost_fence_wait(struct nvhost_fence *fence, u32 timeout_in_ms);
|
||||
|
||||
#else
|
||||
|
||||
static inline int nvhost_fence_foreach_pt(
|
||||
struct nvhost_fence *fence,
|
||||
int (*iter)(struct nvhost_ctrl_sync_fence_info, void *d),
|
||||
void *d)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline struct nvhost_fence *nvhost_create_fence(
|
||||
struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts,
|
||||
const char *name)
|
||||
{
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
static inline int nvhost_fence_create_fd(
|
||||
struct platform_device *pdev,
|
||||
struct nvhost_ctrl_sync_fence_info *pts,
|
||||
u32 num_pts,
|
||||
const char *name,
|
||||
s32 *fence_fd)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline struct nvhost_fence *nvhost_fence_get(int fd)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline int nvhost_fence_num_pts(struct nvhost_fence *fence)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void nvhost_fence_put(struct nvhost_fence *fence)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void nvhost_fence_wait(struct nvhost_fence *fence, u32 timeout_in_ms)
|
||||
{
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_TEGRA_GRHOST_SYNC) && !defined(CONFIG_SYNC)
|
||||
int nvhost_dma_fence_unpack(struct dma_fence *fence, u32 *id, u32 *threshold);
|
||||
bool nvhost_dma_fence_is_waitable(struct dma_fence *fence);
|
||||
#else
|
||||
static inline int nvhost_dma_fence_unpack(struct dma_fence *fence, u32 *id,
|
||||
u32 *threshold)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
static inline bool nvhost_dma_fence_is_waitable(struct dma_fence *fence)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_TEGRA_GRHOST_SYNC) && defined(CONFIG_SYNC)
/* Legacy android sync_fence-based helpers. */
struct sync_fence *nvhost_sync_fdget(int fd);
int nvhost_sync_num_pts(struct sync_fence *fence);
struct sync_fence *nvhost_sync_create_fence(struct platform_device *pdev,
		struct nvhost_ctrl_sync_fence_info *pts,
		u32 num_pts, const char *name);
int nvhost_sync_create_fence_fd(
		struct platform_device *pdev,
		struct nvhost_ctrl_sync_fence_info *pts,
		u32 num_pts,
		const char *name,
		s32 *fence_fd);
int nvhost_sync_fence_set_name(int fence_fd, const char *name);
u32 nvhost_sync_pt_id(struct sync_pt *__pt);
u32 nvhost_sync_pt_thresh(struct sync_pt *__pt);
struct sync_pt *nvhost_sync_pt_from_fence_index(struct sync_fence *fence,
		u32 sync_pt_index);
#else
/* Stubs used when legacy sync support is compiled out: lookups return
 * NULL/0 and creation/rename operations fail with -EINVAL.
 */
static inline struct sync_fence *nvhost_sync_fdget(int fd)
{
	return NULL;
}

static inline int nvhost_sync_num_pts(struct sync_fence *fence)
{
	return 0;
}

static inline struct sync_fence *nvhost_sync_create_fence(struct platform_device *pdev,
		struct nvhost_ctrl_sync_fence_info *pts,
		u32 num_pts, const char *name)
{
	return ERR_PTR(-EINVAL);
}

static inline int nvhost_sync_create_fence_fd(
		struct platform_device *pdev,
		struct nvhost_ctrl_sync_fence_info *pts,
		u32 num_pts,
		const char *name,
		s32 *fence_fd)
{
	return -EINVAL;
}

static inline int nvhost_sync_fence_set_name(int fence_fd, const char *name)
{
	return -EINVAL;
}

static inline u32 nvhost_sync_pt_id(struct sync_pt *__pt)
{
	return 0;
}

static inline u32 nvhost_sync_pt_thresh(struct sync_pt *__pt)
{
	return 0;
}

static inline struct sync_pt *nvhost_sync_pt_from_fence_index(
		struct sync_fence *fence, u32 sync_pt_index)
{
	return NULL;
}
#endif
|
||||
|
||||
/* Hacky way to get access to struct nvhost_device_data for VI device. */
extern struct nvhost_device_data t20_vi_info;
extern struct nvhost_device_data t30_vi_info;
extern struct nvhost_device_data t11_vi_info;
extern struct nvhost_device_data t14_vi_info;

/* NVDEC idle/unidle hooks — implemented by the nvdec driver;
 * presumably used to quiesce NVDEC around shared-resource updates
 * (TODO confirm against the caller).
 */
int nvdec_do_idle(void);
int nvdec_do_unidle(void);
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
389
drivers/video/tegra/host/vi/vi5.c
Normal file
389
drivers/video/tegra/host/vi/vi5.c
Normal file
@@ -0,0 +1,389 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* VI5 driver
|
||||
*
|
||||
* Copyright (c) 2017-2022, NVIDIA Corporation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <asm/ioctls.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_graph.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/regulator/consumer.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <media/fusa-capture/capture-vi-channel.h>
|
||||
#include <soc/tegra/camrtc-capture.h>
|
||||
#include <linux/version.h>
|
||||
#include <soc/tegra/fuse.h>
|
||||
#include "vi5.h"
|
||||
#include <linux/platform/tegra/emc_bwmgr.h>
|
||||
#include "capture/capture-support.h"
|
||||
#include <media/vi.h>
|
||||
#include <media/mc_common.h>
|
||||
#include <media/tegra_camera_platform.h>
|
||||
#include <uapi/linux/nvhost_vi_ioctl.h>
|
||||
#include <linux/platform/tegra/latency_allowance.h>
|
||||
#include "nvhost.h"
|
||||
|
||||
/* HW capability, pixels per clock */
#define NUM_PPC 8
/* 15% bus protocol overhead */
/* + 5% SW overhead */
#define VI_OVERHEAD 20

/* Per-device driver state for the VI5 host driver. */
struct host_vi5 {
	struct platform_device *pdev;	/* this VI platform device */
	struct platform_device *vi_thi;	/* companion VI falcon (THI) device */
	struct vi vi_common;		/* V4L2 / media-controller state */

	/* Debugfs */
	struct vi5_debug {
		struct debugfs_regset32 ch0;	/* channel-0 register dump */
	} debug;

	/* WAR: Adding a temp flags to avoid registering to V4L2 and
	 * tegra camera platform device.
	 */
	bool skip_v4l2_init;
};
|
||||
|
||||
static int vi5_init_debugfs(struct host_vi5 *vi5);
|
||||
static void vi5_remove_debugfs(struct host_vi5 *vi5);
|
||||
|
||||
static int vi5_alloc_syncpt(struct platform_device *pdev,
|
||||
const char *name,
|
||||
uint32_t *syncpt_id)
|
||||
{
|
||||
struct host_vi5 *vi5 = nvhost_get_private_data(pdev);
|
||||
|
||||
return capture_alloc_syncpt(vi5->vi_thi, name, syncpt_id);
|
||||
}
|
||||
|
||||
/*
 * Aggregate clock-rate requests for the VI clock (index 0): combine the
 * SCF floor-rate request with the V4L2 pixel-rate request.
 * Returns 0 (fall back to the default policy) when there is no platform
 * data, no pixel rate, or a non-primary clock index.
 *
 * NOTE(review): the return type is int while the computed rate is
 * unsigned long; rates above INT_MAX would be truncated — confirm the
 * caller's expected range.
 */
int nvhost_vi5_aggregate_constraints(struct platform_device *dev,
			int clk_index,
			unsigned long floor_rate,
			unsigned long pixelrate,
			unsigned long bw_constraint)
{
	struct nvhost_device_data *pdata = nvhost_get_devdata(dev);

	if (!pdata) {
		dev_err(&dev->dev,
			"No platform data, fall back to default policy\n");
		return 0;
	}
	if (!pixelrate || clk_index != 0)
		return 0;
	/* SCF send request using NVHOST_CLK, which is calculated
	 * in floor_rate, so we need to aggregate its request
	 * with V4L2 pixelrate request
	 */
	return floor_rate + (pixelrate / pdata->num_ppc);
}
|
||||
|
||||
/* Return a previously allocated capture syncpoint to the
 * capture-support layer on the companion VI falcon device.
 */
static void vi5_release_syncpt(struct platform_device *pdev, uint32_t id)
{
	struct host_vi5 *host = nvhost_get_private_data(pdev);

	capture_release_syncpt(host->vi_thi, id);
}
|
||||
|
||||
static void vi5_get_gos_table(struct platform_device *pdev, int *count,
|
||||
const dma_addr_t **table)
|
||||
{
|
||||
struct host_vi5 *vi5 = nvhost_get_private_data(pdev);
|
||||
|
||||
capture_get_gos_table(vi5->vi_thi, count, table);
|
||||
}
|
||||
|
||||
static int vi5_get_syncpt_gos_backing(struct platform_device *pdev,
|
||||
uint32_t id,
|
||||
dma_addr_t *syncpt_addr,
|
||||
uint32_t *gos_index,
|
||||
uint32_t *gos_offset)
|
||||
{
|
||||
struct host_vi5 *vi5 = nvhost_get_private_data(pdev);
|
||||
|
||||
return capture_get_syncpt_gos_backing(vi5->vi_thi, id,
|
||||
syncpt_addr, gos_index, gos_offset);
|
||||
}
|
||||
|
||||
/* Syncpoint/GoS callbacks handed to the VI channel driver. */
static struct vi_channel_drv_ops vi5_channel_drv_ops = {
	.alloc_syncpt = vi5_alloc_syncpt,
	.release_syncpt = vi5_release_syncpt,
	.get_gos_table = vi5_get_gos_table,
	.get_syncpt_gos_backing = vi5_get_syncpt_gos_backing,
};
|
||||
|
||||
/*
 * First-stage probe: look up the companion VI falcon (THI) device from
 * DT, register the channel-driver syncpoint ops, allocate per-device
 * state and wire it into the nvhost device data. Also used directly as
 * the pre_virt_init hook in virtualized configurations.
 *
 * Returns 0 on success, -EPROBE_DEFER if the falcon driver is not
 * bound yet, or a negative errno on failure. On error the reference
 * taken on the falcon device is dropped.
 */
int vi5_priv_early_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct nvhost_device_data *info;
	struct device_node *thi_np;
	struct platform_device *thi = NULL;
	struct host_vi5 *vi5;
	int err = 0;

	info = (void *)of_device_get_match_data(dev);
	if (unlikely(info == NULL)) {
		dev_WARN(dev, "no platform data\n");
		return -ENODATA;
	}

	/* Resolve the VI falcon companion device from the DT phandle. */
	thi_np = of_parse_phandle(dev->of_node, "nvidia,vi-falcon-device", 0);
	if (thi_np == NULL) {
		dev_WARN(dev, "missing %s handle\n", "nvidia,vi-falcon-device");
		return -ENODEV;
	}

	thi = of_find_device_by_node(thi_np);
	of_node_put(thi_np);

	if (thi == NULL)
		return -ENODEV;

	/* Falcon device exists but its driver is not bound yet: retry. */
	if (thi->dev.driver == NULL) {
		err = -EPROBE_DEFER;
		goto put_vi;
	}

	err = vi_channel_drv_fops_register(&vi5_channel_drv_ops);
	if (err) {
		/* NOTE(review): the message says "defer probe" but err is
		 * propagated as returned by the register call — confirm it
		 * is -EPROBE_DEFER in this path. */
		dev_warn(&pdev->dev, "syncpt fops register failed, defer probe\n");
		goto put_vi;
	}

	vi5 = (struct host_vi5 *) devm_kzalloc(dev, sizeof(*vi5), GFP_KERNEL);
	if (!vi5) {
		err = -ENOMEM;
		goto put_vi;
	}

	vi5->skip_v4l2_init = of_property_read_bool(dev->of_node,
						"nvidia,skip-v4l2-init");
	vi5->vi_thi = thi;
	vi5->pdev = pdev;
	info->pdev = pdev;
	mutex_init(&info->lock);
	platform_set_drvdata(pdev, info);
	info->private_data = vi5;

	/* VI5 addresses up to 39 bits of IOVA space. */
	(void) dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));

	return 0;

put_vi:
	platform_device_put(thi);
	if (err != -EPROBE_DEFER)
		dev_err(&pdev->dev, "probe failed: %d\n", err);

	info->private_data = NULL;

	return err;
}
|
||||
|
||||
/*
 * Second-stage probe: register VI with the tegra camera platform layer
 * (for bandwidth/clock aggregation) and create debugfs entries. Also
 * used directly as the post_virt_init hook in virtualized
 * configurations.
 *
 * Returns 0 on success; on failure the nvhost client device state set
 * up by the caller is released before returning the error.
 */
int vi5_priv_late_probe(struct platform_device *pdev)
{
	struct tegra_camera_dev_info vi_info;
	struct nvhost_device_data *info = platform_get_drvdata(pdev);
	struct host_vi5 *vi5 = info->private_data;
	int err;

	memset(&vi_info, 0, sizeof(vi_info));
	vi_info.pdev = pdev;
	vi_info.hw_type = HWTYPE_VI;
	vi_info.ppc = NUM_PPC;
	vi_info.overhead = VI_OVERHEAD;

	err = tegra_camera_device_register(&vi_info, vi5);
	if (err)
		goto device_release;

	/* Debugfs is best-effort; failures are ignored. */
	vi5_init_debugfs(vi5);

	return 0;

device_release:
	/* Undo nvhost_client_device_init() performed by the caller. */
	nvhost_client_device_release(pdev);

	return err;
}
|
||||
|
||||
/*
 * Platform-driver probe: run the two private probe stages around the
 * standard nvhost client device bring-up (resources, module init,
 * client init).
 *
 * Returns 0 on success or a negative errno; on failure the reference
 * on the VI falcon device taken by the early stage is dropped.
 */
static int vi5_probe(struct platform_device *pdev)
{
	int err;
	struct nvhost_device_data *pdata;
	struct host_vi5 *vi5;

	dev_dbg(&pdev->dev, "%s: probe %s\n", __func__, pdev->name);

	err = vi5_priv_early_probe(pdev);
	if (err)
		goto error;

	/* early probe stored the device data as drvdata. */
	pdata = platform_get_drvdata(pdev);
	vi5 = pdata->private_data;

	err = nvhost_client_device_get_resources(pdev);
	if (err)
		goto put_vi;

	err = nvhost_module_init(pdev);
	if (err)
		goto put_vi;

	err = nvhost_client_device_init(pdev);
	if (err)
		goto deinit;

	err = vi5_priv_late_probe(pdev);
	if (err)
		goto deinit;

	return 0;

deinit:
	nvhost_module_deinit(pdev);
put_vi:
	platform_device_put(vi5->vi_thi);
	if (err != -EPROBE_DEFER)
		dev_err(&pdev->dev, "probe failed: %d\n", err);
error:
	return err;
}
|
||||
|
||||
/* Per-open-file state for the VI5 character device.
 * NOTE(review): not referenced anywhere in this file — presumably used
 * by ioctl code elsewhere, or dead; verify before removing.
 */
struct t194_vi5_file_private {
	struct platform_device *pdev;
	struct tegra_mc_vi mc_vi;
	unsigned int vi_bypass_bw;	/* requested bypass bandwidth */
};
|
||||
|
||||
/*
 * Platform-driver remove: unregister from the camera platform layer,
 * tear down the channel driver and the media controller, remove
 * debugfs, and drop the reference on the VI falcon device taken at
 * probe time.
 */
static int vi5_remove(struct platform_device *pdev)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
	struct host_vi5 *vi5 = pdata->private_data;

	tegra_camera_device_unregister(vi5);
	vi_channel_drv_unregister(&pdev->dev);
	tegra_vi_media_controller_cleanup(&vi5->vi_common.mc_vi);

	vi5_remove_debugfs(vi5);
	platform_device_put(vi5->vi_thi);

	return 0;
}
|
||||
|
||||
/* nvhost device data for Tegra194 VI (single instance). */
struct nvhost_device_data t19_vi5_info = {
	.devfs_name = "vi",
	.moduleid = 2, //NVHOST_MODULE_VI,
	.clocks = {
		{"vi", UINT_MAX},
		{"emc", 0,
		NVHOST_MODULE_ID_EXTERNAL_MEMORY_CONTROLLER,
		TEGRA_SET_EMC_FLOOR, false, UINT_MAX}
	},
	.num_ppc = 8,	/* pixels per clock, see NUM_PPC */
	.aggregate_constraints = nvhost_vi5_aggregate_constraints,
	.pre_virt_init = vi5_priv_early_probe,
	.post_virt_init = vi5_priv_late_probe,
	.bwmgr_client_id = TEGRA_BWMGR_CLIENT_VI,
};

/* nvhost device data for Tegra234 VI instance 0. */
struct nvhost_device_data t23x_vi0_info = {
	.devfs_name = "vi0",
	.moduleid = 2, //NVHOST_MODULE_VI,
	.clocks = {
		{"vi", UINT_MAX},
	},
	.num_ppc = 8,
	.aggregate_constraints = nvhost_vi5_aggregate_constraints,
	.pre_virt_init = vi5_priv_early_probe,
	.post_virt_init = vi5_priv_late_probe,
};

/* nvhost device data for Tegra234 VI instance 1. */
struct nvhost_device_data t23x_vi1_info = {
	.devfs_name = "vi1",
	.moduleid = 3, //NVHOST_MODULE_VI2,
	.clocks = {
		{"vi", UINT_MAX},
	},
	.num_ppc = 8,
	.aggregate_constraints = nvhost_vi5_aggregate_constraints,
	.pre_virt_init = vi5_priv_early_probe,
	.post_virt_init = vi5_priv_late_probe,
};
|
||||
|
||||
/* DT match table. Both Tegra234 VI instances share the same compatible
 * string; they are distinguished by the node .name (vi0 / vi1).
 */
static const struct of_device_id tegra_vi5_of_match[] = {
	{
		.name = "vi",
		.compatible = "nvidia,tegra194-vi",
		.data = &t19_vi5_info,
	},
	{
		.name = "vi0",
		.compatible = "nvidia,tegra234-vi",
		.data = &t23x_vi0_info,
	},
	{
		.name = "vi1",
		.compatible = "nvidia,tegra234-vi",
		.data = &t23x_vi1_info,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_vi5_of_match);
|
||||
|
||||
/* Platform driver registration; PM ops are provided by the nvhost
 * module layer when CONFIG_PM is enabled.
 */
static struct platform_driver vi5_driver = {
	.probe = vi5_probe,
	.remove = vi5_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "tegra194-vi5",
#ifdef CONFIG_OF
		.of_match_table = tegra_vi5_of_match,
#endif
#ifdef CONFIG_PM
		.pm = &nvhost_module_pm_ops,
#endif
	},
};

module_platform_driver(vi5_driver);
|
||||
|
||||
/* === Debugfs ========================================================== */
|
||||
|
||||
static int vi5_init_debugfs(struct host_vi5 *vi5)
|
||||
{
|
||||
static const struct debugfs_reg32 vi5_ch_regs[] = {
|
||||
{ .name = "protocol_version", 0x00 },
|
||||
{ .name = "perforce_changelist", 0x4 },
|
||||
{ .name = "build_timestamp", 0x8 },
|
||||
{ .name = "channel_count", 0x80 },
|
||||
};
|
||||
struct nvhost_device_data *pdata = platform_get_drvdata(vi5->pdev);
|
||||
struct dentry *dir = pdata->debugfs;
|
||||
struct vi5_debug *debug = &vi5->debug;
|
||||
|
||||
debug->ch0.base = pdata->aperture[0];
|
||||
debug->ch0.regs = vi5_ch_regs;
|
||||
debug->ch0.nregs = ARRAY_SIZE(vi5_ch_regs);
|
||||
debugfs_create_regset32("ch0", S_IRUGO, dir, &debug->ch0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Intentionally empty: the "ch0" regset lives under pdata->debugfs,
 * which is presumably removed by the nvhost core — TODO confirm.
 */
static void vi5_remove_debugfs(struct host_vi5 *vi5)
{
}
|
||||
MODULE_LICENSE("GPL");
|
||||
23
drivers/video/tegra/host/vi/vi5.h
Normal file
23
drivers/video/tegra/host/vi/vi5.h
Normal file
@@ -0,0 +1,23 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Tegra VI5
 *
 * Copyright (c) 2017-2022 NVIDIA Corporation. All rights reserved.
 */

#ifndef __NVHOST_VI5_H__
#define __NVHOST_VI5_H__

#include <linux/platform_device.h>
#include <media/mc_common.h>

/* Aggregate SCF floor-rate and V4L2 pixel-rate clock requests for the
 * VI clock (index 0); returns 0 to fall back to the default policy.
 */
int nvhost_vi5_aggregate_constraints(struct platform_device *dev,
			int clk_index,
			unsigned long floor_rate,
			unsigned long pixelrate,
			unsigned long bw_constraint);

/* Two-stage probe entry points; also used as the pre/post virt-init
 * hooks in virtualized configurations.
 */
int vi5_priv_early_probe(struct platform_device *pdev);
int vi5_priv_late_probe(struct platform_device *pdev);

#endif
|
||||
51
include/uapi/linux/nvhost_isp_ioctl.h
Normal file
51
include/uapi/linux/nvhost_isp_ioctl.h
Normal file
@@ -0,0 +1,51 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Tegra ISP Driver
 *
 * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
 */

#ifndef __UAPI_LINUX_NVHOST_ISP_IOCTL_H
#define __UAPI_LINUX_NVHOST_ISP_IOCTL_H

#include <linux/ioctl.h>
#include <linux/types.h>

/* ISP latency-allowance client classes */
#define ISP_SOFT_ISO_CLIENT 0
#define ISP_HARD_ISO_CLIENT 1

#if !defined(__KERNEL__)
#define __user
#endif

/* EMC bandwidth/clock request for ISP.
 * NOTE(review): `uint` is not a fixed-width UAPI type; uapi headers
 * conventionally use __u32 — changing it would alter the header's
 * portability guarantees, so verify ABI users first.
 */
struct isp_emc {
	uint isp_bw;
	uint isp_clk;
	uint bpp_input;
	uint bpp_output;
};

struct isp_la_bw {
	/* Total ISP write BW in MBps, either ISO peak BW or non-ISO avg BW */
	__u32 isp_la_bw;
	/* is ISO or non-ISO */
	/* NOTE(review): `bool` in a uapi struct has compiler-dependent
	 * size/alignment; __u8/__u32 is conventional — verify existing
	 * ABI before changing. */
	bool is_iso;
};

#define NVHOST_ISP_IOCTL_MAGIC 'I'

/*
 * /dev/nvhost-ctrl-isp devices
 *
 * Opening a '/dev/nvhost-ctrl-isp' device node creates a way to send
 * ctrl ioctl to isp driver.
 *
 * /dev/nvhost-isp is for channel (context specific) operations. We use
 * /dev/nvhost-ctrl-isp for global (context independent) operations on
 * isp device.
 */

#define NVHOST_ISP_IOCTL_SET_ISP_LA_BW \
	_IOW(NVHOST_ISP_IOCTL_MAGIC, 4, struct isp_la_bw)
#endif
|
||||
|
||||
Reference in New Issue
Block a user