Merge "host1x-nvhost: Integrate host1x-nvhost drivers from kernel/nvidia" into dev-main

Committed by Gerrit Code Review on 2023-04-04 21:56:39 -07:00
6 changed files with 1399 additions and 6 deletions

drivers/gpu/host1x-nvhost/Makefile

@@ -1,10 +1,16 @@
# SPDX-License-Identifier: GPL-2.0
#
# Tegra Host1x-Nvhost Driver.
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
-# NOTE: Do not change or add anything in this makefile.
-# The source code and makefile rules are copied from the
-# kernel/nvidia/drivers/gpu/host1x-nvhost. This file is
-# just place-holder for empty makefile to avoid any build
-# issue when copy is not done from command line and building
-# the tree independent of source copy.
+ccflags-y += -I$(srctree.nvidia)/include
+ccflags-y += -I$(srctree.nvidia)/include/uapi/linux
+ccflags-y += -I$(srctree.nvidia)/drivers/gpu/host1x/include
+ccflags-y += -DCONFIG_TEGRA_HOST1X
+ccflags-y += -Werror
+
+host1x-nvhost-objs = nvhost.o falcon.o
+obj-m += host1x-nvhost.o

drivers/gpu/host1x-nvhost/falcon.c

@@ -0,0 +1,207 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2023, NVIDIA Corporation. All rights reserved.
*/
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/pci_ids.h>
#include <linux/iopoll.h>
#include "falcon.h"
enum falcon_memory {
FALCON_MEMORY_IMEM,
FALCON_MEMORY_DATA,
};
static void falcon_writel(struct falcon *falcon, u32 value, u32 offset)
{
writel(value, falcon->regs + offset);
}
int falcon_wait_idle(struct falcon *falcon)
{
u32 value;
return readl_poll_timeout(falcon->regs + FALCON_IDLESTATE, value,
(value == 0), 10, 100000);
}
static int falcon_dma_wait_idle(struct falcon *falcon)
{
u32 value;
return readl_poll_timeout(falcon->regs + FALCON_DMATRFCMD, value,
(value & FALCON_DMATRFCMD_IDLE), 10, 100000);
}
static int falcon_copy_chunk(struct falcon *falcon,
phys_addr_t base,
unsigned long offset,
enum falcon_memory target)
{
u32 cmd = FALCON_DMATRFCMD_SIZE_256B;
if (target == FALCON_MEMORY_IMEM)
cmd |= FALCON_DMATRFCMD_IMEM;
falcon_writel(falcon, offset, FALCON_DMATRFMOFFS);
falcon_writel(falcon, base, FALCON_DMATRFFBOFFS);
falcon_writel(falcon, cmd, FALCON_DMATRFCMD);
return falcon_dma_wait_idle(falcon);
}
static void falcon_copy_firmware_image(struct falcon *falcon,
const struct firmware *firmware)
{
u32 *virt = falcon->firmware.virt;
size_t i;
/* copy the whole thing taking into account endianness */
for (i = 0; i < firmware->size / sizeof(u32); i++)
virt[i] = le32_to_cpu(((__le32 *)firmware->data)[i]);
}
static int falcon_parse_firmware_image(struct falcon *falcon)
{
struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.virt;
struct falcon_fw_os_header_v1 *os;
/* endian problems would show up right here */
if (bin->magic != PCI_VENDOR_ID_NVIDIA && bin->magic != 0x10fe) {
dev_err(falcon->dev, "incorrect firmware magic\n");
return -EINVAL;
}
/* currently only version 1 is supported */
if (bin->version != 1) {
dev_err(falcon->dev, "unsupported firmware version\n");
return -EINVAL;
}
/* check that the firmware size is consistent */
if (bin->size > falcon->firmware.size) {
dev_err(falcon->dev, "firmware image size inconsistency\n");
return -EINVAL;
}
os = falcon->firmware.virt + bin->os_header_offset;
falcon->firmware.bin_data.size = bin->os_size;
falcon->firmware.bin_data.offset = bin->os_data_offset;
falcon->firmware.code.offset = os->code_offset;
falcon->firmware.code.size = os->code_size;
falcon->firmware.data.offset = os->data_offset;
falcon->firmware.data.size = os->data_size;
return 0;
}
int falcon_read_firmware(struct falcon *falcon, const char *name)
{
int err;
/* request_firmware prints error if it fails */
err = request_firmware(&falcon->firmware.firmware, name, falcon->dev);
if (err < 0)
return err;
falcon->firmware.size = falcon->firmware.firmware->size;
return 0;
}
int falcon_load_firmware(struct falcon *falcon)
{
const struct firmware *firmware = falcon->firmware.firmware;
int err;
/* copy firmware image into local area. this also ensures endianness */
falcon_copy_firmware_image(falcon, firmware);
/* parse the image data */
err = falcon_parse_firmware_image(falcon);
if (err < 0) {
dev_err(falcon->dev, "failed to parse firmware image\n");
return err;
}
release_firmware(firmware);
falcon->firmware.firmware = NULL;
return 0;
}
int falcon_init(struct falcon *falcon)
{
falcon->firmware.virt = NULL;
return 0;
}
void falcon_exit(struct falcon *falcon)
{
if (falcon->firmware.firmware)
release_firmware(falcon->firmware.firmware);
}
int falcon_boot(struct falcon *falcon)
{
unsigned long offset;
u32 value;
int err;
if (!falcon->firmware.virt)
return -EINVAL;
err = readl_poll_timeout(falcon->regs + FALCON_DMACTL, value,
(value & (FALCON_DMACTL_IMEM_SCRUBBING |
FALCON_DMACTL_DMEM_SCRUBBING)) == 0,
10, 10000);
if (err < 0)
return err;
falcon_writel(falcon, 0, FALCON_DMACTL);
/* setup the address of the binary data so Falcon can access it later */
falcon_writel(falcon, (falcon->firmware.iova +
falcon->firmware.bin_data.offset) >> 8,
FALCON_DMATRFBASE);
/* copy the data segment into Falcon internal memory */
for (offset = 0; offset < falcon->firmware.data.size; offset += 256)
falcon_copy_chunk(falcon,
falcon->firmware.data.offset + offset,
offset, FALCON_MEMORY_DATA);
/* copy the code segment into Falcon internal memory */
for (offset = 0; offset < falcon->firmware.code.size; offset += 256)
falcon_copy_chunk(falcon, falcon->firmware.code.offset + offset,
offset, FALCON_MEMORY_IMEM);
/* enable interface */
falcon_writel(falcon, FALCON_ITFEN_MTHDEN |
FALCON_ITFEN_CTXEN,
FALCON_ITFEN);
/* boot falcon */
falcon_writel(falcon, 0x00000000, FALCON_BOOTVEC);
falcon_writel(falcon, FALCON_CPUCTL_STARTCPU, FALCON_CPUCTL);
err = falcon_wait_idle(falcon);
if (err < 0) {
dev_err(falcon->dev, "Falcon boot failed due to timeout\n");
return err;
}
return 0;
}
void falcon_execute_method(struct falcon *falcon, u32 method, u32 data)
{
falcon_writel(falcon, method >> 2, FALCON_UCLASS_METHOD_OFFSET);
falcon_writel(falcon, data, FALCON_UCLASS_METHOD_DATA);
}
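
For reference, the intended calling sequence for this interface is falcon_init(), falcon_read_firmware(), a caller-provided DMA allocation for the image, falcon_load_firmware(), and finally falcon_boot(). Below is a minimal sketch under those assumptions (error unwinding trimmed; the "vic.bin" firmware name and the example_ function name are purely illustrative):

/* Sketch: bring up a Falcon-based engine whose registers are at `regs`. */
static int example_falcon_bringup(struct platform_device *pdev,
                                  void __iomem *regs)
{
        struct falcon *falcon;
        int err;

        falcon = devm_kzalloc(&pdev->dev, sizeof(*falcon), GFP_KERNEL);
        if (!falcon)
                return -ENOMEM;

        falcon->dev = &pdev->dev;
        falcon->regs = regs;
        falcon_init(falcon);

        /* request_firmware() under the hood; records the blob size */
        err = falcon_read_firmware(falcon, "vic.bin");
        if (err < 0)
                return err;

        /* the caller owns the DMA-visible copy of the image */
        falcon->firmware.virt = dma_alloc_coherent(&pdev->dev,
                                                   falcon->firmware.size,
                                                   &falcon->firmware.iova,
                                                   GFP_KERNEL);
        if (!falcon->firmware.virt)
                return -ENOMEM;

        /* copy, endian-swap and parse the image, then drop the blob */
        err = falcon_load_firmware(falcon);
        if (err < 0)
                return err;

        /* DMA code/data into IMEM/DMEM and start the CPU */
        return falcon_boot(falcon);
}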

drivers/gpu/host1x-nvhost/falcon.h

@@ -0,0 +1,114 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2022, NVIDIA Corporation. All rights reserved.
*/
#ifndef _FALCON_H_
#define _FALCON_H_
#include <linux/types.h>
#define FALCON_UCLASS_METHOD_OFFSET 0x00000040
#define FALCON_UCLASS_METHOD_DATA 0x00000044
#define FALCON_IRQMSET 0x00001010
#define FALCON_IRQMSET_WDTMR (1 << 1)
#define FALCON_IRQMSET_HALT (1 << 4)
#define FALCON_IRQMSET_EXTERR (1 << 5)
#define FALCON_IRQMSET_SWGEN0 (1 << 6)
#define FALCON_IRQMSET_SWGEN1 (1 << 7)
#define FALCON_IRQMSET_EXT(v) (((v) & 0xff) << 8)
#define FALCON_IRQDEST 0x0000101c
#define FALCON_IRQDEST_HALT (1 << 4)
#define FALCON_IRQDEST_EXTERR (1 << 5)
#define FALCON_IRQDEST_SWGEN0 (1 << 6)
#define FALCON_IRQDEST_SWGEN1 (1 << 7)
#define FALCON_IRQDEST_EXT(v) (((v) & 0xff) << 8)
#define FALCON_ITFEN 0x00001048
#define FALCON_ITFEN_CTXEN (1 << 0)
#define FALCON_ITFEN_MTHDEN (1 << 1)
#define FALCON_IDLESTATE 0x0000104c
#define FALCON_CPUCTL 0x00001100
#define FALCON_CPUCTL_STARTCPU (1 << 1)
#define FALCON_BOOTVEC 0x00001104
#define FALCON_DMACTL 0x0000110c
#define FALCON_DMACTL_DMEM_SCRUBBING (1 << 1)
#define FALCON_DMACTL_IMEM_SCRUBBING (1 << 2)
#define FALCON_DMATRFBASE 0x00001110
#define FALCON_DMATRFMOFFS 0x00001114
#define FALCON_DMATRFCMD 0x00001118
#define FALCON_DMATRFCMD_IDLE (1 << 1)
#define FALCON_DMATRFCMD_IMEM (1 << 4)
#define FALCON_DMATRFCMD_SIZE_256B (6 << 8)
#define FALCON_DMATRFFBOFFS 0x0000111c
struct falcon_fw_bin_header_v1 {
u32 magic; /* 0x10de */
u32 version; /* version of bin format (1) */
u32 size; /* entire image size including this header */
u32 os_header_offset;
u32 os_data_offset;
u32 os_size;
};
struct falcon_fw_os_app_v1 {
u32 offset;
u32 size;
};
struct falcon_fw_os_header_v1 {
u32 code_offset;
u32 code_size;
u32 data_offset;
u32 data_size;
};
struct falcon_firmware_section {
unsigned long offset;
size_t size;
};
struct falcon_firmware {
/* Firmware after it is read but not loaded */
const struct firmware *firmware;
/* Raw firmware data */
dma_addr_t iova;
dma_addr_t phys;
void *virt;
size_t size;
/* Parsed firmware information */
struct falcon_firmware_section bin_data;
struct falcon_firmware_section data;
struct falcon_firmware_section code;
};
struct falcon {
/* Set by falcon client */
struct device *dev;
void __iomem *regs;
struct falcon_firmware firmware;
};
int falcon_init(struct falcon *falcon);
void falcon_exit(struct falcon *falcon);
int falcon_read_firmware(struct falcon *falcon, const char *firmware_name);
int falcon_load_firmware(struct falcon *falcon);
int falcon_boot(struct falcon *falcon);
void falcon_execute_method(struct falcon *falcon, u32 method, u32 data);
int falcon_wait_idle(struct falcon *falcon);
#endif /* _FALCON_H_ */

drivers/gpu/host1x-nvhost/nvhost.c

@@ -0,0 +1,961 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2023, NVIDIA Corporation. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/dma-fence.h>
#include <linux/dma-mapping.h>
#include <linux/host1x-next.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/nvhost.h>
#include <linux/nvhost_t194.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "falcon.h"
#define TEGRA194_SYNCPT_PAGE_SIZE 0x1000
#define TEGRA194_SYNCPT_SHIM_BASE 0x60000000
#define TEGRA194_SYNCPT_SHIM_SIZE 0x00400000
#define TEGRA234_SYNCPT_PAGE_SIZE 0x10000
#define TEGRA234_SYNCPT_SHIM_BASE 0x60000000
#define TEGRA234_SYNCPT_SHIM_SIZE 0x04000000
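/*
 * Syncpoints are mirrored into an MMIO "shim" aperture, one page per
 * syncpoint (0x1000 bytes on Tegra194, 0x10000 on Tegra234), so engines
 * and other bus masters can operate on a syncpoint with plain memory
 * accesses instead of host1x channel commands.
 */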
#define THI_STREAMID0 0x00000030
#define THI_STREAMID1 0x00000034
#define NVHOST_NUM_CDEV 1
struct nvhost_syncpt_interface {
dma_addr_t base;
size_t size;
uint32_t page_size;
};
u32 host1x_readl(struct platform_device *pdev, u32 r)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
void __iomem *addr = pdata->aperture[0] + r;
return readl(addr);
}
EXPORT_SYMBOL(host1x_readl);
void host1x_writel(struct platform_device *pdev, u32 r, u32 v)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
void __iomem *addr = pdata->aperture[0] + r;
writel(v, addr);
}
EXPORT_SYMBOL(host1x_writel);
static const struct of_device_id host1x_match[] = {
{ .compatible = "nvidia,tegra194-host1x", },
{ .compatible = "nvidia,tegra234-host1x", },
{},
};
struct platform_device *nvhost_get_default_device(void)
{
struct platform_device *host1x_pdev;
struct device_node *np;
np = of_find_matching_node(NULL, host1x_match);
if (!np)
return NULL;
host1x_pdev = of_find_device_by_node(np);
if (!host1x_pdev)
return NULL;
return host1x_pdev;
}
EXPORT_SYMBOL(nvhost_get_default_device);
struct host1x *nvhost_get_host1x(struct platform_device *pdev)
{
struct platform_device *host1x_pdev;
struct host1x *host1x;
host1x_pdev = nvhost_get_default_device();
if (!host1x_pdev) {
dev_dbg(&pdev->dev, "host1x device not available\n");
return NULL;
}
host1x = platform_get_drvdata(host1x_pdev);
if (!host1x) {
dev_warn(&pdev->dev, "No platform data for host1x!\n");
return NULL;
}
return host1x;
}
EXPORT_SYMBOL(nvhost_get_host1x);
static struct device *nvhost_client_device_create(struct platform_device *pdev,
struct cdev *cdev,
const char *cdev_name,
dev_t devno,
const struct file_operations *ops)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
struct device *dev;
int err;
pdata->nvhost_class = class_create(THIS_MODULE, pdev->dev.of_node->name);
if (IS_ERR(pdata->nvhost_class)) {
dev_err(&pdev->dev, "failed to create class\n");
return ERR_CAST(pdata->nvhost_class);
}
cdev_init(cdev, ops);
cdev->owner = THIS_MODULE;
err = cdev_add(cdev, devno, 1);
if (err < 0) {
dev_err(&pdev->dev, "failed to add cdev\n");
class_destroy(pdata->nvhost_class);
return ERR_PTR(err);
}
dev = device_create(pdata->nvhost_class, &pdev->dev, devno, NULL,
(pdev->id <= 0) ? "nvhost-%s%s" : "nvhost-%s%s.%d",
cdev_name, pdev->dev.of_node->name, pdev->id);
if (IS_ERR(dev)) {
dev_err(&pdev->dev, "failed to create %s device\n", cdev_name);
class_destroy(pdata->nvhost_class);
cdev_del(cdev);
}
return dev;
}
int nvhost_client_device_get_resources(struct platform_device *pdev)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
int err;
u32 i;
pdata->host1x = nvhost_get_host1x(pdev);
if (!pdata->host1x) {
dev_warn(&pdev->dev, "No platform data for host1x!\n");
return -ENODEV;
}
for (i = 0; i < pdev->num_resources; i++) {
void __iomem *regs = NULL;
struct resource *r;
r = platform_get_resource(pdev, IORESOURCE_MEM, i);
/* We've run out of mem resources */
if (!r)
break;
regs = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(regs)) {
err = PTR_ERR(regs);
goto fail;
}
pdata->aperture[i] = regs;
}
return 0;
fail:
dev_err(&pdev->dev, "failed to get register memory\n");
return err;
}
EXPORT_SYMBOL(nvhost_client_device_get_resources);
int nvhost_client_device_init(struct platform_device *pdev)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
dev_t devno;
int err;
err = alloc_chrdev_region(&devno, 0, NVHOST_NUM_CDEV, "nvhost");
if (err < 0) {
dev_err(&pdev->dev, "failed to reserve chrdev region\n");
return err;
}
pdata->ctrl_node = nvhost_client_device_create(pdev, &pdata->ctrl_cdev,
"ctrl-", devno,
pdata->ctrl_ops);
	if (IS_ERR(pdata->ctrl_node)) {
		unregister_chrdev_region(devno, NVHOST_NUM_CDEV);
		return PTR_ERR(pdata->ctrl_node);
	}
pdata->cdev_region = devno;
return 0;
}
EXPORT_SYMBOL(nvhost_client_device_init);
int nvhost_client_device_release(struct platform_device *pdev)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
if (!IS_ERR_OR_NULL(pdata->ctrl_node)) {
device_destroy(pdata->nvhost_class, pdata->ctrl_cdev.dev);
cdev_del(&pdata->ctrl_cdev);
class_destroy(pdata->nvhost_class);
}
unregister_chrdev_region(pdata->cdev_region, NVHOST_NUM_CDEV);
return 0;
}
EXPORT_SYMBOL(nvhost_client_device_release);
u32 nvhost_get_syncpt_host_managed(struct platform_device *pdev,
u32 param, const char *syncpt_name)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
struct host1x_syncpt *sp;
sp = host1x_syncpt_alloc(pdata->host1x, 0, syncpt_name ? syncpt_name :
dev_name(&pdev->dev));
if (!sp)
return 0;
return host1x_syncpt_id(sp);
}
EXPORT_SYMBOL(nvhost_get_syncpt_host_managed);
u32 nvhost_get_syncpt_client_managed(struct platform_device *pdev,
const char *syncpt_name)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
struct host1x_syncpt *sp;
sp = host1x_syncpt_alloc(pdata->host1x, HOST1X_SYNCPT_CLIENT_MANAGED,
syncpt_name ? syncpt_name :
dev_name(&pdev->dev));
if (!sp)
return 0;
return host1x_syncpt_id(sp);
}
EXPORT_SYMBOL_GPL(nvhost_get_syncpt_client_managed);
u32 nvhost_get_syncpt_gpu_managed(struct platform_device *pdev,
const char *syncpt_name)
{
return nvhost_get_syncpt_client_managed(pdev, syncpt_name);
}
EXPORT_SYMBOL_GPL(nvhost_get_syncpt_gpu_managed);
void nvhost_syncpt_put_ref_ext(struct platform_device *pdev, u32 id)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
struct host1x_syncpt *sp;
sp = host1x_syncpt_get_by_id_noref(pdata->host1x, id);
if (WARN_ON(!sp))
return;
host1x_syncpt_put(sp);
}
EXPORT_SYMBOL(nvhost_syncpt_put_ref_ext);
bool nvhost_syncpt_is_valid_pt_ext(struct platform_device *pdev, u32 id)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
struct host1x_syncpt *sp;
	if (!pdata || !pdata->host1x)
		return false;
sp = host1x_syncpt_get_by_id_noref(pdata->host1x, id);
return sp ? true : false;
}
EXPORT_SYMBOL(nvhost_syncpt_is_valid_pt_ext);
int nvhost_syncpt_is_expired_ext(struct platform_device *pdev, u32 id,
u32 thresh)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
struct host1x_syncpt *sp;
sp = host1x_syncpt_get_by_id_noref(pdata->host1x, id);
if (WARN_ON(!sp))
return true;
if (host1x_syncpt_wait(sp, thresh, 0, NULL))
return false;
return true;
}
EXPORT_SYMBOL(nvhost_syncpt_is_expired_ext);
void nvhost_syncpt_set_minval(struct platform_device *pdev, u32 id, u32 val)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
struct host1x_syncpt *sp;
u32 cur;
sp = host1x_syncpt_get_by_id_noref(pdata->host1x, id);
if (WARN_ON(!sp))
return;
cur = host1x_syncpt_read(sp);
while (cur++ != val)
host1x_syncpt_incr(sp);
}
EXPORT_SYMBOL(nvhost_syncpt_set_minval);
void nvhost_syncpt_set_min_update(struct platform_device *pdev, u32 id, u32 val)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
struct host1x_syncpt *sp;
u32 cur;
sp = host1x_syncpt_get_by_id_noref(pdata->host1x, id);
if (WARN_ON(!sp))
return;
cur = host1x_syncpt_read(sp);
while (cur++ != val)
host1x_syncpt_incr(sp);
host1x_syncpt_read(sp);
}
EXPORT_SYMBOL(nvhost_syncpt_set_min_update);
int nvhost_syncpt_read_ext_check(struct platform_device *pdev, u32 id, u32 *val)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
struct host1x_syncpt *sp;
sp = host1x_syncpt_get_by_id_noref(pdata->host1x, id);
if (!sp)
return -EINVAL;
*val = host1x_syncpt_read(sp);
return 0;
}
EXPORT_SYMBOL(nvhost_syncpt_read_ext_check);
u32 nvhost_syncpt_read_maxval(struct platform_device *pdev, u32 id)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
struct host1x_syncpt *sp;
sp = host1x_syncpt_get_by_id_noref(pdata->host1x, id);
if (WARN_ON(!sp))
return 0;
return host1x_syncpt_read_max(sp);
}
EXPORT_SYMBOL(nvhost_syncpt_read_maxval);
u32 nvhost_syncpt_incr_max_ext(struct platform_device *pdev, u32 id, u32 incrs)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
struct host1x_syncpt *sp;
sp = host1x_syncpt_get_by_id_noref(pdata->host1x, id);
if (WARN_ON(!sp))
return 0;
return host1x_syncpt_incr_max(sp, incrs);
}
EXPORT_SYMBOL(nvhost_syncpt_incr_max_ext);
static int nvhost_syncpt_get_aperture(struct device_node *np, u64 *base,
size_t *size)
{
if (of_device_is_compatible(np, "nvidia,tegra194-host1x")) {
*base = TEGRA194_SYNCPT_SHIM_BASE;
*size = TEGRA194_SYNCPT_SHIM_SIZE;
return 0;
}
if (of_device_is_compatible(np, "nvidia,tegra234-host1x")) {
*base = TEGRA234_SYNCPT_SHIM_BASE;
*size = TEGRA234_SYNCPT_SHIM_SIZE;
return 0;
}
return -ENODEV;
}
static int nvhost_syncpt_get_page_size(struct device_node *np, uint32_t *size)
{
if (of_device_is_compatible(np, "nvidia,tegra194-host1x")) {
*size = TEGRA194_SYNCPT_PAGE_SIZE;
return 0;
}
if (of_device_is_compatible(np, "nvidia,tegra234-host1x")) {
*size = TEGRA234_SYNCPT_PAGE_SIZE;
return 0;
}
return -ENODEV;
}
u32 nvhost_syncpt_unit_interface_get_byte_offset_ext(struct platform_device *pdev,
u32 syncpt_id)
{
uint32_t size;
int err;
err = nvhost_syncpt_get_page_size(pdev->dev.of_node, &size);
if (WARN_ON(err < 0))
return 0;
return syncpt_id * size;
}
EXPORT_SYMBOL(nvhost_syncpt_unit_interface_get_byte_offset_ext);
u32 nvhost_syncpt_unit_interface_get_byte_offset(u32 syncpt_id)
{
struct platform_device *host1x_pdev;
host1x_pdev = nvhost_get_default_device();
if (WARN_ON(!host1x_pdev))
return 0;
return nvhost_syncpt_unit_interface_get_byte_offset_ext(host1x_pdev,
syncpt_id);
}
EXPORT_SYMBOL(nvhost_syncpt_unit_interface_get_byte_offset);
int nvhost_syncpt_unit_interface_get_aperture(struct platform_device *pdev,
u64 *base, size_t *size)
{
return nvhost_syncpt_get_aperture(pdev->dev.of_node, base, size);
}
EXPORT_SYMBOL(nvhost_syncpt_unit_interface_get_aperture);
int nvhost_syncpt_unit_interface_init(struct platform_device *pdev)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
struct nvhost_syncpt_interface *syncpt_if;
u64 base;
int err;
syncpt_if = devm_kzalloc(&pdev->dev, sizeof(*syncpt_if), GFP_KERNEL);
if (!syncpt_if)
return -ENOMEM;
err = nvhost_syncpt_get_aperture(pdev->dev.parent->of_node, &base,
&syncpt_if->size);
if (err < 0) {
dev_err(&pdev->dev, "failed to get syncpt aperture\n");
return err;
}
err = nvhost_syncpt_get_page_size(pdev->dev.parent->of_node,
&syncpt_if->page_size);
if (err < 0) {
dev_err(&pdev->dev, "failed to get syncpt page size\n");
return err;
}
/* If IOMMU is enabled, map it into the device memory */
if (iommu_get_domain_for_dev(&pdev->dev)) {
syncpt_if->base = dma_map_resource(&pdev->dev, base,
syncpt_if->size,
DMA_BIDIRECTIONAL,
DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(&pdev->dev, syncpt_if->base))
return -ENOMEM;
} else {
syncpt_if->base = base;
}
pdata->syncpt_unit_interface = syncpt_if;
	dev_info(&pdev->dev,
		 "syncpt_unit_base %llx syncpt_unit_size %zx page_size %x\n",
		 base, syncpt_if->size, syncpt_if->page_size);
return 0;
}
EXPORT_SYMBOL(nvhost_syncpt_unit_interface_init);
void nvhost_syncpt_unit_interface_deinit(struct platform_device *pdev)
{
struct nvhost_syncpt_interface *syncpt_if;
struct nvhost_device_data *pdata;
if (iommu_get_domain_for_dev(&pdev->dev)) {
pdata = platform_get_drvdata(pdev);
syncpt_if = pdata->syncpt_unit_interface;
dma_unmap_resource(&pdev->dev, syncpt_if->base, syncpt_if->size,
DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
}
}
EXPORT_SYMBOL(nvhost_syncpt_unit_interface_deinit);
dma_addr_t nvhost_syncpt_address(struct platform_device *pdev, u32 id)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
struct nvhost_syncpt_interface *syncpt_if = pdata->syncpt_unit_interface;
return syncpt_if->base + syncpt_if->page_size * id;
}
EXPORT_SYMBOL(nvhost_syncpt_address);
static irqreturn_t flcn_isr(int irq, void *dev_id)
{
struct platform_device *pdev = (struct platform_device *)(dev_id);
struct nvhost_device_data *pdata = nvhost_get_devdata(pdev);
if (pdata->flcn_isr)
pdata->flcn_isr(pdev);
return IRQ_HANDLED;
}
int flcn_intr_init(struct platform_device *pdev)
{
struct nvhost_device_data *pdata = nvhost_get_devdata(pdev);
int ret = 0;
pdata->irq = platform_get_irq(pdev, 0);
if (pdata->irq < 0) {
dev_err(&pdev->dev, "failed to get IRQ\n");
return -ENXIO;
}
spin_lock_init(&pdata->mirq_lock);
ret = devm_request_irq(&pdev->dev, pdata->irq, flcn_isr, 0,
dev_name(&pdev->dev), pdev);
if (ret) {
dev_err(&pdev->dev, "failed to request irq. err %d\n", ret);
return ret;
}
/* keep irq disabled */
disable_irq(pdata->irq);
return 0;
}
EXPORT_SYMBOL(flcn_intr_init);
int flcn_reload_fw(struct platform_device *pdev)
{
/* TODO: Used by debugfs */
return -EOPNOTSUPP;
}
EXPORT_SYMBOL(flcn_reload_fw);
static int nvhost_flcn_init(struct platform_device *pdev,
struct nvhost_device_data *pdata)
{
struct falcon *falcon;
falcon = devm_kzalloc(&pdev->dev, sizeof(*falcon), GFP_KERNEL);
if (!falcon)
return -ENOMEM;
falcon->dev = &pdev->dev;
falcon->regs = pdata->aperture[0];
falcon_init(falcon);
pdata->falcon_data = falcon;
return 0;
}
int nvhost_flcn_prepare_poweroff(struct platform_device *pdev)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
if (pdata->flcn_isr)
disable_irq(pdata->irq);
return 0;
}
EXPORT_SYMBOL(nvhost_flcn_prepare_poweroff);
static int nvhost_flcn_load_firmware(struct platform_device *pdev,
struct falcon *falcon,
char *firmware_name)
{
dma_addr_t iova;
size_t size;
void *virt;
int err;
if (falcon->firmware.virt)
return 0;
err = falcon_read_firmware(falcon, firmware_name);
if (err < 0)
return err;
size = falcon->firmware.size;
virt = dma_alloc_coherent(&pdev->dev, size, &iova, GFP_KERNEL);
if (!virt)
return -ENOMEM;
falcon->firmware.virt = virt;
falcon->firmware.iova = iova;
err = falcon_load_firmware(falcon);
if (err < 0)
goto cleanup;
return 0;
cleanup:
dma_free_coherent(&pdev->dev, size, virt, iova);
return err;
}
int nvhost_flcn_finalize_poweron(struct platform_device *pdev)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
#ifdef CONFIG_IOMMU_API
struct iommu_fwspec *spec = dev_iommu_fwspec_get(&pdev->dev);
#endif
struct falcon *falcon;
int err;
u32 value;
if (!pdata->falcon_data) {
err = nvhost_flcn_init(pdev, pdata);
		if (err < 0)
			return err;
}
falcon = pdata->falcon_data;
err = nvhost_flcn_load_firmware(pdev, falcon, pdata->firmware_name);
if (err < 0)
return err;
#ifdef CONFIG_IOMMU_API
if (spec) {
host1x_writel(pdev, pdata->transcfg_addr, pdata->transcfg_val);
if (spec->num_ids > 0) {
value = spec->ids[0] & 0xffff;
host1x_writel(pdev, THI_STREAMID0, value);
host1x_writel(pdev, THI_STREAMID1, value);
}
}
#endif
err = falcon_boot(falcon);
if (err < 0)
return err;
err = falcon_wait_idle(falcon);
if (err < 0) {
dev_err(&pdev->dev, "falcon boot timed out\n");
return err;
}
if (pdata->flcn_isr)
enable_irq(pdata->irq);
return 0;
}
EXPORT_SYMBOL(nvhost_flcn_finalize_poweron);
struct nvhost_host1x_cb {
struct dma_fence_cb cb;
struct work_struct work;
void (*notifier)(void *data, int unused);
void *notifier_data;
};
static void nvhost_host1x_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct nvhost_host1x_cb *host1x_cb;
host1x_cb = container_of(cb, struct nvhost_host1x_cb, cb);
schedule_work(&host1x_cb->work);
dma_fence_put(f);
}
static void nvhost_intr_do_work(struct work_struct *work)
{
struct nvhost_host1x_cb *host1x_cb;
host1x_cb = container_of(work, struct nvhost_host1x_cb, work);
host1x_cb->notifier(host1x_cb->notifier_data, 0);
kfree_rcu(host1x_cb);
}
int nvhost_intr_register_notifier(struct platform_device *pdev,
u32 id, u32 thresh,
void (*callback)(void *data, int unused),
void *private_data)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
struct dma_fence *fence;
struct nvhost_host1x_cb *cb;
struct host1x_syncpt *sp;
int err;
sp = host1x_syncpt_get_by_id_noref(pdata->host1x, id);
if (!sp)
return -EINVAL;
fence = host1x_fence_create(sp, thresh, true);
if (IS_ERR(fence)) {
pr_err("error %d during construction of fence!",
(int)PTR_ERR(fence));
return PTR_ERR(fence);
}
cb = kzalloc(sizeof(*cb), GFP_KERNEL);
if (!cb) {
dma_fence_put(fence);
return -ENOMEM;
}
INIT_WORK(&cb->work, nvhost_intr_do_work);
cb->notifier = callback;
cb->notifier_data = private_data;
err = dma_fence_add_callback(fence, &cb->cb, nvhost_host1x_cb_func);
if (err < 0) {
dma_fence_put(fence);
kfree(cb);
}
return err;
}
EXPORT_SYMBOL(nvhost_intr_register_notifier);
void nvhost_module_deinit(struct platform_device *pdev)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
struct falcon *falcon = pdata->falcon_data;
pm_runtime_disable(&pdev->dev);
if (falcon) {
dma_free_coherent(&pdev->dev, falcon->firmware.size,
falcon->firmware.virt, falcon->firmware.iova);
falcon_exit(falcon);
}
debugfs_remove_recursive(pdata->debugfs);
}
EXPORT_SYMBOL(nvhost_module_deinit);
int nvhost_module_init(struct platform_device *pdev)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
unsigned int i;
int err;
err = devm_clk_bulk_get_all(&pdev->dev, &pdata->clks);
if (err < 0) {
dev_err(&pdev->dev, "failed to get clocks %d\n", err);
return err;
}
pdata->num_clks = err;
for (i = 0; i < pdata->num_clks; i++) {
err = clk_set_rate(pdata->clks[i].clk, ULONG_MAX);
if (err < 0) {
dev_err(&pdev->dev, "failed to set clock rate!\n");
return err;
}
}
pdata->reset_control = devm_reset_control_get_exclusive_released(
&pdev->dev, NULL);
if (IS_ERR(pdata->reset_control)) {
dev_err(&pdev->dev, "failed to get reset\n");
return PTR_ERR(pdata->reset_control);
}
	err = reset_control_acquire(pdata->reset_control);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to acquire reset: %d\n", err);
		return err;
	}
err = clk_bulk_prepare_enable(pdata->num_clks, pdata->clks);
if (err < 0) {
reset_control_release(pdata->reset_control);
dev_err(&pdev->dev, "failed to enabled clocks: %d\n", err);
return err;
}
reset_control_reset(pdata->reset_control);
clk_bulk_disable_unprepare(pdata->num_clks, pdata->clks);
reset_control_release(pdata->reset_control);
if (pdata->autosuspend_delay) {
pm_runtime_set_autosuspend_delay(&pdev->dev,
pdata->autosuspend_delay);
pm_runtime_use_autosuspend(&pdev->dev);
}
pm_runtime_enable(&pdev->dev);
if (!pm_runtime_enabled(&pdev->dev))
return -EOPNOTSUPP;
pdata->debugfs = debugfs_create_dir(pdev->dev.of_node->name,
NULL);
return 0;
}
EXPORT_SYMBOL(nvhost_module_init);
static void nvhost_module_load_regs(struct platform_device *pdev, bool prod)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
struct nvhost_gating_register *regs = pdata->engine_cg_regs;
if (!regs)
return;
while (regs->addr) {
if (prod)
host1x_writel(pdev, regs->addr, regs->prod);
else
host1x_writel(pdev, regs->addr, regs->disable);
regs++;
}
}
void nvhost_module_reset(struct platform_device *pdev, bool reboot)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
int err;
	if (reboot && pdata->prepare_poweroff)
		pdata->prepare_poweroff(pdev);
mutex_lock(&pdata->lock);
err = reset_control_acquire(pdata->reset_control);
if (err < 0) {
dev_err(&pdev->dev, "failed to acquire reset: %d\n", err);
} else {
reset_control_reset(pdata->reset_control);
reset_control_release(pdata->reset_control);
}
mutex_unlock(&pdata->lock);
if (reboot) {
/* Load clockgating registers */
nvhost_module_load_regs(pdev, pdata->engine_can_cg);
/* ..and execute engine specific operations (i.e. boot) */
if (pdata->finalize_poweron)
pdata->finalize_poweron(pdev);
}
}
EXPORT_SYMBOL(nvhost_module_reset);
int nvhost_module_busy(struct platform_device *dev)
{
int err;
err = pm_runtime_get_sync(&dev->dev);
if (err < 0) {
pm_runtime_put_noidle(&dev->dev);
return err;
}
return 0;
}
EXPORT_SYMBOL(nvhost_module_busy);
void nvhost_module_idle_mult(struct platform_device *pdev, int refs)
{
struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
while (refs--) {
pm_runtime_mark_last_busy(&pdev->dev);
if (pdata->autosuspend_delay)
pm_runtime_put_autosuspend(&pdev->dev);
else
pm_runtime_put(&pdev->dev);
}
}
EXPORT_SYMBOL(nvhost_module_idle_mult);
inline void nvhost_module_idle(struct platform_device *pdev)
{
nvhost_module_idle_mult(pdev, 1);
}
EXPORT_SYMBOL(nvhost_module_idle);
static int nvhost_module_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct nvhost_device_data *pdata = dev_get_drvdata(dev);
int err;
err = clk_bulk_prepare_enable(pdata->num_clks, pdata->clks);
if (err < 0) {
dev_err(&pdev->dev, "failed to enabled clocks: %d\n", err);
return err;
}
if (pdata->poweron_reset)
nvhost_module_reset(pdev, false);
/* Load clockgating registers */
nvhost_module_load_regs(pdev, pdata->engine_can_cg);
if (pdata->finalize_poweron)
err = pdata->finalize_poweron(pdev);
return err;
}
static int nvhost_module_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct nvhost_device_data *pdata = dev_get_drvdata(dev);
int err;
if (pdata->prepare_poweroff) {
err = pdata->prepare_poweroff(pdev);
if (err)
return err;
}
clk_bulk_disable_unprepare(pdata->num_clks, pdata->clks);
return 0;
}
const struct dev_pm_ops nvhost_module_pm_ops = {
SET_RUNTIME_PM_OPS(nvhost_module_runtime_suspend,
nvhost_module_runtime_resume, NULL)
};
EXPORT_SYMBOL(nvhost_module_pm_ops);
static struct platform_driver nvhost_driver = {
.driver = {
.name = "host1x-nvhost",
},
};
module_platform_driver(nvhost_driver);
MODULE_LICENSE("GPL v2");

include/linux/nvhost_t194.h

@@ -0,0 +1,42 @@
/*
* Copyright (c) 2017, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __LINUX_NVHOST_T194_H__
#define __LINUX_NVHOST_T194_H__
int nvhost_syncpt_unit_interface_get_aperture(
struct platform_device *host_pdev,
phys_addr_t *base,
size_t *size);
u32 nvhost_syncpt_unit_interface_get_byte_offset(u32 syncpt_id);
#ifdef CONFIG_TEGRA_HOST1X
u32 nvhost_syncpt_unit_interface_get_byte_offset_ext(
struct platform_device *host_pdev,
u32 syncpt_id);
#else
static inline u32 nvhost_syncpt_unit_interface_get_byte_offset_ext(
struct platform_device *host_pdev,
u32 syncpt_id)
{
return nvhost_syncpt_unit_interface_get_byte_offset(syncpt_id);
}
#endif
#endif /* __LINUX_NVHOST_T194_H__ */
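
A minimal sketch of how these two calls combine to yield the physical address a DMA engine targets for a given syncpoint (the example_ name is hypothetical; error handling trimmed):

static phys_addr_t example_syncpt_phys_addr(struct platform_device *host_pdev,
                                            u32 id)
{
        phys_addr_t base;
        size_t size;

        if (nvhost_syncpt_unit_interface_get_aperture(host_pdev, &base,
                                                      &size) < 0)
                return 0;

        return base +
               nvhost_syncpt_unit_interface_get_byte_offset_ext(host_pdev, id);
}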

include/uapi/linux/nvdev_fence.h

@@ -0,0 +1,63 @@
/*
* include/uapi/linux/nvdev_fence.h
*
* Tegra PVA/DLA fence support
*
* Copyright (c) 2018-2021, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef LINUX_NVDEV_FENCE_H
#define LINUX_NVDEV_FENCE_H
#include <linux/types.h>
/* used for recording with keventlib */
enum nvdev_fence_kind {
NVDEV_FENCE_KIND_PRE = 0,
NVDEV_FENCE_KIND_POST
};
/**
 * struct nvdev_fence - fence information passed across the UAPI
 *
 * @type: Type of the fence (syncpoint, sync FD or semaphore)
 * @action: Fence action (wait or signal)
 * @syncpoint_index: Syncpoint ID
 * @syncpoint_value: Value of the syncpoint
* @sync_fd: Linux sync FD handle
* @semaphore_handle: File handle to the semaphore memory buffer
* @semaphore_offset: Offset to the semaphore within the buffer
* @semaphore_value: Value of the semaphore
*/
struct nvdev_fence {
__u32 type;
#define NVDEV_FENCE_TYPE_SYNCPT 0
#define NVDEV_FENCE_TYPE_SYNC_FD 1
#define NVDEV_FENCE_TYPE_SEMAPHORE 2
#define NVDEV_FENCE_TYPE_SEMAPHORE_TS 3
__u32 action;
#define NVDEV_FENCE_WAIT 0
#define NVDEV_FENCE_SIGNAL 1
#define NVDEV_FENCE_SIGNAL_STRIDE 2
__u32 syncpoint_index;
__u32 syncpoint_value;
__u32 sync_fd;
__u32 semaphore_handle;
__u32 semaphore_offset;
__u32 semaphore_value;
};
#endif /* LINUX_NVDEV_FENCE_H */
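
For reference, a userspace client would fill the structure along these lines when asking an engine to signal a syncpoint postfence; the field values here are purely illustrative:

#include <linux/nvdev_fence.h>

struct nvdev_fence postfence = {
        .type = NVDEV_FENCE_TYPE_SYNCPT,
        .action = NVDEV_FENCE_SIGNAL,
        .syncpoint_index = 12,  /* syncpoint ID, illustrative */
        .syncpoint_value = 40,  /* value once signalled, illustrative */
};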