diff --git a/drivers/gpu/host1x-nvhost/Makefile b/drivers/gpu/host1x-nvhost/Makefile
new file mode 100644
index 00000000..65fbd82d
--- /dev/null
+++ b/drivers/gpu/host1x-nvhost/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Tegra Host1x-Nvhost Driver.
+#
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+
+ccflags-y += -I$(srctree.nvidia)/include
+ccflags-y += -I$(srctree.nvidia)/include/uapi/linux
+ccflags-y += -I$(srctree.host1x)/include
+ccflags-y += -DCONFIG_TEGRA_HOST1X
+ccflags-y += -Werror
+
+host1x-nvhost-objs = nvhost.o falcon.o
+
+obj-m += host1x-nvhost.o
diff --git a/drivers/gpu/host1x-nvhost/falcon.c b/drivers/gpu/host1x-nvhost/falcon.c
new file mode 100644
index 00000000..f6901546
--- /dev/null
+++ b/drivers/gpu/host1x-nvhost/falcon.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2022, NVIDIA Corporation. All rights reserved.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/iopoll.h>
+#include <linux/pci_ids.h>
+#include <linux/platform_device.h>
+
+#include "falcon.h"
+
+enum falcon_memory {
+	FALCON_MEMORY_IMEM,
+	FALCON_MEMORY_DATA,
+};
+
+static void falcon_writel(struct falcon *falcon, u32 value, u32 offset)
+{
+	writel(value, falcon->regs + offset);
+}
+
+int falcon_wait_idle(struct falcon *falcon)
+{
+	u32 value;
+
+	return readl_poll_timeout(falcon->regs + FALCON_IDLESTATE, value,
+				  (value == 0), 10, 100000);
+}
+
+static int falcon_dma_wait_idle(struct falcon *falcon)
+{
+	u32 value;
+
+	return readl_poll_timeout(falcon->regs + FALCON_DMATRFCMD, value,
+				  (value & FALCON_DMATRFCMD_IDLE), 10, 100000);
+}
+
+static int falcon_copy_chunk(struct falcon *falcon,
+			     phys_addr_t base,
+			     unsigned long offset,
+			     enum falcon_memory target)
+{
+	u32 cmd = FALCON_DMATRFCMD_SIZE_256B;
+
+	if (target == FALCON_MEMORY_IMEM)
+		cmd |= FALCON_DMATRFCMD_IMEM;
+
+	falcon_writel(falcon, offset, FALCON_DMATRFMOFFS);
+	falcon_writel(falcon, base, FALCON_DMATRFFBOFFS);
+	falcon_writel(falcon, cmd, FALCON_DMATRFCMD);
+
+	return falcon_dma_wait_idle(falcon);
+}
+
+static void falcon_copy_firmware_image(struct falcon *falcon,
+				       const struct firmware *firmware)
+{
+	u32 *virt = falcon->firmware.virt;
+	size_t i;
+
+	/* copy the whole thing taking into account endianness */
+	for (i = 0; i < firmware->size / sizeof(u32); i++)
+		virt[i] = le32_to_cpu(((__le32 *)firmware->data)[i]);
+}
+
+static int falcon_parse_firmware_image(struct falcon *falcon)
+{
+	struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.virt;
+	struct falcon_fw_os_header_v1 *os;
+
+	/* endian problems would show up right here */
+	if (bin->magic != PCI_VENDOR_ID_NVIDIA && bin->magic != 0x10fe) {
+		dev_err(falcon->dev, "incorrect firmware magic\n");
+		return -EINVAL;
+	}
+
+	/* currently only version 1 is supported */
+	if (bin->version != 1) {
+		dev_err(falcon->dev, "unsupported firmware version\n");
+		return -EINVAL;
+	}
+
+	/* check that the firmware size is consistent */
+	if (bin->size > falcon->firmware.size) {
+		dev_err(falcon->dev, "firmware image size inconsistency\n");
+		return -EINVAL;
+	}
+
+	os = falcon->firmware.virt + bin->os_header_offset;
+
+	falcon->firmware.bin_data.size = bin->os_size;
+	falcon->firmware.bin_data.offset = bin->os_data_offset;
+	falcon->firmware.code.offset = os->code_offset;
+	falcon->firmware.code.size = os->code_size;
+	falcon->firmware.data.offset = os->data_offset;
+	falcon->firmware.data.size = os->data_size;
+
+	return 0;
+}
+
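+/*
+ * Read the firmware blob into kernel memory. Parsing and the copy into a
+ * DMA-able buffer are deferred to falcon_load_firmware() so that the caller
+ * can allocate firmware.virt in between, once the blob size is known.
+ */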
+int falcon_read_firmware(struct falcon *falcon, const char *name)
+{
+	int err;
+
+	/* request_firmware prints error if it fails */
+	err = request_firmware(&falcon->firmware.firmware, name, falcon->dev);
+	if (err < 0)
+		return err;
+
+	falcon->firmware.size = falcon->firmware.firmware->size;
+
+	return 0;
+}
+
+int falcon_load_firmware(struct falcon *falcon)
+{
+	const struct firmware *firmware = falcon->firmware.firmware;
+	int err;
+
+	/* copy firmware image into local area. this also ensures endianness */
+	falcon_copy_firmware_image(falcon, firmware);
+
+	/* parse the image data */
+	err = falcon_parse_firmware_image(falcon);
+	if (err < 0) {
+		dev_err(falcon->dev, "failed to parse firmware image\n");
+		return err;
+	}
+
+	release_firmware(firmware);
+	falcon->firmware.firmware = NULL;
+
+	return 0;
+}
+
+int falcon_init(struct falcon *falcon)
+{
+	falcon->firmware.virt = NULL;
+
+	return 0;
+}
+
+void falcon_exit(struct falcon *falcon)
+{
+	if (falcon->firmware.firmware)
+		release_firmware(falcon->firmware.firmware);
+}
+
+int falcon_boot(struct falcon *falcon)
+{
+	unsigned long offset;
+	u32 value;
+	int err;
+
+	if (!falcon->firmware.virt)
+		return -EINVAL;
+
+	err = readl_poll_timeout(falcon->regs + FALCON_DMACTL, value,
+				 (value & (FALCON_DMACTL_IMEM_SCRUBBING |
+					   FALCON_DMACTL_DMEM_SCRUBBING)) == 0,
+				 10, 10000);
+	if (err < 0)
+		return err;
+
+	falcon_writel(falcon, 0, FALCON_DMACTL);
+
+	/* setup the address of the binary data so Falcon can access it later */
+	falcon_writel(falcon, (falcon->firmware.iova +
+			       falcon->firmware.bin_data.offset) >> 8,
+		      FALCON_DMATRFBASE);
+
+	/* copy the data segment into Falcon internal memory */
+	for (offset = 0; offset < falcon->firmware.data.size; offset += 256)
+		falcon_copy_chunk(falcon,
+				  falcon->firmware.data.offset + offset,
+				  offset, FALCON_MEMORY_DATA);
+
+	/* copy the code segment into Falcon internal memory */
+	for (offset = 0; offset < falcon->firmware.code.size; offset += 256)
+		falcon_copy_chunk(falcon, falcon->firmware.code.offset + offset,
+				  offset, FALCON_MEMORY_IMEM);
+
+	/* setup falcon interrupts */
+	falcon_writel(falcon, FALCON_IRQMSET_EXT(0xff) |
+			      FALCON_IRQMSET_SWGEN1 |
+			      FALCON_IRQMSET_SWGEN0 |
+			      FALCON_IRQMSET_EXTERR |
+			      FALCON_IRQMSET_HALT |
+			      FALCON_IRQMSET_WDTMR,
+		      FALCON_IRQMSET);
+	falcon_writel(falcon, FALCON_IRQDEST_EXT(0xff) |
+			      FALCON_IRQDEST_SWGEN1 |
+			      FALCON_IRQDEST_SWGEN0 |
+			      FALCON_IRQDEST_EXTERR |
+			      FALCON_IRQDEST_HALT,
+		      FALCON_IRQDEST);
+
+	/* enable interface */
+	falcon_writel(falcon, FALCON_ITFEN_MTHDEN |
+			      FALCON_ITFEN_CTXEN,
+		      FALCON_ITFEN);
+
+	/* boot falcon */
+	falcon_writel(falcon, 0x00000000, FALCON_BOOTVEC);
+	falcon_writel(falcon, FALCON_CPUCTL_STARTCPU, FALCON_CPUCTL);
+
+	err = falcon_wait_idle(falcon);
+	if (err < 0) {
+		dev_err(falcon->dev, "Falcon boot failed due to timeout\n");
+		return err;
+	}
+
+	return 0;
+}
+
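+/*
+ * Class method registers take the word-aligned method offset (hence the
+ * shift by two) followed by its data word.
+ */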
+void falcon_execute_method(struct falcon *falcon, u32 method, u32 data)
+{
+	falcon_writel(falcon, method >> 2, FALCON_UCLASS_METHOD_OFFSET);
+	falcon_writel(falcon, data, FALCON_UCLASS_METHOD_DATA);
+}
diff --git a/drivers/gpu/host1x-nvhost/falcon.h b/drivers/gpu/host1x-nvhost/falcon.h
new file mode 100644
index 00000000..3c4a039a
--- /dev/null
+++ b/drivers/gpu/host1x-nvhost/falcon.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2022, NVIDIA Corporation. All rights reserved.
+ */
+
+#ifndef _FALCON_H_
+#define _FALCON_H_
+
+#include <linux/types.h>
+
+#define FALCON_UCLASS_METHOD_OFFSET	0x00000040
+
+#define FALCON_UCLASS_METHOD_DATA	0x00000044
+
+#define FALCON_IRQMSET			0x00001010
+#define FALCON_IRQMSET_WDTMR		(1 << 1)
+#define FALCON_IRQMSET_HALT		(1 << 4)
+#define FALCON_IRQMSET_EXTERR		(1 << 5)
+#define FALCON_IRQMSET_SWGEN0		(1 << 6)
+#define FALCON_IRQMSET_SWGEN1		(1 << 7)
+#define FALCON_IRQMSET_EXT(v)		(((v) & 0xff) << 8)
+
+#define FALCON_IRQDEST			0x0000101c
+#define FALCON_IRQDEST_HALT		(1 << 4)
+#define FALCON_IRQDEST_EXTERR		(1 << 5)
+#define FALCON_IRQDEST_SWGEN0		(1 << 6)
+#define FALCON_IRQDEST_SWGEN1		(1 << 7)
+#define FALCON_IRQDEST_EXT(v)		(((v) & 0xff) << 8)
+
+#define FALCON_ITFEN			0x00001048
+#define FALCON_ITFEN_CTXEN		(1 << 0)
+#define FALCON_ITFEN_MTHDEN		(1 << 1)
+
+#define FALCON_IDLESTATE		0x0000104c
+
+#define FALCON_CPUCTL			0x00001100
+#define FALCON_CPUCTL_STARTCPU		(1 << 1)
+
+#define FALCON_BOOTVEC			0x00001104
+
+#define FALCON_DMACTL			0x0000110c
+#define FALCON_DMACTL_DMEM_SCRUBBING	(1 << 1)
+#define FALCON_DMACTL_IMEM_SCRUBBING	(1 << 2)
+
+#define FALCON_DMATRFBASE		0x00001110
+
+#define FALCON_DMATRFMOFFS		0x00001114
+
+#define FALCON_DMATRFCMD		0x00001118
+#define FALCON_DMATRFCMD_IDLE		(1 << 1)
+#define FALCON_DMATRFCMD_IMEM		(1 << 4)
+#define FALCON_DMATRFCMD_SIZE_256B	(6 << 8)
+
+#define FALCON_DMATRFFBOFFS		0x0000111c
+
+struct falcon_fw_bin_header_v1 {
+	u32 magic;		/* 0x10de */
+	u32 version;		/* version of bin format (1) */
+	u32 size;		/* entire image size including this header */
+	u32 os_header_offset;
+	u32 os_data_offset;
+	u32 os_size;
+};
+
+struct falcon_fw_os_app_v1 {
+	u32 offset;
+	u32 size;
+};
+
+struct falcon_fw_os_header_v1 {
+	u32 code_offset;
+	u32 code_size;
+	u32 data_offset;
+	u32 data_size;
+};
+
+struct falcon_firmware_section {
+	unsigned long offset;
+	size_t size;
+};
+
+struct falcon_firmware {
+	/* Firmware after it is read but not loaded */
+	const struct firmware *firmware;
+
+	/* Raw firmware data */
+	dma_addr_t iova;
+	dma_addr_t phys;
+	void *virt;
+	size_t size;
+
+	/* Parsed firmware information */
+	struct falcon_firmware_section bin_data;
+	struct falcon_firmware_section data;
+	struct falcon_firmware_section code;
+};
+
+struct falcon {
+	/* Set by falcon client */
+	struct device *dev;
+	void __iomem *regs;
+
+	struct falcon_firmware firmware;
+};
+
+int falcon_init(struct falcon *falcon);
+void falcon_exit(struct falcon *falcon);
+int falcon_read_firmware(struct falcon *falcon, const char *firmware_name);
+int falcon_load_firmware(struct falcon *falcon);
+int falcon_boot(struct falcon *falcon);
+void falcon_execute_method(struct falcon *falcon, u32 method, u32 data);
+int falcon_wait_idle(struct falcon *falcon);
+
+#endif /* _FALCON_H_ */
diff --git a/drivers/gpu/host1x-nvhost/nvhost.c b/drivers/gpu/host1x-nvhost/nvhost.c
new file mode 100644
index 00000000..cd5f6436
--- /dev/null
+++ b/drivers/gpu/host1x-nvhost/nvhost.c
@@ -0,0 +1,826 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, NVIDIA Corporation. All rights reserved.
+ */
+
+#include <linux/cdev.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-mapping.h>
+#include <linux/host1x-next.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/nvhost.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "falcon.h"
+
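+/* Tegra194 maps each syncpoint to one page of the syncpoint SHIM aperture. */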
+#define TEGRA194_SYNCPT_PAGE_SIZE	0x1000
+#define TEGRA194_SYNCPT_SHIM_BASE	0x60000000
+#define TEGRA194_SYNCPT_SHIM_SIZE	0x00400000
+
+#define THI_STREAMID0	0x00000030
+#define THI_STREAMID1	0x00000034
+
+#define NVHOST_NUM_CDEV	1
+
+struct nvhost_syncpt_interface {
+	dma_addr_t base;
+	uint32_t page_size;
+};
+
+u32 host1x_readl(struct platform_device *pdev, u32 r)
+{
+	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
+	void __iomem *addr = pdata->aperture[0] + r;
+
+	return readl(addr);
+}
+EXPORT_SYMBOL(host1x_readl);
+
+void host1x_writel(struct platform_device *pdev, u32 r, u32 v)
+{
+	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
+	void __iomem *addr = pdata->aperture[0] + r;
+
+	writel(v, addr);
+}
+EXPORT_SYMBOL(host1x_writel);
+
+static const struct of_device_id host1x_match[] = {
+	{ .compatible = "nvidia,tegra194-host1x", },
+	{},
+};
+
+static int nvhost_get_host1x_dev(struct platform_device *pdev)
+{
+	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
+	struct platform_device *host1x_pdev;
+	struct device_node *np;
+
+	np = of_find_matching_node(NULL, host1x_match);
+	if (!np) {
+		dev_err(&pdev->dev, "Failed to find host1x!\n");
+		return -ENODEV;
+	}
+
+	host1x_pdev = of_find_device_by_node(np);
+	if (!host1x_pdev) {
+		dev_dbg(&pdev->dev, "host1x device not available\n");
+		return -EPROBE_DEFER;
+	}
+
+	pdata->host1x = platform_get_drvdata(host1x_pdev);
+	if (!pdata->host1x) {
+		dev_warn(&pdev->dev, "No platform data for host1x!\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
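+/*
+ * Create the per-engine character device and its sysfs class. The node is
+ * named "nvhost-<cdev-name><of-node-name>", with a ".<id>" suffix appended
+ * for positive device IDs.
+ */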
"nvhost-%s%s" : "nvhost-%s%s.%d", + cdev_name, pdev->dev.of_node->name, pdev->id); + + if (IS_ERR(dev)) { + dev_err(&pdev->dev, "failed to create %s device\n", cdev_name); + cdev_del(cdev); + } + + return dev; +} + +int nvhost_client_device_get_resources(struct platform_device *pdev) +{ + struct nvhost_device_data *pdata = platform_get_drvdata(pdev); + struct resource *res; + int err; + + err = nvhost_get_host1x_dev(pdev); + if (err) + return err; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + pdata->aperture[0] = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pdata->aperture[0])) + return PTR_ERR(pdata->aperture); + + return 0; +} +EXPORT_SYMBOL(nvhost_client_device_get_resources); + +int nvhost_client_device_init(struct platform_device *pdev) +{ + struct nvhost_device_data *pdata = platform_get_drvdata(pdev); + dev_t devno; + int err; + + err = alloc_chrdev_region(&devno, 0, NVHOST_NUM_CDEV, "nvhost"); + if (err < 0) { + dev_err(&pdev->dev, "failed to reserve chrdev region\n"); + return err; + } + + pdata->ctrl_node = nvhost_client_device_create(pdev, &pdata->ctrl_cdev, + "ctrl-", devno, + pdata->ctrl_ops); + if (IS_ERR(pdata->ctrl_node)) { + err = PTR_ERR(pdata->ctrl_node); + goto destroy; + } + + pdata->cdev_region = devno; + + return 0; + +destroy: + device_destroy(pdata->nvhost_class, pdata->ctrl_cdev.dev); + + return err; +} +EXPORT_SYMBOL(nvhost_client_device_init); + +int nvhost_client_device_release(struct platform_device *pdev) +{ + struct nvhost_device_data *pdata = platform_get_drvdata(pdev); + + if (!IS_ERR_OR_NULL(pdata->ctrl_node)) { + device_destroy(pdata->nvhost_class, pdata->ctrl_cdev.dev); + cdev_del(&pdata->ctrl_cdev); + } + + unregister_chrdev_region(pdata->cdev_region, NVHOST_NUM_CDEV); + + return 0; +} +EXPORT_SYMBOL(nvhost_client_device_release); + +u32 nvhost_get_syncpt_host_managed(struct platform_device *pdev, + u32 param, const char *syncpt_name) +{ + struct nvhost_device_data *pdata = platform_get_drvdata(pdev); + struct host1x_syncpt *sp; + + sp = host1x_syncpt_alloc(pdata->host1x, 0, syncpt_name ? syncpt_name : + dev_name(&pdev->dev)); + if (!sp) + return 0; + + return host1x_syncpt_id(sp); +} +EXPORT_SYMBOL(nvhost_get_syncpt_host_managed); + +struct host1x_syncpt *nvhost_syncpt_get_by_id(struct platform_device *pdev, + u32 id) +{ + struct nvhost_device_data *pdata = platform_get_drvdata(pdev); + + return host1x_syncpt_get_by_id(pdata->host1x, id); +} +EXPORT_SYMBOL(nvhost_syncpt_get_by_id); + +void nvhost_syncpt_put_ref_ext(struct platform_device *pdev, u32 id) +{ + struct nvhost_device_data *pdata = platform_get_drvdata(pdev); + struct host1x_syncpt *sp; + + sp = host1x_syncpt_get_by_id_noref(pdata->host1x, id); + if (WARN_ON(!sp)) + return; + + host1x_syncpt_put(sp); +} +EXPORT_SYMBOL(nvhost_syncpt_put_ref_ext); + +bool nvhost_syncpt_is_valid_pt_ext(struct platform_device *pdev, u32 id) +{ + struct nvhost_device_data *pdata = platform_get_drvdata(pdev); + struct host1x_syncpt *sp; + + if (!pdata || pdata->host1x) + return -ENODEV; + + sp = host1x_syncpt_get_by_id_noref(pdata->host1x, id); + + return sp ? 
+int nvhost_syncpt_unit_interface_init(struct platform_device *pdev)
+{
+	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
+	struct nvhost_syncpt_interface *syncpt_if;
+	size_t size;
+	u64 base;
+	int err;
+
+	syncpt_if = devm_kzalloc(&pdev->dev, sizeof(*syncpt_if), GFP_KERNEL);
+	if (!syncpt_if)
+		return -ENOMEM;
+
+	err = nvhost_syncpt_get_aperture(pdev->dev.parent->of_node, &base,
+					 &size);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to get syncpt aperture\n");
+		return err;
+	}
+
+	err = nvhost_syncpt_get_page_size(pdev->dev.parent->of_node,
+					  &syncpt_if->page_size);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to get syncpt page size\n");
+		return err;
+	}
+
+	/* If IOMMU is enabled, map it into the device memory */
+	if (iommu_get_domain_for_dev(&pdev->dev)) {
+		struct scatterlist sg;
+
+		sg_init_table(&sg, 1);
+		sg_set_page(&sg, phys_to_page(base), size, 0);
+
+		err = dma_map_sg_attrs(&pdev->dev, &sg, 1,
+				       DMA_BIDIRECTIONAL,
+				       DMA_ATTR_SKIP_CPU_SYNC);
+		if (err == 0)
+			return -ENOMEM;
+
+		syncpt_if->base = sg_dma_address(&sg);
+	} else {
+		syncpt_if->base = base;
+	}
+
+	pdata->syncpt_unit_interface = syncpt_if;
+
+	dev_info(&pdev->dev,
+		 "syncpt_unit_base %llx syncpt_unit_size %zx size %x\n",
+		 base, size, syncpt_if->page_size);
+
+	return 0;
+}
+EXPORT_SYMBOL(nvhost_syncpt_unit_interface_init);
+
+dma_addr_t nvhost_syncpt_address(struct platform_device *pdev, u32 id)
+{
+	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
+	struct nvhost_syncpt_interface *syncpt_if = pdata->syncpt_unit_interface;
+
+	return syncpt_if->base + syncpt_if->page_size * id;
+}
+EXPORT_SYMBOL(nvhost_syncpt_address);
+
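+/*
+ * Engine interrupt plumbing: the IRQ is requested at init time but kept
+ * disabled; runtime resume enables it and prepare_poweroff disables it
+ * again.
+ */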
+static irqreturn_t flcn_isr(int irq, void *dev_id)
+{
+	struct platform_device *pdev = dev_id;
+	struct nvhost_device_data *pdata = nvhost_get_devdata(pdev);
+
+	if (pdata->flcn_isr)
+		pdata->flcn_isr(pdev);
+
+	return IRQ_HANDLED;
+}
+
+int flcn_intr_init(struct platform_device *pdev)
+{
+	struct nvhost_device_data *pdata = nvhost_get_devdata(pdev);
+	int ret;
+
+	pdata->irq = platform_get_irq(pdev, 0);
+	if (pdata->irq < 0) {
+		dev_err(&pdev->dev, "failed to get IRQ\n");
+		return -ENXIO;
+	}
+
+	spin_lock_init(&pdata->mirq_lock);
+	ret = request_irq(pdata->irq, flcn_isr, 0, dev_name(&pdev->dev), pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to request irq. err %d\n", ret);
+		return ret;
+	}
+
+	/* keep irq disabled until the engine is powered on */
+	disable_irq(pdata->irq);
+
+	return 0;
+}
+EXPORT_SYMBOL(flcn_intr_init);
+
+int flcn_reload_fw(struct platform_device *pdev)
+{
+	/* TODO: Used by debugfs */
+	return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(flcn_reload_fw);
+
+int nvhost_flcn_prepare_poweroff(struct platform_device *pdev)
+{
+	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
+
+	if (pdata->flcn_isr)
+		disable_irq(pdata->irq);
+
+	return 0;
+}
+EXPORT_SYMBOL(nvhost_flcn_prepare_poweroff);
+
+static int nvhost_flcn_load_firmware(struct platform_device *pdev)
+{
+	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
+	struct falcon *falcon = pdata->falcon_data;
+	dma_addr_t iova;
+	size_t size;
+	void *virt;
+	int err;
+
+	if (falcon->firmware.virt)
+		return 0;
+
+	err = falcon_read_firmware(falcon, pdata->firmware_name);
+	if (err < 0)
+		return err;
+
+	size = falcon->firmware.size;
+	virt = dma_alloc_coherent(&pdev->dev, size, &iova, GFP_KERNEL);
+	if (!virt)
+		return -ENOMEM;
+
+	falcon->firmware.virt = virt;
+	falcon->firmware.iova = iova;
+
+	err = falcon_load_firmware(falcon);
+	if (err < 0)
+		goto cleanup;
+
+	return 0;
+
+cleanup:
+	dma_free_coherent(&pdev->dev, size, virt, iova);
+
+	return err;
+}
+
+int nvhost_flcn_finalize_poweron(struct platform_device *pdev)
+{
+	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
+	struct falcon *falcon = pdata->falcon_data;
+	int err;
+
+#ifdef CONFIG_IOMMU_API
+	struct iommu_fwspec *spec = dev_iommu_fwspec_get(&pdev->dev);
+
+	if (spec) {
+		host1x_writel(pdev, pdata->transcfg_addr, pdata->transcfg_val);
+
+		if (spec->num_ids > 0) {
+			u32 value = spec->ids[0] & 0xffff;
+
+			host1x_writel(pdev, THI_STREAMID0, value);
+			host1x_writel(pdev, THI_STREAMID1, value);
+		}
+	}
+#endif
+
+	err = falcon_boot(falcon);
+	if (err < 0)
+		return err;
+
+	err = falcon_wait_idle(falcon);
+	if (err < 0) {
+		dev_err(&pdev->dev, "falcon boot timed out\n");
+		return err;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(nvhost_flcn_finalize_poweron);
+
+struct nvhost_host1x_cb {
+	struct dma_fence_cb cb;
+	struct work_struct work;
+	void (*notifier)(void *data, int unused);
+	void *notifier_data;
+};
+
+static void nvhost_host1x_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+	struct nvhost_host1x_cb *host1x_cb;
+
+	host1x_cb = container_of(cb, struct nvhost_host1x_cb, cb);
+	schedule_work(&host1x_cb->work);
+	dma_fence_put(f);
+}
+
+static void nvhost_intr_do_work(struct work_struct *work)
+{
+	struct nvhost_host1x_cb *host1x_cb;
+
+	host1x_cb = container_of(work, struct nvhost_host1x_cb, work);
+	host1x_cb->notifier(host1x_cb->notifier_data, 0);
+	kfree(host1x_cb);
+}
+
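+/*
+ * Schedule @callback to run from the system workqueue once syncpoint @id
+ * reaches @thresh, using a host1x fence to detect the threshold.
+ */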
+int nvhost_intr_register_notifier(struct platform_device *pdev,
+				  u32 id, u32 thresh,
+				  void (*callback)(void *data, int unused),
+				  void *private_data)
+{
+	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
+	struct dma_fence *fence;
+	struct nvhost_host1x_cb *cb;
+	struct host1x_syncpt *sp;
+	int err;
+
+	sp = host1x_syncpt_get_by_id_noref(pdata->host1x, id);
+	if (!sp)
+		return -EINVAL;
+
+	fence = host1x_fence_create(sp, thresh);
+	if (IS_ERR(fence)) {
+		pr_err("error %d during construction of fence!\n",
+		       (int)PTR_ERR(fence));
+		return PTR_ERR(fence);
+	}
+
+	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
+	if (!cb) {
+		dma_fence_put(fence);
+		return -ENOMEM;
+	}
+
+	INIT_WORK(&cb->work, nvhost_intr_do_work);
+	cb->notifier = callback;
+	cb->notifier_data = private_data;
+
+	err = dma_fence_add_callback(fence, &cb->cb, nvhost_host1x_cb_func);
+	if (err < 0) {
+		dma_fence_put(fence);
+		kfree(cb);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(nvhost_intr_register_notifier);
+
+void nvhost_module_deinit(struct platform_device *pdev)
+{
+	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
+	struct falcon *falcon = pdata->falcon_data;
+
+	pm_runtime_disable(&pdev->dev);
+	dma_free_coherent(&pdev->dev, falcon->firmware.size,
+			  falcon->firmware.virt, falcon->firmware.iova);
+	falcon_exit(falcon);
+	debugfs_remove(pdata->debugfs);
+}
+EXPORT_SYMBOL(nvhost_module_deinit);
+
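+/*
+ * One-time engine setup: clocks and reset are brought up just long enough
+ * to cleanly reset the engine, then runtime PM takes over power management
+ * and the firmware image is preloaded.
+ */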
+int nvhost_module_init(struct platform_device *pdev)
+{
+	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
+	struct falcon *falcon;
+	unsigned int i;
+	int err;
+
+	falcon = devm_kzalloc(&pdev->dev, sizeof(*falcon), GFP_KERNEL);
+	if (!falcon)
+		return -ENOMEM;
+
+	falcon->dev = &pdev->dev;
+	falcon->regs = pdata->aperture[0];
+
+	err = devm_clk_bulk_get_all(&pdev->dev, &pdata->clks);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to get clocks %d\n", err);
+		return err;
+	}
+	pdata->num_clks = err;
+
+	for (i = 0; i < pdata->num_clks; i++) {
+		err = clk_set_rate(pdata->clks[i].clk, ULONG_MAX);
+		if (err < 0) {
+			dev_err(&pdev->dev, "failed to set clock rate!\n");
+			return err;
+		}
+	}
+
+	pdata->reset_control = devm_reset_control_get_exclusive_released(
+					&pdev->dev, NULL);
+	if (IS_ERR(pdata->reset_control)) {
+		dev_err(&pdev->dev, "failed to get reset\n");
+		return PTR_ERR(pdata->reset_control);
+	}
+
+	err = reset_control_acquire(pdata->reset_control);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to acquire reset: %d\n", err);
+		return err;
+	}
+
+	err = clk_bulk_prepare_enable(pdata->num_clks, pdata->clks);
+	if (err < 0) {
+		reset_control_release(pdata->reset_control);
+		dev_err(&pdev->dev, "failed to enable clocks: %d\n", err);
+		return err;
+	}
+
+	reset_control_reset(pdata->reset_control);
+	clk_bulk_disable_unprepare(pdata->num_clks, pdata->clks);
+	reset_control_release(pdata->reset_control);
+
+	if (pdata->autosuspend_delay) {
+		pm_runtime_set_autosuspend_delay(&pdev->dev,
+						 pdata->autosuspend_delay);
+		pm_runtime_use_autosuspend(&pdev->dev);
+	}
+
+	pm_runtime_enable(&pdev->dev);
+	if (!pm_runtime_enabled(&pdev->dev))
+		return -EOPNOTSUPP;
+
+	pdata->debugfs = debugfs_create_dir(pdev->dev.of_node->name, NULL);
+
+	falcon_init(falcon);
+
+	pdata->falcon_data = falcon;
+
+	return nvhost_flcn_load_firmware(pdev);
+}
+EXPORT_SYMBOL(nvhost_module_init);
+
+static void nvhost_module_load_regs(struct platform_device *pdev, bool prod)
+{
+	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
+	struct nvhost_gating_register *regs = pdata->engine_cg_regs;
+
+	if (!regs)
+		return;
+
+	while (regs->addr) {
+		if (prod)
+			host1x_writel(pdev, regs->addr, regs->prod);
+		else
+			host1x_writel(pdev, regs->addr, regs->disable);
+		regs++;
+	}
+}
+
+void nvhost_module_reset(struct platform_device *pdev, bool reboot)
+{
+	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
+	int err;
+
+	if (reboot && pdata->prepare_poweroff)
+		pdata->prepare_poweroff(pdev);
+
+	mutex_lock(&pdata->lock);
+	err = reset_control_acquire(pdata->reset_control);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to acquire reset: %d\n", err);
+	} else {
+		reset_control_reset(pdata->reset_control);
+		reset_control_release(pdata->reset_control);
+	}
+	mutex_unlock(&pdata->lock);
+
+	if (reboot) {
+		/* Load clockgating registers */
+		nvhost_module_load_regs(pdev, pdata->engine_can_cg);
+
+		/* ..and execute engine specific operations (i.e. boot) */
+		if (pdata->finalize_poweron)
+			pdata->finalize_poweron(pdev);
+	}
+}
+EXPORT_SYMBOL(nvhost_module_reset);
+
+int nvhost_module_busy(struct platform_device *dev)
+{
+	int err;
+
+	err = pm_runtime_get_sync(&dev->dev);
+	if (err < 0) {
+		pm_runtime_put_noidle(&dev->dev);
+		return err;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(nvhost_module_busy);
+
+void nvhost_module_idle_mult(struct platform_device *pdev, int refs)
+{
+	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
+
+	while (refs--) {
+		pm_runtime_mark_last_busy(&pdev->dev);
+		if (pdata->autosuspend_delay)
+			pm_runtime_put_autosuspend(&pdev->dev);
+		else
+			pm_runtime_put(&pdev->dev);
+	}
+}
+EXPORT_SYMBOL(nvhost_module_idle_mult);
+
+inline void nvhost_module_idle(struct platform_device *pdev)
+{
+	nvhost_module_idle_mult(pdev, 1);
+}
+EXPORT_SYMBOL(nvhost_module_idle);
+
+int nvhost_module_runtime_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct nvhost_device_data *pdata = dev_get_drvdata(dev);
+	int err;
+
+	err = clk_bulk_prepare_enable(pdata->num_clks, pdata->clks);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to enable clocks: %d\n", err);
+		return err;
+	}
+
+	if (pdata->poweron_reset)
+		nvhost_module_reset(pdev, false);
+
+	/* Load clockgating registers */
+	nvhost_module_load_regs(pdev, pdata->engine_can_cg);
+
+	if (pdata->flcn_isr)
+		enable_irq(pdata->irq);
+
+	if (pdata->finalize_poweron)
+		err = pdata->finalize_poweron(pdev);
+
+	return err;
+}
+EXPORT_SYMBOL(nvhost_module_runtime_resume);
+
+int nvhost_module_runtime_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct nvhost_device_data *pdata = dev_get_drvdata(dev);
+	int err;
+
+	if (pdata->prepare_poweroff) {
+		err = pdata->prepare_poweroff(pdev);
+		if (err)
+			return err;
+	}
+
+	clk_bulk_disable_unprepare(pdata->num_clks, pdata->clks);
+
+	return 0;
+}
+EXPORT_SYMBOL(nvhost_module_runtime_suspend);
+
+const struct dev_pm_ops nvhost_module_pm_ops = {
+	SET_RUNTIME_PM_OPS(nvhost_module_runtime_suspend,
+			   nvhost_module_runtime_resume, NULL)
+};
+EXPORT_SYMBOL(nvhost_module_pm_ops);
+
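+/*
+ * Skeleton platform driver: no devices bind against it directly; client
+ * engine drivers use the exported helpers above.
+ */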
+static struct platform_driver nvhost_driver = {
+	.driver = {
+		.name = "host1x-nvhost",
+	},
+};
+
+module_platform_driver(nvhost_driver);
+
+MODULE_LICENSE("GPL v2");