diff --git a/drivers/platform/tegra/nvadsp/Kconfig b/drivers/platform/tegra/nvadsp/Kconfig
new file mode 100644
index 00000000..71f2d907
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/Kconfig
@@ -0,0 +1,93 @@
+config TEGRA_NVADSP
+ tristate "Enable Host ADSP driver"
+ default n
+ select ARM_GIC_PM
+ select FIQ
+ help
+ Enables support for the host ADSP driver.
+
+ If unsure, say N
+
+config TEGRA_NVADSP_ON_SMMU
+ bool "Use SMMU to relocate ADSP"
+ depends on (TEGRA_IOMMU_SMMU || OF_TEGRA_IOMMU_SMMU) && TEGRA_NVADSP
+ default n
+ help
+ Use SMMU to relocate ADSP OS.
+
+config TEGRA_ADSP_DFS
+ bool "Enable ADSP DFS"
+ depends on TEGRA_NVADSP
+ default n
+ help
+ Enable ADSP dynamic frequency scaling. Use this config
+ to scale the ADSP frequency via actmon or to set a fixed value.
+
+ If unsure, say N
+
+config TEGRA_ADSP_ACTMON
+ bool "Enable ADSP ACTMON"
+ depends on TEGRA_ADSP_DFS
+ default n
+ help
+ Enable the ADSP activity monitor (actmon). It converts ADSP
+ activity into a frequency request and asks ADSP DFS to set the
+ ADSP frequency. Use it if the ADSP frequency should be scaled
+ dynamically by actmon.
+
+ If unsure, say N
+
+config TEGRA_ADSP_CPUSTAT
+ bool "Enable ADSP CPUSTAT"
+ depends on DEBUG_FS && TEGRA_NVADSP && !TEGRA_ADSP_ACTMON
+ default n
+ help
+ Enable ADSP CPU usage measurement using actmon.
+
+ If unsure, say N
+
+config TEGRA_ADSP_FILEIO
+ bool "Enable ADSP file io"
+ depends on TEGRA_NVADSP
+ default n
+ help
+ Enable the ADSP to dump to and read from files on the host.
+
+ If unsure, say N
+
+config TEGRA_ADSP_LPTHREAD
+ bool "Enable ADSP usage calc by lpthread"
+ depends on DEBUG_FS && TEGRA_NVADSP
+ default n
+ help
+ Enable calculation of ADSP usage by running a low priority
+ thread in the background whenever the OS is not suspended. It can
+ be enabled or disabled by echoing to the adsp_usage file.
+
+ If unsure, say N
+
+config TEGRA_EMC_APE_DFS
+ bool "Enable emc dfs due to APE"
+ depends on TEGRA_NVADSP
+ default n
+ help
+ Enable EMC dynamic frequency scaling based on APE DRAM access.
+
+ If unsure, say N
+
+config TEGRA_ADSP_CONSOLE
+ bool "Enable ADSP console"
+ depends on TEGRA_NVADSP
+ default y
+ help
+ Enable ADSP console access
+
+ If unsure, say N
+
+config MBOX_ACK_HANDLER
+ bool "Enable mailbox acknowledge handler"
+ depends on TEGRA_NVADSP
+ default n
+ help
+ Enable mailbox acknowledge handler
+
+ If unsure, say N
diff --git a/drivers/platform/tegra/nvadsp/Makefile b/drivers/platform/tegra/nvadsp/Makefile
new file mode 100644
index 00000000..087db77e
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/Makefile
@@ -0,0 +1,42 @@
+GCOV_PROFILE := y
+ccflags-y += -Werror
+
+obj-$(CONFIG_TEGRA_NVADSP) := nvadsp.o
+nvadsp-objs += dev.o os.o app.o app_loader_linker.o\
+ amc.o nvadsp_shared_sema.o \
+ hwmailbox.o mailbox.o msgq.o \
+ mem_manager.o aram_manager.o dram_app_mem_manager.o \
+ dev-t21x.o os-t21x.o dev-t18x.o os-t18x.o acast.o
+
+
+ifeq ($(CONFIG_TEGRA_ADSP_DFS),y)
+nvadsp-objs += adsp_dfs.o
+endif
+
+ifeq ($(CONFIG_TEGRA_ADSP_ACTMON),y)
+nvadsp-objs += ape_actmon.o
+endif
+
+ifeq ($(CONFIG_TEGRA_EMC_APE_DFS),y)
+nvadsp-objs += emc_dfs.o
+endif
+
+ifeq ($(CONFIG_TEGRA_ADSP_CONSOLE),y)
+nvadsp-objs += adsp_console_dbfs.o
+endif
+
+ifeq ($(CONFIG_TEGRA_ADSP_CPUSTAT),y)
+nvadsp-objs += adsp_cpustat.o
+endif
+
+ifeq ($(CONFIG_TEGRA_ADSP_FILEIO),y)
+nvadsp-objs += adspff.o
+endif
+
+ifeq ($(CONFIG_TEGRA_ADSP_LPTHREAD),y)
+nvadsp-objs += adsp_lpthread.o
+endif
+
+ifeq ($(CONFIG_TEGRA_VIRT_AUDIO_IVC),y)
+ccflags-y += -I$(srctree.nvidia)/drivers/platform/tegra/nvaudio_ivc/
+endif
diff --git a/drivers/platform/tegra/nvadsp/acast.c b/drivers/platform/tegra/nvadsp/acast.c
new file mode 100644
index 00000000..f8de7f40
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/acast.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2016-2022 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/version.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include "dev.h"
+#include "dev-t18x.h"
+
+#define AST_CONTROL 0x000
+#define AST_STREAMID_CTL_0 0x020
+#define AST_STREAMID_CTL_1 0x024
+#define AST_RGN_SLAVE_BASE_LO 0x100
+#define AST_RGN_SLAVE_BASE_HI 0x104
+#define AST_RGN_MASK_BASE_LO 0x108
+#define AST_RGN_MASK_BASE_HI 0x10c
+#define AST_RGN_MASTER_BASE_LO 0x110
+#define AST_RGN_MASTER_BASE_HI 0x114
+#define AST_RGN_CONTROL 0x118
+
+#define AST_PAGE_MASK (~0xFFF)
+#define AST_LO_SHIFT 32
+#define AST_LO_MASK 0xFFFFFFFF
+#define AST_PHY_SID_IDX 0
+#define AST_APE_SID_IDX 1
+#define AST_NS (1 << 3)
+#define AST_CARVEOUTID(ID) (ID << 5)
+#define AST_VMINDEX(IDX) (IDX << 15)
+#define AST_PHYSICAL(PHY) (PHY << 19)
+#define AST_STREAMID(ID) (ID << 8)
+#define AST_VMINDEX_ENABLE (1 << 0)
+#define AST_RGN_ENABLE (1 << 0)
+#define AST_RGN_OFFSET 0x20
+
+struct acast_region {
+ u32 rgn;
+ u32 rgn_ctrl;
+ u32 strmid_reg;
+ u32 strmid_ctrl;
+ u64 slave;
+ u64 size;
+ u64 master;
+};
+
+#define NUM_MAX_ACAST 2
+
+#define ACAST_RGN_PHY 0x0
+#define ACAST_RGN_CTL_PHY (AST_PHYSICAL(1) | AST_CARVEOUTID(0x7))
+
+#define ACAST_RGN_VM 0x2
+#define ACAST_VMINDEX 1
+#define ACAST_RGN_CTL_VM(IDX) AST_VMINDEX(IDX)
+#define ACAST_SID_REG_EVAL(IDX) AST_STREAMID_CTL_##IDX
+#define ACAST_STRMID_REG(IDX) ACAST_SID_REG_EVAL(IDX)
+
+#if KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE
+/* Older kernels do not have this function, so stubbing it */
+static inline int of_property_read_u64_index(const struct device_node *np,
+ const char *propname, u32 index, u64 *out_value)
+{
+ return -ENOSYS;
+}
+#endif
+
+static inline void acast_write(void __iomem *acast, u32 reg, u32 val)
+{
+ writel(val, acast + reg);
+}
+
+static inline u32 acast_read(void __iomem *acast, u32 reg)
+{
+ return readl(acast + reg);
+}
+
+static inline u32 acast_rgn_reg(u32 rgn, u32 reg)
+{
+ return rgn * AST_RGN_OFFSET + reg;
+}
+
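+/*
+ * Program one AST region: write the master (physical) base and size mask,
+ * apply the region control bits and the optional stream ID, and write the
+ * slave base last with the enable bit so the mapping only becomes active
+ * once it is fully configured.
+ */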
+static void tegra18x_acast_map(struct device *dev,
+ void __iomem *acast, u32 rgn, u32 rgn_ctrl,
+ u32 strmid_reg, u32 strmid_ctrl,
+ u64 slave, u64 size, u64 master)
+{
+ u32 val;
+
+ val = acast_read(acast, acast_rgn_reg(rgn, AST_RGN_SLAVE_BASE_LO));
+ if (val & AST_RGN_ENABLE) {
+ dev_warn(dev, "ACAST rgn %u already mapped...skipping\n", rgn);
+ return;
+ }
+
+ val = master & AST_LO_MASK;
+ acast_write(acast,
+ acast_rgn_reg(rgn, AST_RGN_MASTER_BASE_LO), val);
+ val = master >> AST_LO_SHIFT;
+ acast_write(acast,
+ acast_rgn_reg(rgn, AST_RGN_MASTER_BASE_HI), val);
+
+ val = ((size - 1) & AST_PAGE_MASK) & AST_LO_MASK;
+ acast_write(acast,
+ acast_rgn_reg(rgn, AST_RGN_MASK_BASE_LO), val);
+ val = (size - 1) >> AST_LO_SHIFT;
+ acast_write(acast,
+ acast_rgn_reg(rgn, AST_RGN_MASK_BASE_HI), val);
+
+ val = acast_read(acast, acast_rgn_reg(rgn, AST_RGN_CONTROL));
+ val |= rgn_ctrl;
+ acast_write(acast,
+ acast_rgn_reg(rgn, AST_RGN_CONTROL), val);
+
+ if (strmid_reg)
+ acast_write(acast, strmid_reg, strmid_ctrl);
+
+ val = slave >> AST_LO_SHIFT;
+ acast_write(acast,
+ acast_rgn_reg(rgn, AST_RGN_SLAVE_BASE_HI), val);
+ val = (slave & AST_LO_MASK) | AST_RGN_ENABLE;
+ acast_write(acast,
+ acast_rgn_reg(rgn, AST_RGN_SLAVE_BASE_LO), val);
+}
+
+static int tegra18x_acast_init(struct device *dev,
+ uint32_t acast_addr, uint32_t acast_size,
+ struct acast_region *acast_regions, uint32_t num_regions)
+{
+ void __iomem *acast_base;
+ int i;
+
+ acast_base = devm_ioremap(dev, acast_addr, acast_size);
+ if (!acast_base) {
+ dev_err(dev, "failed to map ACAST 0x%x\n", acast_addr);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_regions; i++) {
+ tegra18x_acast_map(dev, acast_base,
+ acast_regions[i].rgn,
+ acast_regions[i].rgn_ctrl,
+ acast_regions[i].strmid_reg,
+ acast_regions[i].strmid_ctrl,
+ acast_regions[i].slave,
+ acast_regions[i].size,
+ acast_regions[i].master);
+
+ dev_dbg(dev, "i:%d rgn:0x%x rgn_ctrl:0x%x ",
+ i, acast_regions[i].rgn, acast_regions[i].rgn_ctrl);
+ dev_dbg(dev, "strmid_reg:0x%x strmid_ctrl:0x%x ",
+ acast_regions[i].strmid_reg,
+ acast_regions[i].strmid_ctrl);
+ dev_dbg(dev, "slave:0x%llx size:0x%llx master:0x%llx\n",
+ acast_regions[i].slave, acast_regions[i].size,
+ acast_regions[i].master);
+ }
+
+ return 0;
+}
+
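+/*
+ * Build a single ACAST region configuration and apply it to each ACAST
+ * instance listed in the "nvidia,acast_config" DT property: when a carveout
+ * is present, map the ADSP OS region to the carveout physically; otherwise
+ * map the IOMMU reserved region through the SMMU stream ID.
+ */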
+int nvadsp_acast_t18x_init(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct resource *co_mem = &drv_data->co_mem;
+ uint32_t acast_addr, acast_size;
+ int iter, num_acast = 0, ret = 0;
+ struct acast_region acast_config;
+
+ if (co_mem->start) {
+ acast_config.rgn = ACAST_RGN_PHY;
+ acast_config.rgn_ctrl = ACAST_RGN_CTL_PHY;
+ acast_config.strmid_reg = 0;
+ acast_config.strmid_ctrl = 0;
+ acast_config.slave = drv_data->adsp_mem[ADSP_OS_ADDR];
+ acast_config.size = drv_data->adsp_mem[ADSP_OS_SIZE];
+ acast_config.master = co_mem->start;
+ } else {
+ uint32_t stream_id;
+ uint64_t iommu_addr_start, iommu_addr_end;
+
+ if (of_property_read_u32_index(dev->of_node,
+ "iommus", 1, &stream_id)) {
+ dev_warn(dev, "no SMMU stream ID found\n");
+ goto exit;
+ }
+ if (of_property_read_u64_index(dev->of_node,
+ "iommu-resv-regions", 1, &iommu_addr_start)) {
+ dev_warn(dev, "no IOMMU reserved region\n");
+ goto exit;
+ }
+ if (of_property_read_u64_index(dev->of_node,
+ "iommu-resv-regions", 2, &iommu_addr_end)) {
+ dev_warn(dev, "no IOMMU reserved region\n");
+ goto exit;
+ }
+
+ acast_config.rgn = ACAST_RGN_VM;
+ acast_config.rgn_ctrl = ACAST_RGN_CTL_VM(ACAST_VMINDEX);
+ acast_config.strmid_reg = ACAST_STRMID_REG(ACAST_VMINDEX);
+ acast_config.strmid_ctrl = AST_STREAMID(stream_id) |
+ AST_VMINDEX_ENABLE;
+ acast_config.slave = iommu_addr_start;
+ acast_config.size = (iommu_addr_end - acast_config.slave);
+ acast_config.master = iommu_addr_start;
+ }
+
+ for (iter = 0; iter < (NUM_MAX_ACAST * 2); iter += 2) {
+ if (of_property_read_u32_index(dev->of_node,
+ "nvidia,acast_config", iter, &acast_addr))
+ continue;
+ if (of_property_read_u32_index(dev->of_node,
+ "nvidia,acast_config", (iter + 1), &acast_size))
+ continue;
+
+ ret = tegra18x_acast_init(dev, acast_addr, acast_size,
+ &acast_config, 1);
+ if (ret)
+ goto exit;
+
+ num_acast++;
+ }
+
+ if (num_acast == 0)
+ dev_warn(dev, "no ACAST configurations found\n");
+
+exit:
+ return ret;
+}
diff --git a/drivers/platform/tegra/nvadsp/adsp_console_dbfs.c b/drivers/platform/tegra/nvadsp/adsp_console_dbfs.c
new file mode 100644
index 00000000..ff5d6cdb
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/adsp_console_dbfs.c
@@ -0,0 +1,449 @@
+/*
+ * adsp_console_dbfs.c
+ *
+ * adsp mailbox console driver
+ *
+ * Copyright (C) 2014-2022, NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+
+#include <linux/tegra_nvadsp.h>
+
+#include "dev.h"
+#include "adsp_console_dbfs.h"
+
+#define USE_RUN_APP_API
+
+static int open_cnt;
+
+#define ADSP_APP_CTX_MAX 32
+
+static uint64_t adsp_app_ctx_vals[ADSP_APP_CTX_MAX];
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)
+#define ACCESS_OK(addr, size) access_ok(0, addr, size)
+#else
+#define ACCESS_OK(addr, size) access_ok(addr, size)
+#endif
+
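+/*
+ * App context handles returned to user space are remembered in a small
+ * table and validated here before being dereferenced again, so the ioctl
+ * never acts on an arbitrary user-supplied pointer value.
+ */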
+static int adsp_app_ctx_add(uint64_t ctx)
+{
+ int i;
+
+ if (ctx == 0)
+ return -EINVAL;
+
+ for (i = 0; i < ADSP_APP_CTX_MAX; i++) {
+ if (adsp_app_ctx_vals[i] == 0) {
+ adsp_app_ctx_vals[i] = ctx;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int adsp_app_ctx_check(uint64_t ctx)
+{
+ int i;
+
+ if (ctx == 0)
+ return -EINVAL;
+
+ for (i = 0; i < ADSP_APP_CTX_MAX; i++) {
+ if (adsp_app_ctx_vals[i] == ctx)
+ return 0;
+ }
+ return -EINVAL;
+}
+
+
+static void adsp_app_ctx_remove(uint64_t ctx)
+{
+ int i;
+
+ for (i = 0; i < ADSP_APP_CTX_MAX; i++) {
+ if (adsp_app_ctx_vals[i] == ctx) {
+ adsp_app_ctx_vals[i] = 0;
+ return;
+ }
+ }
+}
+
+static int adsp_consol_open(struct inode *i, struct file *f)
+{
+
+ int ret;
+ uint16_t snd_mbox_id = 30;
+ struct nvadsp_cnsl *console = i->i_private;
+ struct device *dev = console->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+
+ if (open_cnt)
+ return -EBUSY;
+ open_cnt++;
+ ret = 0;
+ f->private_data = console;
+ if (!drv_data->adsp_os_running)
+ goto exit_open;
+ ret = nvadsp_mbox_open(&console->shl_snd_mbox, &snd_mbox_id,
+ "adsp_send_cnsl", NULL, NULL);
+ if (!ret)
+ goto exit_open;
+ pr_err("adsp_consol: Failed to init adsp_consol send mailbox");
+ memset(&console->shl_snd_mbox, 0, sizeof(struct nvadsp_mbox));
+ open_cnt--;
+exit_open:
+ return ret;
+}
+static int adsp_consol_close(struct inode *i, struct file *f)
+{
+ int ret = 0;
+ struct nvadsp_cnsl *console = i->i_private;
+ struct nvadsp_mbox *mbox = &console->shl_snd_mbox;
+ struct device *dev = console->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+
+ open_cnt--;
+ if (!drv_data->adsp_os_running || (0 == mbox->id))
+ goto exit_close;
+ ret = nvadsp_mbox_close(mbox);
+ if (ret)
+ pr_err("adsp_consol: Failed to close adsp_consol send mailbox)");
+ memset(mbox, 0, sizeof(struct nvadsp_mbox));
+exit_close:
+ return ret;
+}
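+
+/*
+ * Single ioctl entry point for the console: LOAD/SUSPEND/STOP/RESUME manage
+ * the ADSP OS state, RUN_APP/STOP_APP load and tear down app instances,
+ * OPN_MBX/CLOSE_MBX/PUT_MBX/GET_MBX exchange messages with an app mailbox,
+ * and PUT_DATA writes to the shared console send mailbox.
+ */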
+static long
+adsp_consol_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+
+ int ret = 0;
+ uint16_t *mid;
+ uint16_t mbxid = 0;
+ uint32_t data;
+ uint64_t ctx2;
+ nvadsp_app_info_t *app_info;
+ struct adsp_consol_run_app_arg_t app_args;
+ struct nvadsp_cnsl *console = f->private_data;
+ struct nvadsp_mbox *mbox;
+ struct device *dev = console->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ void __user *uarg = (void __user *)arg;
+
+ if (_IOC_TYPE(cmd) != NV_ADSP_CONSOLE_MAGIC)
+ return -EFAULT;
+
+ if ((_IOC_NR(cmd) != _IOC_NR(ADSP_CNSL_LOAD)) &&
+ (_IOC_NR(cmd) != _IOC_NR(ADSP_CNSL_RESUME)) &&
+ (!drv_data->adsp_os_running)) {
+ dev_info(dev, "adsp_consol: os not running.");
+ return -EPERM;
+ }
+
+ if ((_IOC_NR(cmd) != _IOC_NR(ADSP_CNSL_LOAD)) &&
+ (0 == console->shl_snd_mbox.id)) {
+ dev_info(dev, "adsp_consol: Mailboxes not open.");
+ return -EPERM;
+ }
+
+ switch (_IOC_NR(cmd)) {
+ case _IOC_NR(ADSP_CNSL_LOAD):
+ ret = 0;
+
+ if (drv_data->adsp_os_running)
+ break;
+ mbxid = 30;
+ mbox = &console->shl_snd_mbox;
+ ret = nvadsp_os_load();
+ if (ret) {
+ dev_info(dev, "adsp_consol: Load OS Failed.");
+ break;
+ }
+ ret = nvadsp_os_start();
+ if (ret) {
+ dev_info(dev, "adsp_consol: Start OS Failed.");
+ break;
+ }
+ ret = nvadsp_mbox_open(mbox, &mbxid,
+ "adsp_send_cnsl", NULL, NULL);
+ if (!ret)
+ break;
+ pr_err("adsp_consol: Failed to init adsp_consol send mailbox");
+ memset(mbox, 0, sizeof(struct nvadsp_mbox));
+ break;
+ case _IOC_NR(ADSP_CNSL_SUSPEND):
+ ret = nvadsp_os_suspend();
+ if (ret)
+ dev_info(dev, "adsp_consol: OS Suspend Failed.");
+ break;
+ case _IOC_NR(ADSP_CNSL_STOP):
+ nvadsp_os_stop();
+ break;
+ case _IOC_NR(ADSP_CNSL_RESUME):
+ if (!drv_data->adsp_os_suspended) {
+ dev_info(dev, "adsp_consol: OS is not suspended to perform resume.");
+ break;
+ }
+ ret = nvadsp_os_start();
+ if (ret)
+ dev_info(dev, "adsp_consol: OS Resume Failed.");
+ break;
+ case _IOC_NR(ADSP_CNSL_RUN_APP):
+ if (!ACCESS_OK(uarg, sizeof(struct adsp_consol_run_app_arg_t)))
+ return -EACCES;
+ ret = copy_from_user(&app_args, uarg,
+ sizeof(app_args));
+ if (ret) {
+ ret = -EACCES;
+ break;
+ }
+
+ dev_info(dev, "Core ID: %d\n", app_args.core_id);
+ app_args.app_name[NVADSP_NAME_SZ_MAX] = '\0';
+
+#ifdef USE_RUN_APP_API
+ app_args.ctx2 = (uint64_t)nvadsp_run_app(NULL,
+ app_args.app_name,
+ (nvadsp_app_args_t *)&app_args.args[0],
+ NULL, 0, app_args.core_id, true);
+ if (!app_args.ctx2) {
+ dev_info(dev, "adsp_consol: unable to run %s\n",
+ app_args.app_name);
+ return -EINVAL;
+ }
+ if (adsp_app_ctx_add(app_args.ctx2)) {
+ dev_info(dev, "adsp_consol: unable to add %s ctx\n",
+ app_args.app_name);
+ return -EINVAL;
+ }
+#else
+ app_args.ctx1 = (uint64_t)nvadsp_app_load(app_args.app_path,
+ app_args.app_name);
+ if (!app_args.ctx1) {
+ dev_info(dev,
+ "adsp_consol: dynamic app load failed %s\n",
+ app_args.app_name);
+ return -EINVAL;
+ }
+ if (adsp_app_ctx_add(app_args.ctx1)) {
+ dev_info(dev, "adsp_consol: unable to add %s ctx\n",
+ app_args.app_name);
+ return -EINVAL;
+ }
+
+ dev_info(dev, "adsp_consol: calling nvadsp_app_init\n");
+ app_args.ctx2 =
+ (uint64_t)nvadsp_app_init((void *)app_args.ctx1, NULL);
+ if (!app_args.ctx2) {
+ dev_info(dev,
+ "adsp_consol: unable to initilize the app\n");
+ return -EINVAL;
+ }
+ if (adsp_app_ctx_add(app_args.ctx2)) {
+ dev_info(dev, "adsp_consol: unable to add %s ctx\n",
+ app_args.app_name);
+ return -EINVAL;
+ }
+
+ dev_info(dev, "adsp_consol: calling nvadsp_app_start\n");
+ ret = nvadsp_app_start((void *)app_args.ctx2);
+ if (ret) {
+ dev_info(dev, "adsp_consol: unable to start the app\n");
+ break;
+ }
+#endif
+ ret = copy_to_user((void __user *) arg, &app_args,
+ sizeof(struct adsp_consol_run_app_arg_t));
+ if (ret)
+ ret = -EACCES;
+ break;
+ case _IOC_NR(ADSP_CNSL_STOP_APP):
+ if (!ACCESS_OK(uarg, sizeof(struct adsp_consol_run_app_arg_t)))
+ return -EACCES;
+ ret = copy_from_user(&app_args, uarg,
+ sizeof(app_args));
+ if (ret) {
+ ret = -EACCES;
+ break;
+ }
+#ifdef USE_RUN_APP_API
+ if (!app_args.ctx2) {
+ ret = -EACCES;
+ break;
+ }
+ if (adsp_app_ctx_check(app_args.ctx2)) {
+ dev_info(dev, "adsp_consol: unable to check %s ctx\n",
+ app_args.app_name);
+ return -EINVAL;
+ }
+
+ app_args.ctx1 = (uint64_t)
+ ((nvadsp_app_info_t *)app_args.ctx2)->handle;
+
+ nvadsp_exit_app((nvadsp_app_info_t *)app_args.ctx2, false);
+ nvadsp_app_unload((const void *)app_args.ctx1);
+
+ adsp_app_ctx_remove(app_args.ctx2);
+#else
+ if ((!app_args.ctx2) || (!app_args.ctx1)) {
+ ret = -EACCES;
+ break;
+ }
+
+ if (adsp_app_ctx_check(app_args.ctx2) ||
+ adsp_app_ctx_check(app_args.ctx1)) {
+ dev_info(dev, "adsp_consol: unable to check %s ctx\n",
+ app_args.app_name);
+ return -EINVAL;
+ }
+
+ nvadsp_app_deinit((void *)app_args.ctx2);
+ nvadsp_app_unload((void *)app_args.ctx1);
+
+ adsp_app_ctx_remove(app_args.ctx2);
+ adsp_app_ctx_remove(app_args.ctx1);
+#endif
+
+ break;
+ case _IOC_NR(ADSP_CNSL_CLR_BUFFER):
+ break;
+ case _IOC_NR(ADSP_CNSL_OPN_MBX):
+ if (!ACCESS_OK(uarg, sizeof(ctx2)))
+ return -EACCES;
+ ret = copy_from_user(&ctx2, uarg, sizeof(ctx2));
+ if (ret) {
+ ret = -EACCES;
+ break;
+ }
+ if (adsp_app_ctx_check(ctx2)) {
+ dev_info(dev, "adsp_consol: unable to check ctx\n");
+ return -EINVAL;
+ }
+
+ app_info = (nvadsp_app_info_t *)ctx2;
+
+ if (app_info && app_info->mem.shared) {
+ mid = (uint16_t *)(app_info->mem.shared);
+ dev_info(dev, "adsp_consol: open %x\n", *mid);
+ mbxid = *mid;
+ }
+
+ ret = nvadsp_mbox_open(&console->app_mbox, &mbxid,
+ "app_mbox", NULL, NULL);
+ if (ret) {
+ pr_err("adsp_consol: Failed to open app mailbox");
+ ret = -EACCES;
+ }
+ break;
+ case _IOC_NR(ADSP_CNSL_CLOSE_MBX):
+ mbox = &console->app_mbox;
+ while (!nvadsp_mbox_recv(mbox, &data, 0, 0))
+ ;
+ ret = nvadsp_mbox_close(mbox);
+ if (ret)
+ break;
+ memset(mbox, 0, sizeof(struct nvadsp_mbox));
+ break;
+ case _IOC_NR(ADSP_CNSL_PUT_MBX):
+ if (!ACCESS_OK(uarg, sizeof(uint32_t)))
+ return -EACCES;
+ ret = copy_from_user(&data, uarg,
+ sizeof(uint32_t));
+ if (ret) {
+ ret = -EACCES;
+ break;
+ }
+ ret = nvadsp_mbox_send(&console->app_mbox, data,
+ NVADSP_MBOX_SMSG, 0, 0);
+ break;
+ case _IOC_NR(ADSP_CNSL_GET_MBX):
+ if (!ACCESS_OK(uarg, sizeof(uint32_t)))
+ return -EACCES;
+ ret = nvadsp_mbox_recv(&console->app_mbox, &data, 0, 0);
+ if (ret)
+ break;
+ ret = copy_to_user(uarg, &data,
+ sizeof(uint32_t));
+ if (ret)
+ ret = -EACCES;
+ break;
+ case _IOC_NR(ADSP_CNSL_PUT_DATA):
+ if (!ACCESS_OK(uarg, sizeof(struct adsp_consol_run_app_arg_t)))
+ return -EACCES;
+ ret = copy_from_user(&data, uarg, sizeof(uint32_t));
+ if (ret) {
+ ret = -EACCES;
+ break;
+ }
+ return nvadsp_mbox_send(&console->shl_snd_mbox, data,
+ NVADSP_MBOX_SMSG, 0, 0);
+ break;
+ default:
+ dev_info(dev, "adsp_consol: invalid command\n");
+ return -EINVAL;
+ }
+ return ret;
+}
+
+static const struct file_operations adsp_console_operations = {
+ .open = adsp_consol_open,
+ .release = adsp_consol_close,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = adsp_consol_ioctl,
+#endif
+ .unlocked_ioctl = adsp_consol_ioctl
+};
+
+int
+adsp_create_cnsl(struct dentry *adsp_debugfs_root, struct nvadsp_cnsl *cnsl)
+{
+ int ret = 0;
+
+ struct device *dev = cnsl->dev;
+
+ if (IS_ERR_OR_NULL(adsp_debugfs_root)) {
+ ret = -ENOENT;
+ goto err_out;
+ }
+
+ if (!debugfs_create_file("adsp_console", S_IRUSR,
+ adsp_debugfs_root, cnsl,
+ &adsp_console_operations)) {
+ dev_err(dev,
+ "unable to create adsp console debug fs file\n");
+ ret = -ENOENT;
+ goto err_out;
+ }
+
+ memset(&cnsl->app_mbox, 0, sizeof(cnsl->app_mbox));
+
+err_out:
+ return ret;
+}
diff --git a/drivers/platform/tegra/nvadsp/adsp_console_dbfs.h b/drivers/platform/tegra/nvadsp/adsp_console_dbfs.h
new file mode 100644
index 00000000..add0b159
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/adsp_console_dbfs.h
@@ -0,0 +1,31 @@
+/*
+* adsp_console_dbfs.h
+*
+* A header file for adsp console driver
+*
+* Copyright (C) 2014 NVIDIA Corporation. All rights reserved.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+*/
+
+#ifndef ADSP_CNSL_DBFS_H
+#define ADSP_CNSL_DBFS_H
+
+struct nvadsp_cnsl {
+ struct device *dev;
+ struct nvadsp_mbox shl_snd_mbox;
+ struct nvadsp_mbox app_mbox;
+};
+
+int
+adsp_create_cnsl(struct dentry *adsp_debugfs_root, struct nvadsp_cnsl *cnsl);
+
+#endif /* ADSP_CNSL_DBFS_H */
diff --git a/drivers/platform/tegra/nvadsp/adsp_cpustat.c b/drivers/platform/tegra/nvadsp/adsp_cpustat.c
new file mode 100644
index 00000000..033a8e50
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/adsp_cpustat.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2015-2016, NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include "dev.h"
+
+#define ACTMON_DEV_CTRL 0x00
+#define ACTMON_DEV_CTRL_ENB (0x1 << 31)
+#define ACTMON_DEV_CTRL_AT_END_ENB (0x1 << 15)
+#define ACTMON_DEV_CTRL_PERIODIC_ENB (0x1 << 13)
+#define ACTMON_DEV_CTRL_SAMPLE_PERIOD_VAL_SHIFT (0)
+#define ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK (0xff << 0)
+
+#define ACTMON_DEV_COUNT 0x18
+
+#define ACTMON_DEV_INTR_STATUS 0x20
+#define ACTMON_DEV_INTR_AT_END (0x1 << 27)
+
+#define ACTMON_DEV_COUNT_WEIGHT 0x24
+
+#define ACTMON_DEV_SAMPLE_CTRL 0x28
+#define ACTMON_DEV_SAMPLE_CTRL_TICK_65536 (0x1 << 2)
+#define ACTMON_DEV_SAMPLE_CTRL_TICK_256 (0x0 << 1)
+
+#define AMISC_ACTMON_0 0x54
+#define AMISC_ACTMON_CNT_TARGET_ENABLE (0x1 << 31)
+
+#define ACTMON_REG_OFFSET 0x800
+/* millisecond divider as SAMPLE_TICK */
+#define SAMPLE_MS_DIVIDER 65536
+
+
+struct adsp_cpustat {
+ int irq;
+ struct device *device;
+ const char *dev_id;
+ spinlock_t lock;
+ struct clk *ape_clk;
+ struct clk *adsp_clk;
+ unsigned long ape_freq;
+ unsigned long adsp_freq;
+ u64 cur_usage;
+ bool enable;
+ u64 max_usage;
+ void __iomem *base;
+};
+
+static struct adsp_cpustat cpustat;
+
+static struct adsp_cpustat *cpumon;
+
+static inline u32 actmon_readl(u32 offset)
+{
+ return __raw_readl(cpumon->base + offset);
+}
+static inline void actmon_writel(u32 val, u32 offset)
+{
+ __raw_writel(val, cpumon->base + offset);
+}
+
+static inline void actmon_wmb(void)
+{
+ wmb();
+}
+
+static irqreturn_t adsp_cpustat_isr(int irq, void *dev_id)
+{
+ u32 val;
+ unsigned long period, flags;
+
+ spin_lock_irqsave(&cpumon->lock, flags);
+ val = actmon_readl(ACTMON_DEV_INTR_STATUS);
+ actmon_writel(val, ACTMON_DEV_INTR_STATUS);
+
+ if (val & ACTMON_DEV_INTR_AT_END) {
+ period = (255 * SAMPLE_MS_DIVIDER) / cpumon->ape_freq;
+
+ cpumon->cur_usage =
+ ((u64)actmon_readl(ACTMON_DEV_COUNT) * 100) / (period * cpumon->adsp_freq);
+ if (cpumon->cur_usage > cpumon->max_usage)
+ cpumon->max_usage = cpumon->cur_usage;
+ }
+ spin_unlock_irqrestore(&cpumon->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void configure_actmon(void)
+{
+ u32 val;
+
+ /* Set count weight to 256 */
+ actmon_writel(0x100, ACTMON_DEV_COUNT_WEIGHT);
+
+ /* Enable periodic sampling */
+ val = actmon_readl(ACTMON_DEV_CTRL);
+ val |= ACTMON_DEV_CTRL_PERIODIC_ENB;
+
+ /* Set sampling period to max, i.e. 255 APE clocks */
+ val &= ~ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK;
+ val |= (0xFF <<
+ ACTMON_DEV_CTRL_SAMPLE_PERIOD_VAL_SHIFT)
+ & ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK;
+
+ /* Enable the AT_END interrupt */
+ val |= ACTMON_DEV_CTRL_AT_END_ENB;
+ actmon_writel(val, ACTMON_DEV_CTRL);
+
+ actmon_writel(ACTMON_DEV_SAMPLE_CTRL_TICK_65536,
+ ACTMON_DEV_SAMPLE_CTRL);
+ actmon_wmb();
+}
+
+static void adsp_cpustat_enable(void)
+{
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cpumon->lock, flags);
+
+ val = actmon_readl(ACTMON_DEV_CTRL);
+ val |= ACTMON_DEV_CTRL_ENB;
+ actmon_writel(val, ACTMON_DEV_CTRL);
+ actmon_wmb();
+
+ enable_irq(cpumon->irq);
+ spin_unlock_irqrestore(&cpumon->lock, flags);
+}
+
+static void adsp_cpustat_disable(void)
+{
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cpumon->lock, flags);
+ disable_irq(cpumon->irq);
+
+ val = actmon_readl(ACTMON_DEV_CTRL);
+ val &= ~ACTMON_DEV_CTRL_ENB;
+ actmon_writel(val, ACTMON_DEV_CTRL);
+ actmon_writel(0xffffffff, ACTMON_DEV_INTR_STATUS);
+ actmon_wmb();
+
+ spin_unlock_irqrestore(&cpumon->lock, flags);
+}
+
+#define RW_MODE (S_IWUSR | S_IRUSR)
+#define RO_MODE S_IRUSR
+
+static int cur_usage_get(void *data, u64 *val)
+{
+ *val = cpumon->cur_usage;
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cur_usage_fops, cur_usage_get, NULL, "%llu\n");
+
+static int max_usage_get(void *data, u64 *val)
+{
+ *val = cpumon->max_usage;
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(max_usage_fops, max_usage_get, NULL, "%llu\n");
+
+static int enable_set(void *data, u64 val)
+{
+ if (cpumon->enable == (bool)val)
+ return 0;
+ cpumon->enable = (bool)val;
+
+ if (cpumon->enable)
+ adsp_cpustat_enable();
+ else
+ adsp_cpustat_disable();
+ return 0;
+}
+
+static int enable_get(void *data, u64 *val)
+{
+ *val = cpumon->enable;
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(enable_fops, enable_get, enable_set, "%llu\n");
+
+static int cpustat_debugfs_init(struct nvadsp_drv_data *drv)
+{
+ int ret = -ENOMEM;
+ struct dentry *d, *dir;
+
+ if (!drv->adsp_debugfs_root)
+ return ret;
+ dir = debugfs_create_dir("adsp_cpustat", drv->adsp_debugfs_root);
+ if (!dir)
+ return ret;
+
+ d = debugfs_create_file(
+ "cur_usage", RO_MODE, dir, cpumon, &cur_usage_fops);
+ if (!d)
+ return ret;
+
+ d = debugfs_create_file(
+ "max_usage", RO_MODE, dir, cpumon, &max_usage_fops);
+ if (!d)
+ return ret;
+
+ d = debugfs_create_file(
+ "enable", RW_MODE, dir, cpumon, &enable_fops);
+ if (!d)
+ return ret;
+
+ return 0;
+}
+
+int adsp_cpustat_init(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
+ static void __iomem *amisc_base;
+ u32 val;
+ int ret = -EINVAL;
+
+ if (drv->cpustat_initialized)
+ return 0;
+
+ cpumon = &cpustat;
+ spin_lock_init(&cpumon->lock);
+ cpumon->base = drv->base_regs[AMISC] + ACTMON_REG_OFFSET;
+ amisc_base = drv->base_regs[AMISC];
+
+ cpumon->ape_clk = clk_get_sys(NULL, "adsp.ape");
+ if (IS_ERR_OR_NULL(cpumon->ape_clk)) {
+ dev_err(cpumon->device, "Failed to find adsp.ape clk\n");
+ ret = -EINVAL;
+ goto err_ape_clk;
+ }
+
+ ret = clk_prepare_enable(cpumon->ape_clk);
+ if (ret) {
+ dev_err(cpumon->device, "Failed to enable ape clock\n");
+ goto err_ape_enable;
+ }
+ cpumon->ape_freq = clk_get_rate(cpumon->ape_clk) / 1000;
+
+ cpumon->adsp_clk = clk_get_sys(NULL, "adsp_cpu");
+ if (IS_ERR_OR_NULL(cpumon->adsp_clk)) {
+ dev_err(cpumon->device, "Failed to find adsp cpu clock\n");
+ ret = -EINVAL;
+ goto err_adsp_clk;
+ }
+
+ ret = clk_prepare_enable(cpumon->adsp_clk);
+ if (ret) {
+ dev_err(cpumon->device, "Failed to enable adsp cpu clock\n");
+ goto err_adsp_enable;
+ }
+ cpumon->adsp_freq = clk_get_rate(cpumon->adsp_clk) / 1000;
+
+ /* Enable AMISC_ACTMON */
+ val = __raw_readl(amisc_base + AMISC_ACTMON_0);
+ val |= AMISC_ACTMON_CNT_TARGET_ENABLE;
+ __raw_writel(val, amisc_base + AMISC_ACTMON_0);
+
+ /* Clear all interrupts */
+ actmon_writel(0xffffffff, ACTMON_DEV_INTR_STATUS);
+
+ /* One time configuration of actmon regs */
+ configure_actmon();
+
+ cpumon->irq = drv->agic_irqs[ACTMON_VIRQ];
+ ret = request_irq(cpumon->irq, adsp_cpustat_isr,
+ IRQ_TYPE_LEVEL_HIGH, "adsp_actmon", cpumon);
+ if (ret) {
+ dev_err(cpumon->device, "Failed irq %d request\n", cpumon->irq);
+ goto err_irq;
+ }
+
+ cpustat_debugfs_init(drv);
+
+ drv->cpustat_initialized = true;
+
+ return 0;
+err_irq:
+ clk_disable_unprepare(cpumon->adsp_clk);
+err_adsp_enable:
+ clk_put(cpumon->adsp_clk);
+err_adsp_clk:
+ clk_disable_unprepare(cpumon->ape_clk);
+err_ape_enable:
+ clk_put(cpumon->ape_clk);
+err_ape_clk:
+ return ret;
+}
+
+int adsp_cpustat_exit(struct platform_device *pdev)
+{
+ status_t ret = 0;
+ struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
+ if (!drv->cpustat_initialized) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ free_irq(cpumon->irq, cpumon);
+ clk_disable_unprepare(cpumon->adsp_clk);
+ clk_put(cpumon->adsp_clk);
+ clk_put(cpumon->ape_clk);
+ drv->cpustat_initialized = false;
+
+end:
+ return ret;
+}
diff --git a/drivers/platform/tegra/nvadsp/adsp_dfs.c b/drivers/platform/tegra/nvadsp/adsp_dfs.c
new file mode 100644
index 00000000..3f3dd243
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/adsp_dfs.c
@@ -0,0 +1,877 @@
+/*
+ * adsp_dfs.c
+ *
+ * adsp dynamic frequency scaling
+ *
+ * Copyright (C) 2014-2020, NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/jiffies.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
+#include
+#else
+#include
+#endif
+#include
+
+#include "dev.h"
+#include "ape_actmon.h"
+#include "os.h"
+
+#ifndef CONFIG_TEGRA_ADSP_ACTMON
+void actmon_rate_change(unsigned long freq, bool override)
+{
+
+}
+#endif
+
+#define MBOX_TIMEOUT 5000 /* in ms */
+#define HOST_ADSP_DFS_MBOX_ID 3
+
+enum adsp_dfs_reply {
+ ACK,
+ NACK,
+};
+
+/*
+ * Frequency in Hz. The frequency always needs to be a multiple of 12.8 MHz,
+ * and the table is extended in slabs of 38.4 MHz.
+ */
+static unsigned long adsp_cpu_freq_table_t21x[] = {
+ MIN_ADSP_FREQ,
+ MIN_ADSP_FREQ * 2,
+ MIN_ADSP_FREQ * 3,
+ MIN_ADSP_FREQ * 4,
+ MIN_ADSP_FREQ * 5,
+ MIN_ADSP_FREQ * 6,
+ MIN_ADSP_FREQ * 7,
+ MIN_ADSP_FREQ * 8,
+ MIN_ADSP_FREQ * 9,
+ MIN_ADSP_FREQ * 10,
+ MIN_ADSP_FREQ * 11,
+ MIN_ADSP_FREQ * 12,
+ MIN_ADSP_FREQ * 13,
+ MIN_ADSP_FREQ * 14,
+ MIN_ADSP_FREQ * 15,
+ MIN_ADSP_FREQ * 16,
+ MIN_ADSP_FREQ * 17,
+ MIN_ADSP_FREQ * 18,
+ MIN_ADSP_FREQ * 19,
+ MIN_ADSP_FREQ * 20,
+ MIN_ADSP_FREQ * 21,
+};
+
+/*
+ * Frequency in Hz.
+ */
+static unsigned long adsp_cpu_freq_table_t18x[] = {
+ 150000000lu,
+ 300000000lu,
+ 600000000lu,
+};
+
+static unsigned long *adsp_cpu_freq_table;
+static int adsp_cpu_freq_table_size;
+
+struct adsp_dfs_policy {
+ bool enable;
+ /*
+ * update_freq_flag = TRUE: ADSP ACKed the new freq
+ * = FALSE: ADSP NACKed the new freq
+ */
+ bool update_freq_flag;
+
+ const char *clk_name;
+ unsigned long min; /* in kHz */
+ unsigned long max; /* in kHz */
+ unsigned long cur; /* in kHz */
+ unsigned long cpu_min; /* ADSP min freq(KHz). Remain unchanged */
+ unsigned long cpu_max; /* ADSP max freq(KHz). Remain unchanged */
+
+ struct clk *adsp_clk;
+ struct clk *aclk_clk;
+ struct clk *adsp_cpu_abus_clk;
+ struct nvadsp_mbox mbox;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *root;
+#endif
+ unsigned long ovr_freq;
+};
+
+
+
+#define MAX_SIZE(x, y) (x > y ? x : y)
+#define TIME_IN_STATE_SIZE MAX_SIZE(ARRAY_SIZE(adsp_cpu_freq_table_t21x), \
+ ARRAY_SIZE(adsp_cpu_freq_table_t18x))
+struct adsp_freq_stats {
+ struct device *dev;
+ unsigned long long last_time;
+ int last_index;
+ u64 time_in_state[TIME_IN_STATE_SIZE];
+
+ int state_num;
+};
+
+static struct adsp_dfs_policy *policy;
+static struct adsp_freq_stats freq_stats;
+static struct device *device;
+
+
+static DEFINE_MUTEX(policy_mutex);
+
+static bool is_os_running(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct nvadsp_drv_data *drv_data;
+
+ if (!dev)
+ return false;
+
+ pdev = to_platform_device(dev);
+ drv_data = platform_get_drvdata(pdev);
+
+ if (!drv_data->adsp_os_running) {
+ dev_dbg(&pdev->dev, "%s: adsp os is not loaded\n", __func__);
+ return false;
+ }
+ return true;
+}
+
+static int adsp_clk_get(struct adsp_dfs_policy *policy)
+{
+ struct device_node *node = device->of_node;
+ int ret = 0;
+
+ policy->adsp_clk = devm_clk_get(device, "adsp");
+ if (IS_ERR_OR_NULL(policy->adsp_clk)) {
+ dev_err(device, "unable to find adsp clock\n");
+ ret = PTR_ERR(policy->adsp_clk);
+ }
+
+ if (!of_device_is_compatible(node, "nvidia,tegra210-adsp")) {
+ policy->aclk_clk = devm_clk_get(device, "aclk");
+
+ if (IS_ERR_OR_NULL(policy->aclk_clk)) {
+ dev_err(device, "unable to find aclk clock\n");
+ ret = PTR_ERR(policy->aclk_clk);
+ }
+ } else {
+ policy->adsp_cpu_abus_clk =
+ devm_clk_get(device, "adsp_cpu_abus");
+
+ if (IS_ERR_OR_NULL(policy->adsp_cpu_abus_clk)) {
+ dev_err(device, "unable to find adsp cpu abus clock\n");
+ ret = PTR_ERR(policy->adsp_cpu_abus_clk);
+ }
+ }
+
+ return ret;
+}
+
+static void adsp_clk_put(struct adsp_dfs_policy *policy)
+{
+ if (policy->adsp_cpu_abus_clk)
+ devm_clk_put(device, policy->adsp_cpu_abus_clk);
+
+ if (policy->adsp_clk)
+ devm_clk_put(device, policy->adsp_clk);
+
+ if (policy->aclk_clk)
+ devm_clk_put(device, policy->aclk_clk);
+}
+
+static int adsp_clk_set_rate(struct adsp_dfs_policy *policy,
+ unsigned long freq_hz)
+{
+ struct device_node *node = device->of_node;
+ int ret;
+
+ if (of_device_is_compatible(node, "nvidia,tegra210-adsp"))
+ ret = clk_set_rate(policy->adsp_cpu_abus_clk, freq_hz);
+ else
+ ret = clk_set_rate(policy->aclk_clk, freq_hz);
+
+ return ret;
+}
+
+static unsigned long adsp_clk_get_rate(struct adsp_dfs_policy *policy)
+{
+ return clk_get_rate(policy->adsp_clk);
+}
+
+static void adsp_cpu_freq_table_setup(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+
+ if (adsp_cpu_freq_table)
+ return;
+
+ if (of_device_is_compatible(node, "nvidia,tegra210-adsp")) {
+ adsp_cpu_freq_table = adsp_cpu_freq_table_t21x;
+ adsp_cpu_freq_table_size = ARRAY_SIZE(adsp_cpu_freq_table_t21x);
+ } else {
+ adsp_cpu_freq_table = adsp_cpu_freq_table_t18x;
+ adsp_cpu_freq_table_size = ARRAY_SIZE(adsp_cpu_freq_table_t18x);
+ }
+}
+
+/* Expects and returns freq in Hz, as the table is formed in terms of Hz */
+static unsigned long adsp_get_target_freq(unsigned long tfreq, int *index)
+{
+ int i;
+ int size = adsp_cpu_freq_table_size;
+
+ if (tfreq <= adsp_cpu_freq_table[0]) {
+ *index = 0;
+ return adsp_cpu_freq_table[0];
+ }
+
+ if (tfreq >= adsp_cpu_freq_table[size - 1]) {
+ *index = size - 1;
+ return adsp_cpu_freq_table[size - 1];
+ }
+
+ for (i = 1; i < size; i++) {
+ if ((tfreq <= adsp_cpu_freq_table[i]) &&
+ (tfreq > adsp_cpu_freq_table[i - 1])) {
+ *index = i;
+ return adsp_cpu_freq_table[i];
+ }
+ }
+
+ return 0;
+}
+
+static struct adsp_dfs_policy dfs_policy = {
+ .enable = 1,
+ .clk_name = "adsp_cpu",
+};
+
+static int adsp_update_freq_handshake(unsigned long tfreq_hz, int index)
+{
+ struct nvadsp_mbox *mbx = &policy->mbox;
+ enum adsp_dfs_reply reply;
+ int ret;
+
+ dev_dbg(device, "sending change in freq(hz):%lu\n", tfreq_hz);
+ /*
+ * Ask adsp to do action upon change in freq. ADSP and Host need to
+ * maintain the same freq table.
+ */
+ ret = nvadsp_mbox_send(mbx, index,
+ NVADSP_MBOX_SMSG, true, 100);
+ if (ret) {
+ dev_err(device, "%s:host to adsp, mbox_send failure. ret:%d\n",
+ __func__, ret);
+ policy->update_freq_flag = false;
+ goto err_out;
+ }
+
+ ret = nvadsp_mbox_recv(&policy->mbox, &reply, true, MBOX_TIMEOUT);
+ if (ret) {
+ dev_err(device, "%s:host to adsp, mbox_receive failure. ret:%d\n",
+ __func__, ret);
+ policy->update_freq_flag = false;
+ goto err_out;
+ }
+
+ switch (reply) {
+ case ACK:
+ /* Set Update freq flag */
+ dev_dbg(device, "adsp freq change status:ACK\n");
+ policy->update_freq_flag = true;
+ break;
+ case NACK:
+ /* Set Update freq flag */
+ dev_dbg(device, "adsp freq change status:NACK\n");
+ policy->update_freq_flag = false;
+ break;
+ default:
+ dev_err(device, "Error: adsp freq change status\n");
+ }
+
+ dev_dbg(device, "%s:status received from adsp: %s, tfreq(hz):%lu\n",
+ __func__,
+ policy->update_freq_flag == true ? "ACK" : "NACK",
+ tfreq_hz);
+err_out:
+ return ret;
+}
+
+/*
+ * update_freq - update the ADSP freq and ask the ADSP to adjust its timer
+ * for the new freq.
+ * freq_khz - target frequency in KHz
+ * return - the final freq that was set
+ * - 0 in case of error
+ *
+ * Note - policy->cur is updated via the rate change notifier when the
+ * freq is changed in hw.
+ */
+static unsigned long update_freq(unsigned long freq_khz)
+{
+ struct nvadsp_drv_data *drv = dev_get_drvdata(device);
+ unsigned long tfreq_hz, old_freq_khz;
+ u32 efreq;
+ int index;
+ int ret;
+
+ if (!is_os_running(device)) {
+ dev_err(device, "adsp os is not running\n");
+ return 0;
+ }
+
+ tfreq_hz = adsp_get_target_freq(freq_khz * 1000, &index);
+ if (!tfreq_hz) {
+ dev_err(device, "unable get the target freq\n");
+ return 0;
+ }
+
+ old_freq_khz = policy->cur;
+
+ if ((tfreq_hz / 1000) == old_freq_khz) {
+ dev_dbg(device, "old and new target_freq is same\n");
+ return 0;
+ }
+
+ ret = adsp_clk_set_rate(policy, tfreq_hz);
+ if (ret) {
+ dev_err(device, "failed to set adsp freq:%luhz err:%d\n",
+ tfreq_hz, ret);
+ policy->update_freq_flag = false;
+ return 0;
+ }
+
+ efreq = adsp_to_emc_freq(tfreq_hz / 1000);
+
+ ret = nvadsp_set_bw(drv, efreq);
+ if (ret) {
+ policy->update_freq_flag = false;
+ goto err_out;
+ }
+
+ /*
+ * On Tegra chips newer than T210, os_args->adsp_freq_hz is used to
+ * convey the ADSP CPU clk rate and there is no need to set up a
+ * timer prescaler, so skip communicating the ADSP CPU clk rate
+ * update to the ADSP OS over the mbox.
+ */
+ if (!of_device_is_compatible(device->of_node, "nvidia,tegra210-adsp"))
+ policy->update_freq_flag = true;
+ else
+ adsp_update_freq_handshake(tfreq_hz, index);
+
+ /*
+ * Use os_args->adsp_freq_hz to update adsp cpu clk rate
+ * for adspos firmware, which uses this shared variable
+ * to get the clk rate for EDF, etc.
+ */
+ if (policy->update_freq_flag) {
+ struct nvadsp_shared_mem *sm = drv->shared_adsp_os_data;
+
+ sm->os_args.adsp_freq_hz = tfreq_hz;
+ }
+
+err_out:
+ if (!policy->update_freq_flag) {
+ ret = adsp_clk_set_rate(policy, old_freq_khz * 1000);
+ if (ret) {
+ dev_err(device, "failed to resume adsp freq(khz):%lu\n",
+ old_freq_khz);
+ policy->update_freq_flag = false;
+ }
+
+ efreq = adsp_to_emc_freq(old_freq_khz);
+
+ ret = nvadsp_set_bw(drv, efreq);
+ if (ret)
+ policy->update_freq_flag = false;
+
+ tfreq_hz = old_freq_khz * 1000;
+ }
+ return tfreq_hz / 1000;
+}
+
+/* Set adsp dfs policy min freq(Khz) */
+static int policy_min_set(void *data, u64 val)
+{
+ int ret = -EINVAL;
+ unsigned long min = (unsigned long)val;
+
+ if (!is_os_running(device))
+ return ret;
+
+ mutex_lock(&policy_mutex);
+ if (!policy->enable) {
+ dev_err(device, "adsp dfs policy is not enabled\n");
+ goto exit_out;
+ }
+
+ if (min == policy->min)
+ goto exit_out;
+ else if (min < policy->cpu_min)
+ min = policy->cpu_min;
+ else if (min >= policy->cpu_max)
+ min = policy->cpu_max;
+
+ if (min > policy->cur) {
+ min = update_freq(min);
+ if (min)
+ policy->cur = min;
+ }
+
+ if (min)
+ policy->min = min;
+
+ ret = 0;
+exit_out:
+ mutex_unlock(&policy_mutex);
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#define RW_MODE (S_IWUSR | S_IRUSR)
+#define RO_MODE S_IRUSR
+
+/* Get adsp dfs status: 0: disabled, 1: enabled */
+static int dfs_enable_get(void *data, u64 *val)
+{
+ mutex_lock(&policy_mutex);
+ *val = policy->enable;
+ mutex_unlock(&policy_mutex);
+
+ return 0;
+}
+
+/* Enable/disable adsp dfs */
+static int dfs_enable_set(void *data, u64 val)
+{
+ mutex_lock(&policy_mutex);
+ policy->enable = (bool) val;
+ mutex_unlock(&policy_mutex);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(enable_fops, dfs_enable_get,
+ dfs_enable_set, "%llu\n");
+
+/* Get adsp dfs policy min freq(KHz) */
+static int policy_min_get(void *data, u64 *val)
+{
+ if (!is_os_running(device))
+ return -EINVAL;
+
+ mutex_lock(&policy_mutex);
+ *val = policy->min;
+ mutex_unlock(&policy_mutex);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(min_fops, policy_min_get,
+ policy_min_set, "%llu\n");
+
+/* Get adsp dfs policy max freq(KHz) */
+static int policy_max_get(void *data, u64 *val)
+{
+ if (!is_os_running(device))
+ return -EINVAL;
+
+ mutex_lock(&policy_mutex);
+ *val = policy->max;
+ mutex_unlock(&policy_mutex);
+ return 0;
+}
+
+/* Set adsp dfs policy max freq(KHz) */
+static int policy_max_set(void *data, u64 val)
+{
+ int ret = -EINVAL;
+ unsigned long max = (unsigned long)val;
+
+ if (!is_os_running(device))
+ return ret;
+
+ mutex_lock(&policy_mutex);
+ if (!policy->enable) {
+ dev_err(device, "adsp dfs policy is not enabled\n");
+ goto exit_out;
+ }
+
+ if (!max || ((max > policy->cpu_max) || (max == policy->max)))
+ goto exit_out;
+
+ else if (max <= policy->cpu_min)
+ max = policy->cpu_min;
+
+ if (max < policy->cur)
+ max = update_freq(max);
+
+ if (max)
+ policy->cur = policy->max = max;
+ ret = 0;
+exit_out:
+ mutex_unlock(&policy_mutex);
+ return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(max_fops, policy_max_get,
+ policy_max_set, "%llu\n");
+
+/* Get adsp dfs policy's current freq */
+static int policy_cur_get(void *data, u64 *val)
+{
+ if (!is_os_running(device))
+ return -EINVAL;
+
+ mutex_lock(&policy_mutex);
+ *val = policy->cur;
+ mutex_unlock(&policy_mutex);
+
+ return 0;
+}
+
+/* Set adsp dfs policy cur freq(Khz) */
+static int policy_cur_set(void *data, u64 val)
+{
+ int ret = -EINVAL;
+ unsigned long cur = (unsigned long)val;
+
+ if (!is_os_running(device))
+ return ret;
+
+ mutex_lock(&policy_mutex);
+ if (policy->enable) {
+ dev_err(device, "adsp dfs is enabled, should be disabled first\n");
+ goto exit_out;
+ }
+
+ if (!cur || cur == policy->cur)
+ goto exit_out;
+
+ /* Check tfreq policy sanity */
+ if (cur < policy->min)
+ cur = policy->min;
+ else if (cur > policy->max)
+ cur = policy->max;
+
+ cur = update_freq(cur);
+ if (cur)
+ policy->cur = cur;
+ ret = 0;
+exit_out:
+ mutex_unlock(&policy_mutex);
+ return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cur_fops, policy_cur_get,
+ policy_cur_set, "%llu\n");
+
+static void adspfreq_stats_update(void)
+{
+ unsigned long long cur_time;
+
+ cur_time = get_jiffies_64();
+ freq_stats.time_in_state[freq_stats.last_index] += cur_time -
+ freq_stats.last_time;
+ freq_stats.last_time = cur_time;
+}
+
+/*
+ * Print residency in each freq levels
+ */
+static void dump_stats_table(struct seq_file *s, struct adsp_freq_stats *fstats)
+{
+ int i;
+
+ mutex_lock(&policy_mutex);
+ if (is_os_running(device))
+ adspfreq_stats_update();
+
+ for (i = 0; i < fstats->state_num; i++) {
+ u64 jiffies64 = nsecs_to_jiffies64(fstats->time_in_state[i]);
+ seq_printf(s, "%lu %llu\n",
+ (long unsigned int)(adsp_cpu_freq_table[i] / 1000),
+ jiffies_64_to_clock_t(jiffies64));
+ }
+ mutex_unlock(&policy_mutex);
+}
+
+static int show_time_in_state(struct seq_file *s, void *data)
+{
+ struct adsp_freq_stats *fstats =
+ (struct adsp_freq_stats *) (s->private);
+
+ dump_stats_table(s, fstats);
+ return 0;
+}
+
+static int stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_time_in_state, inode->i_private);
+}
+
+static const struct file_operations time_in_state_fops = {
+ .open = stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int adsp_dfs_debugfs_init(struct platform_device *pdev)
+{
+ int ret = -ENOMEM;
+ struct dentry *d, *root;
+ struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
+
+ if (!drv->adsp_debugfs_root)
+ return ret;
+
+ root = debugfs_create_dir("adsp_dfs", drv->adsp_debugfs_root);
+ if (!root)
+ return ret;
+
+ policy->root = root;
+
+ d = debugfs_create_file("enable", RW_MODE, root, NULL,
+ &enable_fops);
+ if (!d)
+ goto err_out;
+
+ d = debugfs_create_file("min_freq", RW_MODE, root, NULL,
+ &min_fops);
+ if (!d)
+ goto err_out;
+
+ d = debugfs_create_file("max_freq", RW_MODE, root,
+ NULL, &max_fops);
+ if (!d)
+ goto err_out;
+
+ d = debugfs_create_file("cur_freq", RW_MODE, root, NULL,
+ &cur_fops);
+ if (!d)
+ goto err_out;
+
+ d = debugfs_create_file("time_in_state", RO_MODE,
+ root, &freq_stats,
+ &time_in_state_fops);
+ if (!d)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ debugfs_remove_recursive(root);
+ policy->root = NULL;
+ dev_err(&pdev->dev,
+ "unable to create adsp logger debug fs file\n");
+ return ret;
+}
+#endif
+
+/*
+ * Set target freq.
+ * @params:
+ * freq: adsp freq in KHz
+ */
+void adsp_cpu_set_rate(unsigned long freq)
+{
+ mutex_lock(&policy_mutex);
+
+ if (!policy->enable) {
+ dev_dbg(device, "adsp dfs policy is not enabled\n");
+ goto exit_out;
+ }
+
+ if (freq < policy->min)
+ freq = policy->min;
+ else if (freq > policy->max)
+ freq = policy->max;
+
+ freq = update_freq(freq);
+ if (freq)
+ policy->cur = freq;
+exit_out:
+ mutex_unlock(&policy_mutex);
+}
+
+/*
+ * Override adsp freq and reinit actmon counters
+ *
+ * @params:
+ * freq: adsp freq in KHz
+ * return - final freq set
+ * - 0 in case of error
+ *
+ */
+unsigned long adsp_override_freq(unsigned long req_freq_khz)
+{
+ unsigned long ret_freq = 0, freq;
+ int index;
+
+ if (!is_os_running(device)) {
+ pr_err("%s: adsp os is not in running state.\n", __func__);
+ return 0;
+ }
+
+ mutex_lock(&policy_mutex);
+
+ freq = req_freq_khz;
+
+ if (freq < policy->min)
+ freq = policy->min;
+ else if (freq > policy->max)
+ freq = policy->max;
+
+ freq = adsp_get_target_freq(freq * 1000, &index);
+ if (!freq) {
+ dev_warn(device,
+ "req freq:%lukhz. unable get the target freq.\n",
+ req_freq_khz);
+ goto exit_out;
+ }
+ freq = freq / 1000; /* In KHz */
+
+ if (freq == policy->cur) {
+ ret_freq = freq;
+ goto exit_out;
+ }
+
+ policy->ovr_freq = freq;
+ ret_freq = update_freq(freq);
+ if (ret_freq)
+ policy->cur = ret_freq;
+
+ if (ret_freq != freq) {
+ dev_warn(device,
+ "req freq:%lukhz. freq override to %lukhz rejected.\n",
+ req_freq_khz, freq);
+ policy->ovr_freq = 0;
+ goto exit_out;
+ }
+
+exit_out:
+ mutex_unlock(&policy_mutex);
+ return ret_freq;
+}
+EXPORT_SYMBOL(adsp_override_freq);
+
+/*
+ * Set min ADSP freq.
+ *
+ * @params:
+ * freq: adsp freq in KHz
+ */
+void adsp_update_dfs_min_rate(unsigned long freq)
+{
+ policy_min_set(NULL, freq);
+}
+EXPORT_SYMBOL(adsp_update_dfs_min_rate);
+
+/* Enable / disable dynamic freq scaling */
+void adsp_update_dfs(bool val)
+{
+ mutex_lock(&policy_mutex);
+ policy->enable = val;
+ mutex_unlock(&policy_mutex);
+}
+
+/* Should be called after ADSP os is loaded */
+int adsp_dfs_core_init(struct platform_device *pdev)
+{
+ int size = adsp_cpu_freq_table_size;
+ struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
+ uint16_t mid = HOST_ADSP_DFS_MBOX_ID;
+ int ret = 0;
+ u32 efreq;
+
+ if (drv->dfs_initialized)
+ return 0;
+
+ device = &pdev->dev;
+ policy = &dfs_policy;
+
+ /* Set up adsp cpu freq table as per chip */
+ if (!adsp_cpu_freq_table)
+ adsp_cpu_freq_table_setup(pdev);
+
+ ret = adsp_clk_get(policy);
+ if (ret)
+ goto end;
+
+ policy->max = policy->cpu_max = drv->adsp_freq; /* adsp_freq in KHz */
+
+ policy->min = policy->cpu_min = adsp_cpu_freq_table[0] / 1000;
+
+ policy->cur = adsp_clk_get_rate(policy) / 1000;
+
+ efreq = adsp_to_emc_freq(policy->cur);
+
+ ret = nvadsp_set_bw(drv, efreq);
+ if (ret)
+ goto end;
+
+ adsp_get_target_freq(policy->cur * 1000, &freq_stats.last_index);
+ freq_stats.last_time = get_jiffies_64();
+ freq_stats.state_num = size;
+ freq_stats.dev = &pdev->dev;
+ memset(&freq_stats.time_in_state, 0, sizeof(freq_stats.time_in_state));
+
+ ret = nvadsp_mbox_open(&policy->mbox, &mid, "dfs_comm", NULL, NULL);
+ if (ret) {
+ dev_info(&pdev->dev, "unable to open mailbox. ret:%d\n", ret);
+ goto end;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ adsp_dfs_debugfs_init(pdev);
+#endif
+ drv->dfs_initialized = true;
+
+ dev_dbg(&pdev->dev, "adsp dfs initialized ....\n");
+ return ret;
+end:
+
+ adsp_clk_put(policy);
+
+ return ret;
+}
+
+int adsp_dfs_core_exit(struct platform_device *pdev)
+{
+ status_t ret = 0;
+ struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
+
+ /* return if dfs is not initialized */
+ if (!drv->dfs_initialized)
+ return -ENODEV;
+
+ ret = nvadsp_mbox_close(&policy->mbox);
+ if (ret)
+ dev_info(&pdev->dev,
+ "adsp dfs exit failed: mbox close error. ret:%d\n", ret);
+
+ adsp_clk_put(policy);
+
+ drv->dfs_initialized = false;
+ dev_dbg(&pdev->dev, "adsp dfs has exited ....\n");
+
+ return ret;
+}
diff --git a/drivers/platform/tegra/nvadsp/adsp_lpthread.c b/drivers/platform/tegra/nvadsp/adsp_lpthread.c
new file mode 100644
index 00000000..f71122ec
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/adsp_lpthread.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+
+#include <linux/tegra_nvadsp.h>
+
+#include "dev.h"
+
+#define RW_MODE (S_IWUSR | S_IRUGO)
+
+enum adsp_lpthread_state {
+ ADSP_LPTHREAD_STOP,
+ ADSP_LPTHREAD_START,
+ ADSP_LPTHREAD_PAUSE,
+};
+
+struct adsp_lpthread_shared_state_t {
+ uint16_t mbox_id;
+};
+
+enum adsp_lpthread_mbx_cmd {
+ ADSP_LPTHREAD_CMD_RESUME = 0,
+ ADSP_LPTHREAD_CMD_PAUSE,
+ ADSP_LPTHREAD_CMD_CLOSE,
+};
+
+struct adsp_lpthread {
+ bool lpthread_initialized;
+ bool adsp_os_suspended;
+ bool lpthread_paused;
+ bool lpthread_resumed;
+ bool lpthread_closed;
+ nvadsp_app_handle_t app_handle;
+ nvadsp_app_info_t *app_info;
+};
+
+static struct adsp_lpthread lpthread_obj;
+static struct adsp_lpthread *lpthread;
+
+static struct nvadsp_mbox mbox;
+static struct adsp_lpthread_shared_state_t *adsp_lpthread;
+
+/* Initialize adsp_lpthread app and mailbox */
+int adsp_lpthread_init(bool is_adsp_suspended)
+{
+ nvadsp_app_handle_t handle;
+ nvadsp_app_info_t *app_info;
+ int ret;
+
+ handle = nvadsp_app_load("adsp_lpthread", "adsp_lpthread.elf");
+ if (!handle)
+ return -1;
+
+ app_info = nvadsp_app_init(handle, NULL);
+ if (IS_ERR_OR_NULL(app_info)) {
+ pr_err("unable to init app adsp_lpthread\n");
+ return -1;
+ }
+
+ ret = nvadsp_app_start(app_info);
+ if (ret) {
+ pr_err("unable to start app adsp_lpthread\n");
+ return -1;
+ }
+
+ lpthread->app_info = app_info;
+ lpthread->app_handle = handle;
+
+ adsp_lpthread =
+ (struct adsp_lpthread_shared_state_t *)app_info->mem.shared;
+ ret = nvadsp_mbox_open(&mbox, &adsp_lpthread->mbox_id,
+ "adsp_lpthread", NULL, NULL);
+ if (ret) {
+ pr_err("Failed to open mbox %d for adsp_lpthread app",
+ adsp_lpthread->mbox_id);
+ return -1;
+ }
+
+ /* Start the timer if the adsp is not in the suspended state */
+ if (!is_adsp_suspended) {
+ ret = adsp_lpthread_resume();
+ return ret;
+ }
+
+ return 0;
+}
+
+int adsp_lpthread_resume(void)
+{
+ int ret;
+
+ ret = nvadsp_mbox_send(&mbox, ADSP_LPTHREAD_CMD_RESUME,
+ NVADSP_MBOX_SMSG, 0, 0);
+ if (ret)
+ pr_err("%s: nvadsp_mbox_send() failed: %d, ret = %d\n",
+ __func__, adsp_lpthread->mbox_id, ret);
+
+ return ret;
+}
+
+int adsp_lpthread_pause(void)
+{
+ int ret;
+
+ ret = nvadsp_mbox_send(&mbox, ADSP_LPTHREAD_CMD_PAUSE,
+ NVADSP_MBOX_SMSG, 0, 0);
+ if (ret)
+ pr_err("%s: nvadsp_mbox_send() failed: %d, ret = %d\n",
+ __func__, adsp_lpthread->mbox_id, ret);
+
+ return ret;
+}
+
+int adsp_lpthread_uninit(void)
+{
+ int ret;
+
+ ret = nvadsp_mbox_send(&mbox, ADSP_LPTHREAD_CMD_CLOSE,
+ NVADSP_MBOX_SMSG, 0, 0);
+ if (ret)
+ pr_err("%s: nvadsp_mbox_send() failed: %d, ret = %d\n",
+ __func__, adsp_lpthread->mbox_id, ret);
+
+ nvadsp_mbox_close(&mbox);
+
+ nvadsp_exit_app((nvadsp_app_info_t *)lpthread->app_info, false);
+
+ nvadsp_app_unload((const void *)lpthread->app_handle);
+
+ return ret;
+}
+
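+/*
+ * adsp_usage_set - control the usage-measurement app from the adsp_usage
+ * interface: START initializes the app (or resumes it if it is already
+ * initialized), PAUSE suspends the measurement, and STOP tears the app
+ * down.
+ */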
+int adsp_usage_set(unsigned int val)
+{
+ int ret = 0;
+
+ switch (val) {
+
+ case ADSP_LPTHREAD_START:
+ if (lpthread->lpthread_initialized &&
+ lpthread->lpthread_resumed) {
+ pr_info("ADSP Usage App already running\n");
+ break;
+ }
+
+ if (!lpthread->lpthread_initialized) {
+ ret = adsp_lpthread_init(lpthread->adsp_os_suspended);
+ pr_info("Initializing lpthread\n");
+ lpthread->lpthread_initialized = true;
+ } else {
+ ret = adsp_lpthread_resume();
+ pr_info("Resuming lpthread\n");
+ }
+ lpthread->lpthread_resumed = true;
+ lpthread->lpthread_paused = false;
+ lpthread->lpthread_closed = false;
+ break;
+
+ case ADSP_LPTHREAD_PAUSE:
+ if (!lpthread->lpthread_initialized) {
+ pr_info("ADSP Usage App not initialized\n");
+ break;
+ }
+ pr_info("Pausing lpthread\n");
+ ret = adsp_lpthread_pause();
+ lpthread->lpthread_resumed = false;
+ lpthread->lpthread_paused = true;
+ lpthread->lpthread_closed = false;
+ break;
+
+ case ADSP_LPTHREAD_STOP:
+ if (!lpthread->lpthread_initialized) {
+ pr_info("ADSP Usage App not initialized\n");
+ break;
+ }
+ pr_info("Exiting lpthread\n");
+ ret = adsp_lpthread_uninit();
+ lpthread->lpthread_resumed = false;
+ lpthread->lpthread_paused = false;
+ lpthread->lpthread_closed = true;
+ lpthread->lpthread_initialized = false;
+ break;
+
+ default:
+ pr_err("ADSP Usage App: Invalid input\n");
+ ret = 0;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(adsp_usage_set);
+
+unsigned int adsp_usage_get(void)
+{
+ if (lpthread->lpthread_initialized && lpthread->lpthread_resumed)
+ return ADSP_LPTHREAD_START;
+
+ if (lpthread->lpthread_initialized && lpthread->lpthread_paused)
+ return ADSP_LPTHREAD_PAUSE;
+
+ return ADSP_LPTHREAD_STOP;
+}
+EXPORT_SYMBOL(adsp_usage_get);
+
+int adsp_lpthread_entry(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
+
+ lpthread = &lpthread_obj;
+
+ drv->lpthread_initialized = true;
+ lpthread->adsp_os_suspended = false;
+
+ return 0;
+}
+
+int adsp_lpthread_exit(struct platform_device *pdev)
+{
+ status_t ret = 0;
+ struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
+
+ if (!drv->lpthread_initialized)
+ ret = -EINVAL;
+ drv->lpthread_initialized = false;
+
+ return ret;
+}
+
+int adsp_lpthread_set_suspend(bool is_suspended)
+{
+ lpthread->adsp_os_suspended = is_suspended;
+ return 0;
+}
+
+int adsp_lpthread_get_state(void)
+{
+ if (lpthread->lpthread_initialized && lpthread->lpthread_resumed)
+ return 1;
+ else
+ return 0;
+}
diff --git a/drivers/platform/tegra/nvadsp/adsp_shared_struct.h b/drivers/platform/tegra/nvadsp/adsp_shared_struct.h
new file mode 100644
index 00000000..b7615a7e
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/adsp_shared_struct.h
@@ -0,0 +1,184 @@
+/*
+ * adsp_shared_struct.h
+ *
+ * A header file containing data structures shared with the ADSP OS
+ *
+ * Copyright (C) 2015-2022 NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ADSP_SHARED_STRUCT
+#define __ADSP_SHARED_STRUCT
+#include
+
+#define APP_LOADER_MBOX_ID 1
+
+#define ADSP_APP_FLAG_START_ON_BOOT 0x1
+
+#define ADSP_OS_LOAD_TIMEOUT 5000 /* 5000 ms */
+
+#define DRAM_DEBUG_LOG_SIZE 0x4000 /* 16 KB */
+
+#define NVADSP_NAME_SZ 128
+
+struct app_mem_size {
+ uint64_t dram;
+ uint64_t dram_shared;
+ uint64_t dram_shared_wc;
+ uint64_t aram;
+ uint64_t aram_x;
+} __packed;
+
+struct adsp_shared_app {
+ char name[NVADSP_NAME_SZ];
+ struct app_mem_size mem_size;
+ int32_t mod_ptr;
+ int32_t flags;
+ int32_t dram_data_ptr;
+ int32_t shared_data_ptr;
+ int32_t shared_wc_data_ptr;
+ char version[16];
+} __packed;
+
+/* ADSP app loader message queue */
+struct run_app_instance_data {
+ uint32_t adsp_mod_ptr;
+ uint64_t host_ref;
+ uint32_t adsp_ref;
+ uint32_t dram_data_ptr;
+ uint32_t dram_shared_ptr;
+ uint32_t dram_shared_wc_ptr;
+ uint32_t aram_ptr;
+ uint32_t aram_flag;
+ uint32_t aram_x_ptr;
+ uint32_t aram_x_flag;
+ struct app_mem_size mem_size;
+ nvadsp_app_args_t app_args;
+ uint32_t stack_size;
+ uint32_t core_id;
+ uint32_t message;
+} __packed;
+
+struct app_loader_data {
+ int32_t header[MSGQ_MESSAGE_HEADER_WSIZE];
+ struct run_app_instance_data app_init;
+} __packed;
+
+union app_loader_message {
+ msgq_message_t msgq_msg;
+ struct app_loader_data data;
+} __aligned(4);
+
+struct adsp_os_message_header {
+ int32_t header[MSGQ_MESSAGE_HEADER_WSIZE];
+ uint32_t message;
+} __packed;
+
+/* ADSP app complete message queue */
+struct app_complete_status_data {
+ struct adsp_os_message_header header;
+ uint64_t host_ref;
+ uint32_t adsp_ref;
+ int32_t status;
+} __packed;
+
+struct adsp_static_app_data {
+ struct adsp_os_message_header header;
+ struct adsp_shared_app shared_app;
+} __packed;
+
+union app_complete_status_message {
+ msgq_message_t msgq_msg;
+ struct app_complete_status_data complete_status_data;
+ struct adsp_static_app_data static_app_data;
+} __aligned(4);
+
+
+/* ADSP message pool structure */
+#define ADSP_MAX_MSGQ_SIZE 8192
+#define ADSP_MAX_MSGQ_WSIZE (ADSP_MAX_MSGQ_SIZE / sizeof(int32_t))
+#define ADSP_MSGQ_MAX_QUEUE_WSIZE \
+ (ADSP_MAX_MSGQ_WSIZE - (int32_t)MSGQ_HEADER_WSIZE)
+
+union app_loader_msgq {
+ msgq_t msgq;
+ struct {
+ int32_t header[MSGQ_HEADER_WSIZE];
+ int32_t queue[ADSP_MSGQ_MAX_QUEUE_WSIZE];
+ };
+};
+
+/* ADSP APP shared message pool */
+#pragma pack(8)
+struct nvadsp_app_shared_msg_pool {
+ union app_loader_msgq app_loader_send_message;
+ union app_loader_msgq app_loader_recv_message;
+};
+#pragma pack()
+
+/* ADSP shared OS args */
+struct nvadsp_os_args {
+ uint64_t adsp_freq_hz;
+ int32_t timer_prescalar;
+ char logger[DRAM_DEBUG_LOG_SIZE];
+ uint32_t dynamic_app_support;
+ uint32_t chip_id;
+ char reserved[120];
+} __packed;
+
+/* ARM MODE REGS */
+struct arm_mode_regs_shared {
+ uint32_t fiq_r13, fiq_r14;
+ uint32_t irq_r13, irq_r14;
+ uint32_t svc_r13, svc_r14;
+ uint32_t abt_r13, abt_r14;
+ uint32_t und_r13, und_r14;
+ uint32_t sys_r13, sys_r14;
+} __packed;
+
+/* ARM FAULT FRAME */
+struct arm_fault_frame_shared {
+ uint32_t spsr;
+ uint32_t usp;
+ uint32_t ulr;
+ uint32_t r[13];
+ uint32_t pc;
+} __packed;
+
+/* ADSP ARM EXCEPTION CONTEXT */
+struct nvadsp_exception_context {
+ struct arm_fault_frame_shared frame;
+ struct arm_mode_regs_shared regs;
+ uint32_t stack_addr;
+ uint32_t stack_dump[32];
+ uint32_t exception_reason;
+} __packed;
+
+/* ADSP OS info/status. Keep in sync with firmware. */
+#define MAX_OS_VERSION_BUF 32
+struct nvadsp_os_info {
+ char version[MAX_OS_VERSION_BUF];
+ char reserved[128];
+} __packed;
+
+/* ADSP OS shared memory */
+#pragma pack(8)
+struct nvadsp_shared_mem {
+ struct nvadsp_app_shared_msg_pool app_shared_msg_pool;
+ struct nvadsp_os_args os_args;
+ struct nvadsp_os_info os_info;
+ struct nvadsp_exception_context exception_context;
+};
+#pragma pack()
+
+
+#endif /* __ADSP_SHARED_STRUCT */
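
Because the layout above has to match the ADSP firmware byte for byte, a host-side compile-time sanity check along the following lines can catch accidental padding changes; this helper is hypothetical and not part of the patch:

    #include <linux/bug.h>
    #include <linux/stddef.h>

    /* Illustration only: verify one layout invariant of the shared memory. */
    static inline void nvadsp_shared_mem_layout_check(void)
    {
            /* os_args is expected to start right after the message pool */
            BUILD_BUG_ON(offsetof(struct nvadsp_shared_mem, os_args) !=
                         sizeof(struct nvadsp_app_shared_msg_pool));
    }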
diff --git a/drivers/platform/tegra/nvadsp/adspff.c b/drivers/platform/tegra/nvadsp/adspff.c
new file mode 100644
index 00000000..acca6cf4
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/adspff.c
@@ -0,0 +1,716 @@
+/*
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#define pr_fmt(fmt) "adspff: " fmt
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#include "adspff.h"
+#include "dev.h"
+
+
+#define ADSPFF_MAX_OPEN_FILES (32)
+
+struct file_struct {
+ struct file *fp;
+ uint8_t file_name[ADSPFF_MAX_FILENAME_SIZE];
+ unsigned int flags;
+ unsigned long long wr_offset;
+ unsigned long long rd_offset;
+ struct list_head list;
+};
+
+static struct list_head file_list;
+static spinlock_t adspff_lock;
+static int open_count;
+
+/******************************************************************************
+* Kernel file functions
+******************************************************************************/
+
+static struct file *file_open(const char *path, int flags, int rights)
+{
+ struct file *filp = NULL;
+ mm_segment_t oldfs;
+ int err = 0;
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ filp = filp_open(path, flags, rights);
+ set_fs(oldfs);
+ if (IS_ERR(filp)) {
+ err = PTR_ERR(filp);
+ return NULL;
+ }
+ return filp;
+}
+
+static void file_close(struct file *file)
+{
+ filp_close(file, NULL);
+}
+
+static int file_write(struct file *file, unsigned long long *offset,
+ unsigned char *data, unsigned int size)
+{
+ mm_segment_t oldfs;
+ int ret = 0;
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+
+ ret = vfs_write(file, (const char __user *)data, size, offset);
+
+ set_fs(oldfs);
+ return ret;
+}
+
+static uint32_t file_read(struct file *file, unsigned long long *offset,
+ unsigned char *data, unsigned int size)
+{
+ mm_segment_t oldfs;
+ uint32_t ret = 0;
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+
+ ret = vfs_read(file, (char __user *)data, size, offset);
+
+ set_fs(oldfs);
+
+ return ret;
+}
+
+static uint32_t file_size(struct file *file)
+{
+ mm_segment_t oldfs;
+ uint32_t size = 0;
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+
+ size = vfs_llseek(file, 0, SEEK_END);
+
+ vfs_llseek(file, 0, SEEK_SET);
+
+ set_fs(oldfs);
+
+ return size;
+}
+
+/******************************************************************************
+* ADSPFF file functions
+******************************************************************************/
+
+static struct adspff_shared_state_t *adspff;
+static struct nvadsp_mbox rx_mbox;
+
+/*
+ * Supported fopen() modes (mirroring the C library semantics):
+ * r  - open for reading
+ * w  - open for writing (file need not exist)
+ * a  - open for appending (file need not exist)
+ * r+ - open for reading and writing, start at beginning
+ * w+ - open for reading and writing (overwrite file)
+ * a+ - open for reading and writing (append if file exists)
+ */
+
+static void set_flags(union adspff_message_t *m, unsigned int *flags)
+{
+ if (0 == strcmp(m->msg.payload.fopen_msg.modes, "r+"))
+ *flags = O_RDWR;
+
+ else if (0 == strcmp(m->msg.payload.fopen_msg.modes, "w+"))
+ *flags = O_CREAT | O_RDWR | O_TRUNC;
+
+ else if (0 == strcmp(m->msg.payload.fopen_msg.modes, "a+"))
+ *flags = O_APPEND | O_RDWR;
+
+ else if (0 == strcmp(m->msg.payload.fopen_msg.modes, "r"))
+ *flags = O_RDONLY;
+
+ else if (0 == strcmp(m->msg.payload.fopen_msg.modes, "w"))
+ *flags = O_CREAT | O_WRONLY | O_TRUNC;
+
+ else if (0 == strcmp(m->msg.payload.fopen_msg.modes, "a"))
+ *flags = O_CREAT | O_APPEND | O_WRONLY;
+
+ else
+ *flags = O_CREAT | O_RDWR;
+}
+
+/*
+ * Checks whether the file is already open.
+ * If so, returns its struct file_struct.
+ * If not, allocates a new file_struct, adds it to the list
+ * and returns a pointer to the newly allocated file_struct.
+ * Returns NULL if ADSPFF_MAX_OPEN_FILES files are already open.
+ */
+static struct file_struct *check_file_opened(const char *path)
+{
+ struct file_struct *file = NULL;
+ struct list_head *pos;
+
+ /* assuming files opened by ADSP will
+ * never be actually closed in kernel
+ */
+ list_for_each(pos, &file_list) {
+ file = list_entry(pos, struct file_struct, list);
+ if (!file->fp)
+ break;
+ if (!strncmp(path, file->file_name,
+ ADSPFF_MAX_FILENAME_SIZE)) {
+ break;
+ }
+ file = NULL;
+ }
+
+ if (file != NULL)
+ return file;
+
+ if (open_count == ADSPFF_MAX_OPEN_FILES) {
+ pr_err("adspff: %d files already opened\n",
+ ADSPFF_MAX_OPEN_FILES);
+ file = NULL;
+ } else {
+ file = kzalloc(sizeof(*file), GFP_KERNEL);
+ if (file) {
+ open_count++;
+ list_add_tail(&file->list, &file_list);
+ }
+ }
+ return file;
+}
+
+static void adspff_fopen(void)
+{
+ union adspff_message_t *message;
+ union adspff_message_t *msg_recv;
+ unsigned int flags = 0;
+ int ret = 0;
+ struct file_struct *file;
+
+
+ message = kzalloc(sizeof(union adspff_message_t), GFP_KERNEL);
+ if (!message)
+ return;
+
+
+ msg_recv = kzalloc(sizeof(union adspff_message_t), GFP_KERNEL);
+ if (!msg_recv) {
+ kfree(message);
+ return;
+ }
+
+ message->msgq_msg.size = MSGQ_MSG_SIZE(struct fopen_msg_t);
+
+ ret = msgq_dequeue_message(&adspff->msgq_send.msgq,
+ (msgq_message_t *)message);
+
+ if (ret < 0) {
+ pr_err("fopen Dequeue failed %d.", ret);
+ kfree(message);
+ kfree(msg_recv);
+ return;
+ }
+
+ file = check_file_opened(message->msg.payload.fopen_msg.fname);
+ if (file && !file->fp) {
+ /* open a new file */
+ set_flags(message, &flags);
+ pr_info("adspff: opening file %s\n",
+ message->msg.payload.fopen_msg.fname);
+
+ file->fp = file_open(
+ (const char *)message->msg.payload.fopen_msg.fname,
+ flags, 0777); /* S_IRWXU | S_IRWXG | S_IRWXO */
+
+ file->wr_offset = 0;
+ file->rd_offset = 0;
+ memcpy(file->file_name,
+ message->msg.payload.fopen_msg.fname,
+ ADSPFF_MAX_FILENAME_SIZE);
+ file->flags = flags;
+ }
+
+ if (file && !file->fp) {
+ file = NULL;
+ pr_err("File not found - %s\n",
+ (const char *) message->msg.payload.fopen_msg.fname);
+ }
+
+ msg_recv->msgq_msg.size = MSGQ_MSG_SIZE(struct fopen_recv_msg_t);
+ msg_recv->msg.payload.fopen_recv_msg.file = (int64_t)file;
+
+ ret = msgq_queue_message(&adspff->msgq_recv.msgq,
+ (msgq_message_t *)msg_recv);
+ if (ret < 0) {
+ pr_err("fopen Enqueue failed %d.", ret);
+
+ if (file) {
+ file_close(file->fp);
+ file->fp = NULL;
+ }
+
+ kfree(message);
+ kfree(msg_recv);
+ return;
+ }
+
+ nvadsp_mbox_send(&rx_mbox, adspff_cmd_fopen_recv,
+ NVADSP_MBOX_SMSG, 0, 0);
+ kfree(message);
+ kfree(msg_recv);
+}
+
+static inline unsigned int is_read_file(struct file_struct *file)
+{
+ return ((!file->flags) || (file->flags & O_RDWR));
+}
+
+static inline unsigned int is_write_file(struct file_struct *file)
+{
+ return file->flags & (O_WRONLY | O_RDWR);
+}
+
+static void adspff_fclose(void)
+{
+ union adspff_message_t *message;
+ struct file_struct *file = NULL;
+ int32_t ret = 0;
+
+ message = kzalloc(sizeof(union adspff_message_t), GFP_KERNEL);
+ if (!message)
+ return;
+
+ message->msgq_msg.size = MSGQ_MSG_SIZE(struct fclose_msg_t);
+
+ ret = msgq_dequeue_message(&adspff->msgq_send.msgq,
+ (msgq_message_t *)message);
+
+ if (ret < 0) {
+ pr_err("fclose Dequeue failed %d.", ret);
+ kfree(message);
+ return;
+ }
+
+ file = (struct file_struct *)message->msg.payload.fclose_msg.file;
+ if (file) {
+ if ((file->flags & O_APPEND) == 0) {
+ if (is_read_file(file))
+ file->rd_offset = 0;
+ if (is_write_file(file))
+ file->wr_offset = 0;
+ }
+ }
+ kfree(message);
+}
+
+static void adspff_fsize(void)
+{
+ union adspff_message_t *msg_recv;
+ union adspff_message_t message;
+ struct file_struct *file = NULL;
+ int32_t ret = 0;
+ uint32_t size = 0;
+
+ msg_recv = kzalloc(sizeof(union adspff_message_t), GFP_KERNEL);
+ if (!msg_recv)
+ return;
+
+ msg_recv->msgq_msg.size = MSGQ_MSG_SIZE(struct ack_msg_t);
+
+ message.msgq_msg.size = MSGQ_MSG_SIZE(struct fsize_msg_t);
+ ret = msgq_dequeue_message(&adspff->msgq_send.msgq,
+ (msgq_message_t *)&message);
+
+ if (ret < 0) {
+ pr_err("fsize Dequeue failed %d.", ret);
+ kfree(msg_recv);
+ return;
+ }
+ file = (struct file_struct *)message.msg.payload.fsize_msg.file;
+ if (file) {
+ size = file_size(file->fp);
+ }
+
+ /* send ack */
+ msg_recv->msg.payload.ack_msg.size = size;
+ ret = msgq_queue_message(&adspff->msgq_recv.msgq,
+ (msgq_message_t *)msg_recv);
+
+ if (ret < 0) {
+ pr_err("fsize Enqueue failed %d.", ret);
+ kfree(msg_recv);
+ return;
+ }
+ nvadsp_mbox_send(&rx_mbox, adspff_cmd_ack,
+ NVADSP_MBOX_SMSG, 0, 0);
+ kfree(msg_recv);
+}
+
+static void adspff_fwrite(void)
+{
+ union adspff_message_t message;
+ union adspff_message_t *msg_recv;
+ struct file_struct *file = NULL;
+ int ret = 0;
+ uint32_t size = 0;
+ uint32_t bytes_to_write = 0;
+ uint32_t bytes_written = 0;
+
+ msg_recv = kzalloc(sizeof(union adspff_message_t), GFP_KERNEL);
+ if (!msg_recv)
+ return;
+
+ msg_recv->msgq_msg.size = MSGQ_MSG_SIZE(struct ack_msg_t);
+
+ message.msgq_msg.size = MSGQ_MSG_SIZE(struct fwrite_msg_t);
+ ret = msgq_dequeue_message(&adspff->msgq_send.msgq,
+ (msgq_message_t *)&message);
+ if (ret < 0) {
+ pr_err("fwrite Dequeue failed %d.", ret);
+ kfree(msg_recv);
+ return;
+ }
+
+ file = (struct file_struct *)message.msg.payload.fwrite_msg.file;
+ size = message.msg.payload.fwrite_msg.size;
+
+ bytes_to_write = ((adspff->write_buf.read_index + size) < ADSPFF_SHARED_BUFFER_SIZE) ?
+ size : (ADSPFF_SHARED_BUFFER_SIZE - adspff->write_buf.read_index);
+ ret = file_write(file->fp, &file->wr_offset,
+ adspff->write_buf.data + adspff->write_buf.read_index, bytes_to_write);
+ bytes_written += ret;
+
+ if ((size - bytes_to_write) > 0) {
+ ret = file_write(file->fp, &file->wr_offset,
+ adspff->write_buf.data, size - bytes_to_write);
+ bytes_written += ret;
+ }
+
+ adspff->write_buf.read_index =
+ (adspff->write_buf.read_index + size) % ADSPFF_SHARED_BUFFER_SIZE;
+
+ /* send ack */
+ msg_recv->msg.payload.ack_msg.size = bytes_written;
+ ret = msgq_queue_message(&adspff->msgq_recv.msgq,
+ (msgq_message_t *)msg_recv);
+
+ if (ret < 0) {
+ pr_err("adspff: fwrite Enqueue failed %d.", ret);
+ kfree(msg_recv);
+ return;
+ }
+ nvadsp_mbox_send(&rx_mbox, adspff_cmd_ack,
+ NVADSP_MBOX_SMSG, 0, 0);
+ kfree(msg_recv);
+}
+
+static void adspff_fread(void)
+{
+ union adspff_message_t *message;
+ union adspff_message_t *msg_recv;
+ struct file_struct *file = NULL;
+ uint32_t bytes_free;
+ uint32_t wi = adspff->read_buf.write_index;
+ uint32_t ri = adspff->read_buf.read_index;
+ uint8_t can_wrap = 0;
+ uint32_t size = 0, size_read = 0;
+ int32_t ret = 0;
+
+ if (ri <= wi) {
+ bytes_free = ADSPFF_SHARED_BUFFER_SIZE - wi + ri - 1;
+ can_wrap = 1;
+ } else {
+ bytes_free = ri - wi - 1;
+ can_wrap = 0;
+ }
+ message = kzalloc(sizeof(union adspff_message_t), GFP_KERNEL);
+ if (!message)
+ return;
+
+ msg_recv = kzalloc(sizeof(union adspff_message_t), GFP_KERNEL);
+ if (!msg_recv) {
+ kfree(message);
+ return;
+ }
+
+ msg_recv->msgq_msg.size = MSGQ_MSG_SIZE(struct ack_msg_t);
+ message->msgq_msg.size = MSGQ_MSG_SIZE(struct fread_msg_t);
+
+ ret = msgq_dequeue_message(&adspff->msgq_send.msgq,
+ (msgq_message_t *)message);
+
+ if (ret < 0) {
+ pr_err("fread Dequeue failed %d.", ret);
+ kfree(message);
+ kfree(msg_recv);
+ return;
+ }
+
+ file = (struct file_struct *)message->msg.payload.fread_msg.file;
+ size = message->msg.payload.fread_msg.size;
+ if (bytes_free < size) {
+ size_read = 0;
+ goto send_ack;
+ }
+
+ if (can_wrap) {
+ uint32_t bytes_to_read = (size < (ADSPFF_SHARED_BUFFER_SIZE - wi)) ?
+ size : (ADSPFF_SHARED_BUFFER_SIZE - wi);
+ ret = file_read(file->fp, &file->rd_offset,
+ adspff->read_buf.data + wi, bytes_to_read);
+ size_read = ret;
+ if (ret < bytes_to_read)
+ goto send_ack;
+ if ((size - bytes_to_read) > 0) {
+ ret = file_read(file->fp, &file->rd_offset,
+ adspff->read_buf.data, size - bytes_to_read);
+ size_read += ret;
+ goto send_ack;
+ }
+ } else {
+ ret = file_read(file->fp, &file->rd_offset,
+ adspff->read_buf.data + wi, size);
+ size_read = ret;
+ goto send_ack;
+ }
+send_ack:
+ msg_recv->msg.payload.ack_msg.size = size_read;
+ ret = msgq_queue_message(&adspff->msgq_recv.msgq,
+ (msgq_message_t *)msg_recv);
+
+ if (ret < 0) {
+ pr_err("fread Enqueue failed %d.", ret);
+ kfree(message);
+ kfree(msg_recv);
+ return;
+ }
+ adspff->read_buf.write_index =
+ (adspff->read_buf.write_index + size_read) % ADSPFF_SHARED_BUFFER_SIZE;
+
+ nvadsp_mbox_send(&rx_mbox, adspff_cmd_ack,
+ NVADSP_MBOX_SMSG, 0, 0);
+ kfree(message);
+ kfree(msg_recv);
+}
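
The index arithmetic at the top of adspff_fread() is the usual one-slot-reserved ring-buffer rule (the producer side of write_buf presumably applies the same rule on the ADSP); a compact restatement, assuming the shared read_buf/write_buf indices used above:

    /* Illustration only: free bytes in the shared ring. One byte is kept
     * unused so that read_index == write_index always means "empty".
     * With ADSPFF_SHARED_BUFFER_SIZE = 131072:
     *   ri = 100, wi = 200 -> 131072 - 200 + 100 - 1 = 130971 (may wrap)
     *   ri = 300, wi = 200 -> 300 - 200 - 1 = 99 (no wrap)
     */
    static inline uint32_t adspff_ring_bytes_free(uint32_t ri, uint32_t wi)
    {
            return (ri <= wi) ? (ADSPFF_SHARED_BUFFER_SIZE - wi + ri - 1)
                              : (ri - wi - 1);
    }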
+
+#if KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE
+static const struct sched_param param = {
+ .sched_priority = 1,
+};
+#endif
+static struct task_struct *adspff_kthread;
+static struct list_head adspff_kthread_msgq_head;
+static wait_queue_head_t wait_queue;
+
+struct adspff_kthread_msg {
+ uint32_t msg_id;
+ struct list_head list;
+};
+
+
+static int adspff_kthread_fn(void *data)
+{
+ int ret = 0;
+ struct adspff_kthread_msg *kmsg;
+ unsigned long flags;
+
+ while (1) {
+
+ ret = wait_event_interruptible(wait_queue, kthread_should_stop()
+ || !list_empty(&adspff_kthread_msgq_head));
+
+ if (kthread_should_stop())
+ do_exit(0);
+
+ if (!list_empty(&adspff_kthread_msgq_head)) {
+ kmsg = list_first_entry(&adspff_kthread_msgq_head,
+ struct adspff_kthread_msg, list);
+ switch (kmsg->msg_id) {
+ case adspff_cmd_fopen:
+ adspff_fopen();
+ break;
+ case adspff_cmd_fclose:
+ adspff_fclose();
+ break;
+ case adspff_cmd_fwrite:
+ adspff_fwrite();
+ break;
+ case adspff_cmd_fread:
+ adspff_fread();
+ break;
+ case adspff_cmd_fsize:
+ adspff_fsize();
+ break;
+ default:
+ pr_warn("adspff: kthread unsupported msg %d\n",
+ kmsg->msg_id);
+ }
+ spin_lock_irqsave(&adspff_lock, flags);
+ list_del(&kmsg->list);
+ spin_unlock_irqrestore(&adspff_lock, flags);
+ kfree(kmsg);
+ }
+ }
+
+ do_exit(ret);
+}
+
+/******************************************************************************
+* ADSP mailbox message handler
+******************************************************************************/
+
+
+static int adspff_msg_handler(uint32_t msg, void *data)
+{
+ unsigned long flags;
+ struct adspff_kthread_msg *kmsg;
+
+ spin_lock_irqsave(&adspff_lock, flags);
+ kmsg = kzalloc(sizeof(*kmsg), GFP_ATOMIC);
+ if (!kmsg) {
+ spin_unlock_irqrestore(&adspff_lock, flags);
+ return -ENOMEM;
+ }
+
+ kmsg->msg_id = msg;
+ list_add_tail(&kmsg->list, &adspff_kthread_msgq_head);
+ wake_up(&wait_queue);
+ spin_unlock_irqrestore(&adspff_lock, flags);
+
+ return 0;
+}
+
+static int adspff_set(void *data, u64 val)
+{
+ struct file_struct *file;
+ struct list_head *pos, *n;
+
+ if (val != 1)
+ return 0;
+ list_for_each_safe(pos, n, &file_list) {
+ file = list_entry(pos, struct file_struct, list);
+ list_del(pos);
+ if (file->fp)
+ file_close(file->fp);
+ kfree(file);
+ }
+
+ open_count = 0;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(adspff_fops, NULL, adspff_set, "%llu\n");
+
+#ifdef CONFIG_DEBUG_FS
+static int adspff_debugfs_init(struct nvadsp_drv_data *drv)
+{
+ int ret = -ENOMEM;
+ struct dentry *d, *dir;
+
+ if (!drv->adsp_debugfs_root)
+ return ret;
+ dir = debugfs_create_dir("adspff", drv->adsp_debugfs_root);
+ if (!dir)
+ return ret;
+
+ d = debugfs_create_file(
+ "close_files", 0200, /* S_IWUSR */
+ dir, NULL, &adspff_fops);
+ if (!d)
+ return ret;
+
+ return 0;
+}
+#endif
+
+int adspff_init(struct platform_device *pdev)
+{
+ int ret = 0;
+ nvadsp_app_handle_t handle;
+ nvadsp_app_info_t *app_info;
+
+#ifdef CONFIG_DEBUG_FS
+ struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
+#endif
+
+ handle = nvadsp_app_load("adspff", "adspff.elf");
+ if (!handle)
+ return -ENOENT;
+
+ app_info = nvadsp_app_init(handle, NULL);
+ if (IS_ERR_OR_NULL(app_info)) {
+ pr_err("unable to init app adspff\n");
+ return -1;
+ }
+
+ adspff_kthread = kthread_create(adspff_kthread_fn,
+ NULL, "adspff_kthread");
+ if (IS_ERR(adspff_kthread)) {
+ pr_err("kthread_create failed, error = %ld\n",
+ PTR_ERR(adspff_kthread));
+ return -1;
+ }
+
+ adspff = ADSPFF_SHARED_STATE(app_info->mem.shared);
+
+ ret = nvadsp_mbox_open(&rx_mbox, &adspff->mbox_id,
+ "adspff", adspff_msg_handler, NULL);
+
+ if (ret < 0) {
+ pr_err("Failed to open mbox %d", adspff->mbox_id);
+ return -1;
+ }
+
+ spin_lock_init(&adspff_lock);
+
+#ifdef CONFIG_DEBUG_FS
+ ret = adspff_debugfs_init(drv);
+ if (ret)
+ pr_warn("adspff: failed to create debugfs entry\n");
+#endif
+
+ INIT_LIST_HEAD(&adspff_kthread_msgq_head);
+ INIT_LIST_HEAD(&file_list);
+
+ init_waitqueue_head(&wait_queue);
+
+#if KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE
+ sched_setscheduler(adspff_kthread, SCHED_FIFO, ¶m);
+#else
+ sched_set_fifo_low(adspff_kthread);
+#endif
+
+ get_task_struct(adspff_kthread);
+ wake_up_process(adspff_kthread);
+
+ return ret;
+}
+
+void adspff_exit(void)
+{
+ nvadsp_mbox_close(&rx_mbox);
+ kthread_stop(adspff_kthread);
+ put_task_struct(adspff_kthread);
+}
diff --git a/drivers/platform/tegra/nvadsp/adspff.h b/drivers/platform/tegra/nvadsp/adspff.h
new file mode 100644
index 00000000..05cd40ee
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/adspff.h
@@ -0,0 +1,145 @@
+/*
+* tegra_adspff.h - Shared ADSPFF interface between Tegra ADSP File
+* System driver and ADSP side user space code.
+* Copyright (c) 2016-2019 NVIDIA Corporation. All rights reserved.
+*
+* NVIDIA Corporation and its licensors retain all intellectual property
+* and proprietary rights in and to this software, related documentation
+* and any modifications thereto. Any use, reproduction, disclosure or
+* distribution of this software and related documentation without an express
+* license agreement from NVIDIA Corporation is strictly prohibited.
+*/
+
+
+#ifndef _TEGRA_ADSPFF_H_
+#define _TEGRA_ADSPFF_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/******************************************************************************
+* Defines
+******************************************************************************/
+
+
+/* TODO: fine tuning */
+#define ADSPFF_MSG_QUEUE_WSIZE 1024
+#define ADSPFF_WRITE_DATA_SIZE 512
+#define ADSPFF_READ_DATA_SIZE 1024
+#define ADSPFF_SHARED_BUFFER_SIZE (128 * 1024)
+#define ADSPFF_MAX_FILENAME_SIZE (250)
+
+/**
+ * enum adspff_mbx_cmd - commands exchanged over the mailbox.
+ *
+ * @adspff_cmd_fopen: open file on host
+ * @adspff_cmd_fclose: close file on host
+ * @adspff_cmd_fwrite: write data in an open file on host
+ * @adspff_cmd_fread: read data from an open file on host
+ * @adspff_cmd_fopen_recv: reply to fopen carrying the host file handle
+ * @adspff_cmd_ack: acknowledge a read/write/size request with its result
+ * @adspff_cmd_fsize: query the size of an open file on host
+ */
+
+enum adspff_mbx_cmd {
+ adspff_cmd_fopen = 0,
+ adspff_cmd_fclose,
+ adspff_cmd_fwrite,
+ adspff_cmd_fread,
+ adspff_cmd_fopen_recv,
+ adspff_cmd_ack,
+ adspff_cmd_fsize,
+};
+
+
+/******************************************************************************
+* Types
+******************************************************************************/
+
+/* supported message payloads */
+struct fopen_msg_t {
+ uint8_t fname[ADSPFF_MAX_FILENAME_SIZE];
+ uint8_t modes[3];
+};
+
+struct fwrite_msg_t {
+ int64_t file;
+ int32_t size;
+};
+
+struct fread_msg_t {
+ int64_t file;
+ int32_t size;
+};
+
+struct fclose_msg_t {
+ int64_t file;
+};
+
+struct fopen_recv_msg_t {
+ int64_t file;
+};
+
+struct fsize_msg_t {
+ int64_t file;
+};
+
+struct ack_msg_t {
+ int32_t size;
+};
+
+#pragma pack(4)
+/* app message definition */
+union adspff_message_t {
+ msgq_message_t msgq_msg;
+ struct {
+ int32_t header[MSGQ_MESSAGE_HEADER_WSIZE];
+ union {
+ struct fopen_msg_t fopen_msg;
+ struct fwrite_msg_t fwrite_msg;
+ struct fread_msg_t fread_msg;
+ struct fclose_msg_t fclose_msg;
+ struct fopen_recv_msg_t fopen_recv_msg;
+ struct ack_msg_t ack_msg;
+ struct fsize_msg_t fsize_msg;
+ } payload;
+ } msg;
+};
+
+/* app queue definition */
+union adspff_msgq_t {
+ msgq_t msgq;
+ struct {
+ int32_t header[MSGQ_HEADER_WSIZE];
+ int32_t queue[ADSPFF_MSG_QUEUE_WSIZE];
+ } app_msgq;
+};
+#pragma pack()
+
+#define MSGQ_MSG_SIZE(x) \
+(((sizeof(x) + sizeof(int32_t) - 1) & (~(sizeof(int32_t)-1))) >> 2)
+
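MSGQ_MSG_SIZE() rounds a payload up to whole 32-bit words; two worked examples based on the payload structs above (the sizes assume no compiler padding, which holds for these members):

    /* MSGQ_MSG_SIZE(struct ack_msg_t)   = ((4 + 3) & ~3) >> 2   = 1 word
     * MSGQ_MSG_SIZE(struct fopen_msg_t) = ((253 + 3) & ~3) >> 2 = 64 words
     */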
+
+/**
+ * ADSPFF state structure shared between ADSP & CPU
+ */
+typedef struct {
+ uint32_t write_index;
+ uint32_t read_index;
+ uint8_t data[ADSPFF_SHARED_BUFFER_SIZE];
+} adspff_shared_buffer_t;
+
+struct adspff_shared_state_t {
+ uint16_t mbox_id;
+ union adspff_msgq_t msgq_recv;
+ union adspff_msgq_t msgq_send;
+ adspff_shared_buffer_t write_buf;
+ adspff_shared_buffer_t read_buf;
+};
+
+#define ADSPFF_SHARED_STATE(x) \
+((struct adspff_shared_state_t *)x)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TEGRA_ADSPFF_H_ */
diff --git a/drivers/platform/tegra/nvadsp/amc.c b/drivers/platform/tegra/nvadsp/amc.c
new file mode 100644
index 00000000..64db3d0c
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/amc.c
@@ -0,0 +1,201 @@
+/*
+ * amc.c
+ *
+ * AMC and ARAM handling
+ *
+ * Copyright (C) 2014-2021, NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#if KERNEL_VERSION(4, 15, 0) > LINUX_VERSION_CODE
+#include
+#else
+#include
+#endif
+
+#include "dev.h"
+#include "amc.h"
+
+static struct platform_device *nvadsp_pdev;
+static struct nvadsp_drv_data *nvadsp_drv_data;
+
+static inline u32 amc_readl(u32 reg)
+{
+ return readl(nvadsp_drv_data->base_regs[AMC] + reg);
+}
+
+static inline void amc_writel(u32 val, u32 reg)
+{
+ writel(val, nvadsp_drv_data->base_regs[AMC] + reg);
+}
+
+static void wmemcpy_to_aram(u32 to_aram, const u32 *from_mem, size_t wlen)
+{
+ u32 base, offset;
+
+ base = to_aram & AMC_ARAM_APERTURE_DATA_LEN;
+ amc_writel(base, AMC_ARAM_APERTURE_BASE);
+
+ offset = to_aram % AMC_ARAM_APERTURE_DATA_LEN;
+
+ while (wlen--) {
+ if (offset == AMC_ARAM_APERTURE_DATA_LEN) {
+ base += AMC_ARAM_APERTURE_DATA_LEN;
+ amc_writel(base, AMC_ARAM_APERTURE_BASE);
+ offset = 0;
+ }
+
+ amc_writel(*from_mem, AMC_ARAM_APERTURE_DATA_START + offset);
+ from_mem++;
+ offset += 4;
+ }
+}
+
+static void wmemcpy_from_aram(u32 *to_mem, const u32 from_aram, size_t wlen)
+{
+ u32 base, offset;
+
+ base = from_aram & AMC_ARAM_APERTURE_DATA_LEN;
+ amc_writel(base, AMC_ARAM_APERTURE_BASE);
+
+ offset = from_aram % AMC_ARAM_APERTURE_DATA_LEN;
+
+ while (wlen--) {
+ if (offset == AMC_ARAM_APERTURE_DATA_LEN) {
+ base += AMC_ARAM_APERTURE_DATA_LEN;
+ amc_writel(base, AMC_ARAM_APERTURE_BASE);
+ offset = 0;
+ }
+
+ *to_mem = amc_readl(AMC_ARAM_APERTURE_DATA_START + offset);
+ to_mem++;
+ offset += 4;
+ }
+}
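
Both ARAM copy helpers stream data through a small aperture, re-programming the base register whenever the window is exhausted; rough numbers for a full save/restore pass:

    /* Illustration only: the aperture window is AMC_ARAM_APERTURE_DATA_LEN =
     * 0x800 bytes (512 words), and a full pass moves AMC_ARAM_WSIZE =
     * 64 KiB / 4 = 16384 words, so AMC_ARAM_APERTURE_BASE is written
     * 16384 / 512 = 32 times per pass.
     */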
+
+int nvadsp_aram_save(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *d = platform_get_drvdata(pdev);
+
+ wmemcpy_from_aram(d->state.aram, AMC_ARAM_START, AMC_ARAM_WSIZE);
+ return 0;
+}
+
+int nvadsp_aram_restore(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *ndd = platform_get_drvdata(pdev);
+
+ wmemcpy_to_aram(AMC_ARAM_START, ndd->state.aram, AMC_ARAM_WSIZE);
+ return 0;
+}
+
+int nvadsp_amc_save(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *d = platform_get_drvdata(pdev);
+ u32 val, offset = 0;
+ int i = 0;
+
+ offset = 0x0;
+ val = readl(d->base_regs[AMC] + offset);
+ d->state.amc_regs[i++] = val;
+
+ offset = 0x8;
+ val = readl(d->base_regs[AMC] + offset);
+ d->state.amc_regs[i++] = val;
+
+ return 0;
+}
+
+int nvadsp_amc_restore(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *d = platform_get_drvdata(pdev);
+ u32 val, offset = 0;
+ int i = 0;
+
+ offset = 0x0;
+ val = d->state.amc_regs[i++];
+ writel(val, d->base_regs[AMC] + offset);
+
+ offset = 0x8;
+ val = d->state.amc_regs[i++];
+ writel(val, d->base_regs[AMC] + offset);
+
+ return 0;
+}
+
+static irqreturn_t nvadsp_amc_error_int_handler(int irq, void *devid)
+{
+ u32 val, addr, status, intr = 0;
+
+ status = amc_readl(AMC_INT_STATUS);
+ addr = amc_readl(AMC_ERROR_ADDR);
+
+ if (status & AMC_INT_STATUS_ARAM) {
+ /*
+ * Ignore addresses lower than AMC_ERROR_ADDR_IGNORE (4k),
+ * as those are spurious ones caused by a hardware issue.
+ */
+ if (!(nvadsp_drv_data->chip_data->amc_err_war) ||
+ (addr > AMC_ERROR_ADDR_IGNORE))
+ pr_info("nvadsp: invalid ARAM access. address: 0x%x\n",
+ addr);
+
+ intr |= AMC_INT_INVALID_ARAM_ACCESS;
+ }
+
+ if (status & AMC_INT_STATUS_REG) {
+ pr_info("nvadsp: invalid AMC reg access. address: 0x%x\n",
+ addr);
+ intr |= AMC_INT_INVALID_REG_ACCESS;
+ }
+
+ val = amc_readl(AMC_INT_CLR);
+ val |= intr;
+ amc_writel(val, AMC_INT_CLR);
+
+ return IRQ_HANDLED;
+}
+
+void nvadsp_free_amc_interrupts(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct device_node *node;
+
+ node = dev->of_node;
+
+ if (!is_tegra_hypervisor_mode())
+ devm_free_irq(dev, drv->agic_irqs[AMC_ERR_VIRQ], pdev);
+}
+
+int nvadsp_setup_amc_interrupts(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct device_node *node;
+ int ret = 0;
+
+ node = dev->of_node;
+ nvadsp_pdev = pdev;
+ nvadsp_drv_data = drv;
+
+ if (!is_tegra_hypervisor_mode())
+ ret = devm_request_irq(dev, drv->agic_irqs[AMC_ERR_VIRQ],
+ nvadsp_amc_error_int_handler, 0,
+ "AMC error int", pdev);
+
+ return ret;
+}
diff --git a/drivers/platform/tegra/nvadsp/amc.h b/drivers/platform/tegra/nvadsp/amc.h
new file mode 100644
index 00000000..2bf7d699
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/amc.h
@@ -0,0 +1,58 @@
+/*
+ * amc.h
+ *
+ * A header file for AMC/ARAM
+ *
+ * Copyright (C) 2014 NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __TEGRA_NVADSP_AMC_H
+#define __TEGRA_NVADSP_AMC_H
+
+#define AMC_CONFIG 0x00
+#define AMC_CONFIG_ALIASING (1 << 0)
+#define AMC_CONFIG_CARVEOUT (1 << 1)
+#define AMC_CONFIG_ERR_RESP (1 << 2)
+#define AMC_INT_STATUS (0x04)
+#define AMC_INT_STATUS_ARAM (1 << 0)
+#define AMC_INT_STATUS_REG (1 << 1)
+#define AMC_INT_MASK 0x08
+#define AMC_INT_SET 0x0C
+#define AMC_INT_CLR 0x10
+#define AMC_INT_INVALID_ARAM_ACCESS (1 << 0)
+#define AMC_INT_INVALID_REG_ACCESS (1 << 1)
+#define AMC_ERROR_ADDR 0x14
+
+#define AMC_ERROR_ADDR_IGNORE SZ_4K
+
+#define AMC_REGS 0x1000
+
+#define AMC_ARAM_APERTURE_BASE 0x28
+#define AMC_ARAM_APERTURE_DATA_START 0x800
+#define AMC_ARAM_APERTURE_DATA_LEN 0x800 /* 2KB */
+
+#define AMC_ARAM_ALIAS0 0x00400000
+#define AMC_ARAM_ALIAS1 0x00500000
+#define AMC_ARAM_ALIAS2 0x00600000
+#define AMC_ARAM_ALIAS3 0x00700000
+
+#define AMC_ARAM_START 0
+#define AMC_ARAM_SIZE SZ_64K
+#define AMC_ARAM_WSIZE (AMC_ARAM_SIZE >> 2)
+
+int nvadsp_aram_save(struct platform_device *pdev);
+int nvadsp_aram_restore(struct platform_device *pdev);
+int nvadsp_amc_save(struct platform_device *pdev);
+int nvadsp_amc_restore(struct platform_device *pdev);
+
+#endif /* __TEGRA_NVADSP_AMC_H */
diff --git a/drivers/platform/tegra/nvadsp/amisc.h b/drivers/platform/tegra/nvadsp/amisc.h
new file mode 100644
index 00000000..b127bb5a
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/amisc.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * amisc.h - AMISC register access
+ *
+ * Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
+ *
+ */
+
+#ifndef __TEGRA_NVADSP_AMISC_H
+#define __TEGRA_NVADSP_AMISC_H
+
+#include "dev.h"
+
+#define AMISC_ADSP_STATUS (0x14)
+#define AMISC_ADSP_L2_CLKSTOPPED (1 << 30)
+#define AMISC_ADSP_L2_IDLE (1 << 31)
+
+static inline u32 amisc_readl(struct nvadsp_drv_data *drv_data, u32 reg)
+{
+ return readl(drv_data->base_regs[AMISC] + reg);
+}
+
+#endif /* __TEGRA_NVADSP_AMISC_H */
diff --git a/drivers/platform/tegra/nvadsp/ape_actmon.c b/drivers/platform/tegra/nvadsp/ape_actmon.c
new file mode 100644
index 00000000..3db82adb
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/ape_actmon.c
@@ -0,0 +1,984 @@
+/*
+ * Copyright (C) 2014-2016, NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "ape_actmon.h"
+#include "dev.h"
+
+#define ACTMON_DEV_CTRL 0x00
+#define ACTMON_DEV_CTRL_ENB (0x1 << 31)
+#define ACTMON_DEV_CTRL_UP_WMARK_NUM_SHIFT 26
+#define ACTMON_DEV_CTRL_UP_WMARK_NUM_MASK (0x7 << 26)
+#define ACTMON_DEV_CTRL_DOWN_WMARK_NUM_SHIFT 21
+#define ACTMON_DEV_CTRL_DOWN_WMARK_NUM_MASK (0x7 << 21)
+#define ACTMON_DEV_CTRL_UP_WMARK_ENB (0x1 << 19)
+#define ACTMON_DEV_CTRL_DOWN_WMARK_ENB (0x1 << 18)
+#define ACTMON_DEV_CTRL_AVG_UP_WMARK_ENB (0x1 << 17)
+#define ACTMON_DEV_CTRL_AVG_DOWN_WMARK_ENB (0x1 << 16)
+#define ACTMON_DEV_CTRL_AT_END_ENB (0x1 << 15)
+#define ACTMON_DEV_CTRL_PERIODIC_ENB (0x1 << 13)
+#define ACTMON_DEV_CTRL_K_VAL_SHIFT 10
+#define ACTMON_DEV_CTRL_K_VAL_MASK (0x7 << 10)
+#define ACTMON_DEV_CTRL_SAMPLE_PERIOD_VAL_SHIFT (0)
+#define ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK (0xff << 0)
+
+#define ACTMON_DEV_UP_WMARK 0x04
+#define ACTMON_DEV_DOWN_WMARK 0x08
+#define ACTMON_DEV_AVG_UP_WMARK 0x0c
+#define ACTMON_DEV_AVG_DOWN_WMARK 0x10
+#define ACTMON_DEV_INIT_AVG 0x14
+
+#define ACTMON_DEV_COUNT 0x18
+#define ACTMON_DEV_AVG_COUNT 0x1c
+
+#define ACTMON_DEV_INTR_STATUS 0x20
+#define ACTMON_DEV_INTR_UP_WMARK (0x1 << 31)
+#define ACTMON_DEV_INTR_DOWN_WMARK (0x1 << 30)
+#define ACTMON_DEV_INTR_AVG_DOWN_WMARK (0x1 << 29)
+#define ACTMON_DEV_INTR_AVG_UP_WMARK (0x1 << 28)
+
+#define ACTMON_DEV_COUNT_WEGHT 0x24
+
+#define ACTMON_DEV_SAMPLE_CTRL 0x28
+#define ACTMON_DEV_SAMPLE_CTRL_TICK_65536 (0x1 << 2)
+#define ACTMON_DEV_SAMPLE_CTRL_TICK_256 (0x0 << 1)
+
+#define AMISC_ACTMON_0 0x54
+#define AMISC_ACTMON_CNT_TARGET_ENABLE (0x1 << 31)
+#define ACTMON_DEFAULT_AVG_WINDOW_LOG2 7
+/* avg band, in units of 0.1% of max freq (6 => 0.6%) */
+#define ACTMON_DEFAULT_AVG_BAND 6
+#define ACTMON_MAX_REG_OFFSET 0x2c
+/* TBD: These would come via dts file */
+#define ACTMON_REG_OFFSET 0x800
+/* millisecond divider used as SAMPLE_TICK */
+#define SAMPLE_MS_DIVIDER 65536
+/* Sample period in ms */
+#define ACTMON_DEFAULT_SAMPLING_PERIOD 20
+#define AVG_COUNT_THRESHOLD 100000
+
+static struct actmon ape_actmon;
+static struct actmon *apemon;
+
+/* APE activity monitor: Samples ADSP activity */
+static struct actmon_dev actmon_dev_adsp = {
+ .reg = 0x000,
+ .clk_name = "adsp_cpu",
+
+ /* ADSP suspend activity floor */
+ .suspend_freq = 51200,
+
+ /* min step by which we want to boost in case of sudden boost request */
+ .boost_freq_step = 51200,
+
+ /* % of boost freq for boosting up */
+ .boost_up_coef = 200,
+
+ /*
+ * % of boost freq for boosting down. Should be boosted down by
+ * exponential down
+ */
+ .boost_down_coef = 80,
+
+ /*
+ * % of device freq collected in a sample period set as boost up
+ * threshold. boost interrupt is generated when actmon_count
+ * (absolute actmon count in a sample period)
+ * crosses this threshold for up_wmark_window consecutive samples.
+ */
+ .boost_up_threshold = 95,
+
+ /*
+ * % of device freq collected in a sample period set as boost down
+ * threshold. boost interrupt is generated when actmon_count (raw_count)
+ * crosses this threshold for down_wmark_window consecutive samples.
+ */
+ .boost_down_threshold = 80,
+
+ /*
+ * No of times raw counts hits the up_threshold to generate an
+ * interrupt
+ */
+ .up_wmark_window = 4,
+
+ /*
+ * No of times raw counts hits the down_threshold to generate an
+ * interrupt.
+ */
+ .down_wmark_window = 8,
+
+ /*
+ * No of samples = 2^ avg_window_log2 for calculating exponential moving
+ * average.
+ */
+ .avg_window_log2 = ACTMON_DEFAULT_AVG_WINDOW_LOG2,
+
+ /*
+ * "weight" is used to scale the count to match the device freq
+ * When 256 adsp active cpu clock are generated, actmon count
+ * is increamented by 1. Making weight as 256 ensures that 1 adsp active
+ * clk increaments actmon_count by 1.
+ * This makes actmon_count exactly reflect active adsp cpu clk
+ * cycles.
+ */
+ .count_weight = 0x100,
+
+ /*
+ * FREQ_SAMPLER: samples number of device(adsp) active cycles
+ * weighted by count_weight to reflect actmon_count within a
+ * sample period.
+ * LOAD_SAMPLER: samples actmon active cycles weighted by
+ * count_weight to reflect actmon_count within a sample period.
+ */
+ .type = ACTMON_FREQ_SAMPLER,
+ .state = ACTMON_UNINITIALIZED,
+};
+
+static struct actmon_dev *actmon_devices[] = {
+ &actmon_dev_adsp,
+};
+
+static inline u32 actmon_readl(u32 offset)
+{
+ return __raw_readl(apemon->base + offset);
+}
+static inline void actmon_writel(u32 val, u32 offset)
+{
+ __raw_writel(val, apemon->base + offset);
+}
+static inline void actmon_wmb(void)
+{
+ wmb();
+}
+
+#define offs(x) (dev->reg + x)
+
+static inline unsigned long do_percent(unsigned long val, unsigned int pct)
+{
+ return val * pct / 100;
+}
+
+static void actmon_update_sample_period(unsigned long period)
+{
+ u32 sample_period_in_clks;
+ u32 val = 0;
+
+ apemon->sampling_period = period;
+ /*
+ * sample_period_in_clks <1..255> = (actmon_clk_freq<1..40800> *
+ * actmon_sample_period <10ms..40ms>) / SAMPLE_MS_DIVIDER(65536)
+ */
+ sample_period_in_clks = (apemon->freq * apemon->sampling_period) /
+ SAMPLE_MS_DIVIDER;
+
+ val = actmon_readl(ACTMON_DEV_CTRL);
+ val &= ~ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK;
+ val |= (sample_period_in_clks <<
+ ACTMON_DEV_CTRL_SAMPLE_PERIOD_VAL_SHIFT)
+ & ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK;
+ actmon_writel(val, ACTMON_DEV_CTRL);
+}
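
A worked example of the conversion above (the APE clock rate is an assumed value, not something fixed by the driver):

    /* With apemon->freq = 204000 kHz (204 MHz) and a 20 ms sampling period:
     *   sample_period_in_clks = 204000 * 20 / 65536 = 62
     * which fits the 8-bit SAMPLE_PERIOD field (1..255).
     */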
+
+static inline void actmon_dev_up_wmark_set(struct actmon_dev *dev)
+{
+ u32 val;
+ unsigned long freq = (dev->type == ACTMON_FREQ_SAMPLER) ?
+ dev->cur_freq : apemon->freq;
+
+ val = freq * apemon->sampling_period;
+ actmon_writel(do_percent(val, dev->boost_up_threshold),
+ offs(ACTMON_DEV_UP_WMARK));
+}
+
+static inline void actmon_dev_down_wmark_set(struct actmon_dev *dev)
+{
+ u32 val;
+ unsigned long freq = (dev->type == ACTMON_FREQ_SAMPLER) ?
+ dev->cur_freq : apemon->freq;
+
+ val = freq * apemon->sampling_period;
+ actmon_writel(do_percent(val, dev->boost_down_threshold),
+ offs(ACTMON_DEV_DOWN_WMARK));
+}
+
+static inline void actmon_dev_wmark_set(struct actmon_dev *dev)
+{
+ u32 val;
+ unsigned long freq = (dev->type == ACTMON_FREQ_SAMPLER) ?
+ dev->cur_freq : apemon->freq;
+
+ val = freq * apemon->sampling_period;
+
+ actmon_writel(do_percent(val, dev->boost_up_threshold),
+ offs(ACTMON_DEV_UP_WMARK));
+ actmon_writel(do_percent(val, dev->boost_down_threshold),
+ offs(ACTMON_DEV_DOWN_WMARK));
+}
+
+static inline void actmon_dev_avg_wmark_set(struct actmon_dev *dev)
+{
+ /*
+ * band: delta from current count to be set for avg upper
+ * and lower thresholds
+ */
+ u32 band = dev->avg_band_freq * apemon->sampling_period;
+ u32 avg = dev->avg_count;
+
+ actmon_writel(avg + band, offs(ACTMON_DEV_AVG_UP_WMARK));
+ avg = max(avg, band);
+ actmon_writel(avg - band, offs(ACTMON_DEV_AVG_DOWN_WMARK));
+}
+
+static unsigned long actmon_dev_avg_freq_get(struct actmon_dev *dev)
+{
+ u64 val;
+
+ if (dev->type == ACTMON_FREQ_SAMPLER)
+ return dev->avg_count / apemon->sampling_period;
+
+ val = (u64) dev->avg_count * dev->cur_freq;
+ do_div(val, apemon->freq * apemon->sampling_period);
+ return (u32)val;
+}
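
Worked numbers for the FREQ_SAMPLER branch (the ADSP rate here is an assumed value):

    /* In FREQ_SAMPLER mode avg_count approximates active-kHz * ms, so an
     * ADSP fully busy at 800000 kHz over a 20 ms sampling period gives
     *   avg_count ~= 800000 * 20 = 16000000
     *   avg freq   = 16000000 / 20 = 800000 kHz
     * i.e. the reported average activity equals the ADSP clock rate.
     */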
+
+/* Activity monitor sampling operations */
+static irqreturn_t ape_actmon_dev_isr(int irq, void *dev_id)
+{
+ u32 val, devval;
+ unsigned long flags;
+ struct actmon_dev *dev = (struct actmon_dev *)dev_id;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ val = actmon_readl(offs(ACTMON_DEV_INTR_STATUS));
+ actmon_writel(val, offs(ACTMON_DEV_INTR_STATUS)); /* clr all */
+ devval = actmon_readl(offs(ACTMON_DEV_CTRL));
+
+ if (val & ACTMON_DEV_INTR_AVG_UP_WMARK) {
+ devval |= (ACTMON_DEV_CTRL_AVG_UP_WMARK_ENB |
+ ACTMON_DEV_CTRL_AVG_DOWN_WMARK_ENB);
+ dev->avg_count = actmon_readl(offs(ACTMON_DEV_AVG_COUNT));
+ actmon_dev_avg_wmark_set(dev);
+ } else if (val & ACTMON_DEV_INTR_AVG_DOWN_WMARK) {
+ devval |= (ACTMON_DEV_CTRL_AVG_UP_WMARK_ENB |
+ ACTMON_DEV_CTRL_AVG_DOWN_WMARK_ENB);
+ dev->avg_count = actmon_readl(offs(ACTMON_DEV_AVG_COUNT));
+ actmon_dev_avg_wmark_set(dev);
+ }
+
+ if (val & ACTMON_DEV_INTR_UP_WMARK) {
+ devval |= (ACTMON_DEV_CTRL_UP_WMARK_ENB |
+ ACTMON_DEV_CTRL_DOWN_WMARK_ENB);
+
+ dev->boost_freq = dev->boost_freq_step +
+ do_percent(dev->boost_freq, dev->boost_up_coef);
+ if (dev->boost_freq >= dev->max_freq) {
+ dev->boost_freq = dev->max_freq;
+ devval &= ~ACTMON_DEV_CTRL_UP_WMARK_ENB;
+ }
+ } else if (val & ACTMON_DEV_INTR_DOWN_WMARK) {
+ devval |= (ACTMON_DEV_CTRL_UP_WMARK_ENB |
+ ACTMON_DEV_CTRL_DOWN_WMARK_ENB);
+
+ dev->boost_freq =
+ do_percent(dev->boost_freq, dev->boost_down_coef);
+ if (dev->boost_freq == 0) {
+ devval &= ~ACTMON_DEV_CTRL_DOWN_WMARK_ENB;
+ }
+ }
+
+ actmon_writel(devval, offs(ACTMON_DEV_CTRL));
+ actmon_wmb();
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t ape_actmon_dev_fn(int irq, void *dev_id)
+{
+ unsigned long flags, freq;
+ struct actmon_dev *dev = (struct actmon_dev *)dev_id;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ if (dev->state != ACTMON_ON) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ freq = actmon_dev_avg_freq_get(dev);
+ dev->avg_actv_freq = freq; /* in kHz */
+ freq = do_percent(freq, dev->avg_sustain_coef);
+ freq += dev->boost_freq;
+
+ dev->target_freq = freq;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ dev_dbg(dev->device, "%s(kHz): avg: %lu, boost: %lu, target: %lu, current: %lu\n",
+ dev->clk_name, dev->avg_actv_freq, dev->boost_freq, dev->target_freq,
+ dev->cur_freq);
+
+#if defined(CONFIG_TEGRA_ADSP_DFS)
+ adsp_cpu_set_rate(freq);
+#endif
+
+ return IRQ_HANDLED;
+}
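
To make the boost and target arithmetic of the two handlers concrete, one possible sequence using the actmon_dev_adsp defaults above (step 51200 kHz, boost_up_coef 200, boost_down_coef 80, boost_up_threshold 95); the average frequency is an assumed value:

    /* Illustration only:
     *   up-watermark IRQ #1:  boost = 51200 + 200% * 0     = 51200 kHz
     *   up-watermark IRQ #2:  boost = 51200 + 200% * 51200 = 153600 kHz
     *   down-watermark IRQ:   boost = 80% * 153600         = 122880 kHz
     * With avg_actv_freq = 400000 kHz and avg_sustain_coef = 10000/95 = 105:
     *   target_freq = 400000 * 105 / 100 + boost = 420000 + boost (kHz)
     */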
+
+/* Activity monitor configuration and control */
+static void actmon_dev_configure(struct actmon_dev *dev,
+ unsigned long freq)
+{
+ u32 val;
+
+ dev->boost_freq = 0;
+ dev->cur_freq = freq;
+ dev->target_freq = freq;
+ dev->avg_actv_freq = freq;
+
+ if (dev->type == ACTMON_FREQ_SAMPLER) {
+ /*
+ * max actmon count = (count_weight * adsp_freq (kHz) *
+ * sample_period (ms)) / (PULSE_N_CLK + 1)
+ * Since count_weight is set to 256 (0x100) and
+ * (PULSE_N_CLK + 1) = 256, the two cancel out when
+ * computing max_actmon_count. In other words:
+ * max actmon count = ((count_weight * adsp_freq *
+ * sample_period_reg * SAMPLE_TICK)
+ * / (ape_freq * (PULSE_N_CLK + 1)))
+ * where -
+ * sample_period_reg : <1..255> sample period in number of
+ * actmon clocks per sample
+ * SAMPLE_TICK : arbitrary tick value; 65536 for ms, 256 for us
+ * (PULSE_N_CLK + 1) : 256 - number of adsp "active" clocks needed
+ * to increment raw_count/actmon_count
+ * by one.
+ */
+ dev->avg_count = dev->cur_freq * apemon->sampling_period;
+ dev->avg_band_freq = dev->max_freq *
+ ACTMON_DEFAULT_AVG_BAND / 1000;
+ } else {
+ dev->avg_count = apemon->freq * apemon->sampling_period;
+ dev->avg_band_freq = apemon->freq *
+ ACTMON_DEFAULT_AVG_BAND / 1000;
+ }
+ actmon_writel(dev->avg_count, offs(ACTMON_DEV_INIT_AVG));
+
+ BUG_ON(!dev->boost_up_threshold);
+ dev->avg_sustain_coef = 100 * 100 / dev->boost_up_threshold;
+ actmon_dev_avg_wmark_set(dev);
+ actmon_dev_wmark_set(dev);
+
+ actmon_writel(dev->count_weight, offs(ACTMON_DEV_COUNT_WEGHT));
+ val = actmon_readl(ACTMON_DEV_CTRL);
+
+ val |= (ACTMON_DEV_CTRL_PERIODIC_ENB |
+ ACTMON_DEV_CTRL_AVG_UP_WMARK_ENB |
+ ACTMON_DEV_CTRL_AVG_DOWN_WMARK_ENB);
+ val |= ((dev->avg_window_log2 - 1) << ACTMON_DEV_CTRL_K_VAL_SHIFT) &
+ ACTMON_DEV_CTRL_K_VAL_MASK;
+ val |= ((dev->down_wmark_window - 1) <<
+ ACTMON_DEV_CTRL_DOWN_WMARK_NUM_SHIFT) &
+ ACTMON_DEV_CTRL_DOWN_WMARK_NUM_MASK;
+ val |= ((dev->up_wmark_window - 1) <<
+ ACTMON_DEV_CTRL_UP_WMARK_NUM_SHIFT) &
+ ACTMON_DEV_CTRL_UP_WMARK_NUM_MASK;
+ val |= ACTMON_DEV_CTRL_DOWN_WMARK_ENB |
+ ACTMON_DEV_CTRL_UP_WMARK_ENB;
+
+ actmon_writel(val, offs(ACTMON_DEV_CTRL));
+ actmon_wmb();
+}
+
+static void actmon_dev_enable(struct actmon_dev *dev)
+{
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ if (dev->state == ACTMON_OFF) {
+ dev->state = ACTMON_ON;
+
+ val = actmon_readl(offs(ACTMON_DEV_CTRL));
+ val |= ACTMON_DEV_CTRL_ENB;
+ actmon_writel(val, offs(ACTMON_DEV_CTRL));
+ actmon_wmb();
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void actmon_dev_disable(struct actmon_dev *dev)
+{
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ if (dev->state == ACTMON_ON) {
+ dev->state = ACTMON_OFF;
+
+ val = actmon_readl(offs(ACTMON_DEV_CTRL));
+ val &= ~ACTMON_DEV_CTRL_ENB;
+ actmon_writel(val, offs(ACTMON_DEV_CTRL));
+ actmon_writel(0xffffffff, offs(ACTMON_DEV_INTR_STATUS));
+ actmon_wmb();
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+static int actmon_dev_probe(struct actmon_dev *dev)
+{
+ struct nvadsp_drv_data *drv_data = dev_get_drvdata(dev->device);
+ int ret;
+
+ dev->irq = drv_data->agic_irqs[ACTMON_VIRQ];
+ ret = request_threaded_irq(dev->irq, ape_actmon_dev_isr,
+ ape_actmon_dev_fn, IRQ_TYPE_LEVEL_HIGH,
+ dev->clk_name, dev);
+ if (ret) {
+ dev_err(dev->device, "Failed irq %d request for %s\n", dev->irq,
+ dev->clk_name);
+ goto end;
+ }
+ disable_irq(dev->irq);
+end:
+ return ret;
+}
+
+static int actmon_dev_init(struct actmon_dev *dev)
+{
+ int ret = -EINVAL;
+ unsigned long freq;
+
+ spin_lock_init(&dev->lock);
+
+ dev->clk = clk_get_sys(NULL, dev->clk_name);
+ if (IS_ERR_OR_NULL(dev->clk)) {
+ dev_err(dev->device, "Failed to find %s clock\n",
+ dev->clk_name);
+ goto end;
+ }
+
+ ret = clk_prepare_enable(dev->clk);
+ if (ret) {
+ dev_err(dev->device, "unable to enable %s clock\n",
+ dev->clk_name);
+ goto err_enable;
+ }
+
+ dev->max_freq = freq = clk_get_rate(dev->clk) / 1000;
+ actmon_dev_configure(dev, freq);
+
+ dev->state = ACTMON_OFF;
+ actmon_dev_enable(dev);
+ enable_irq(dev->irq);
+ return 0;
+
+err_enable:
+ clk_put(dev->clk);
+end:
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#define RW_MODE (S_IWUSR | S_IRUSR)
+#define RO_MODE S_IRUSR
+
+static struct dentry *clk_debugfs_root;
+
+static int type_show(struct seq_file *s, void *data)
+{
+ struct actmon_dev *dev = s->private;
+
+ seq_printf(s, "%s\n", (dev->type == ACTMON_LOAD_SAMPLER) ?
+ "Load Activity Monitor" : "Frequency Activity Monitor");
+ return 0;
+}
+static int type_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, type_show, inode->i_private);
+}
+static const struct file_operations type_fops = {
+ .open = type_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int actv_get(void *data, u64 *val)
+{
+ unsigned long flags;
+ struct actmon_dev *dev = data;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ *val = actmon_dev_avg_freq_get(dev);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(actv_fops, actv_get, NULL, "%llu\n");
+
+static int step_get(void *data, u64 *val)
+{
+ struct actmon_dev *dev = data;
+ *val = dev->boost_freq_step * 100 / dev->max_freq;
+ return 0;
+}
+static int step_set(void *data, u64 val)
+{
+ unsigned long flags;
+ struct actmon_dev *dev = data;
+
+ if (val > 100)
+ val = 100;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->boost_freq_step = do_percent(dev->max_freq, (unsigned int)val);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(step_fops, step_get, step_set, "%llu\n");
+
+static int count_weight_get(void *data, u64 *val)
+{
+ struct actmon_dev *dev = data;
+ *val = dev->count_weight;
+ return 0;
+}
+static int count_weight_set(void *data, u64 val)
+{
+ unsigned long flags;
+ struct actmon_dev *dev = data;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->count_weight = (u32) val;
+ actmon_writel(dev->count_weight, offs(ACTMON_DEV_COUNT_WEGHT));
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cnt_wt_fops, count_weight_get,
+ count_weight_set, "%llu\n");
+
+static int up_threshold_get(void *data, u64 *val)
+{
+ struct actmon_dev *dev = data;
+ *val = dev->boost_up_threshold;
+ return 0;
+}
+static int up_threshold_set(void *data, u64 val)
+{
+ unsigned long flags;
+ struct actmon_dev *dev = data;
+ unsigned int up_threshold = (unsigned int)val;
+
+ if (up_threshold > 100)
+ up_threshold = 100;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ if (up_threshold <= dev->boost_down_threshold)
+ up_threshold = dev->boost_down_threshold;
+ if (up_threshold)
+ dev->avg_sustain_coef = 100 * 100 / up_threshold;
+ dev->boost_up_threshold = up_threshold;
+
+ actmon_dev_up_wmark_set(dev);
+ actmon_wmb();
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(up_threshold_fops, up_threshold_get,
+ up_threshold_set, "%llu\n");
+
+static int down_threshold_get(void *data, u64 *val)
+{
+ struct actmon_dev *dev = data;
+ *val = dev->boost_down_threshold;
+ return 0;
+}
+static int down_threshold_set(void *data, u64 val)
+{
+ unsigned long flags;
+ struct actmon_dev *dev = data;
+ unsigned int down_threshold = (unsigned int)val;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ if (down_threshold >= dev->boost_up_threshold)
+ down_threshold = dev->boost_up_threshold;
+ dev->boost_down_threshold = down_threshold;
+
+ actmon_dev_down_wmark_set(dev);
+ actmon_wmb();
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(down_threshold_fops, down_threshold_get,
+ down_threshold_set, "%llu\n");
+
+static int state_get(void *data, u64 *val)
+{
+ struct actmon_dev *dev = data;
+ *val = dev->state;
+ return 0;
+}
+static int state_set(void *data, u64 val)
+{
+ struct actmon_dev *dev = data;
+
+ if (val)
+ actmon_dev_enable(dev);
+ else
+ actmon_dev_disable(dev);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(state_fops, state_get, state_set, "%llu\n");
+
+/* Get period in msec */
+static int period_get(void *data, u64 *val)
+{
+ *val = apemon->sampling_period;
+ return 0;
+}
+/* Set period in msec */
+static int period_set(void *data, u64 val)
+{
+ int i;
+ unsigned long flags;
+ u8 period = (u8)val;
+
+ if (period) {
+ actmon_update_sample_period(period);
+
+ for (i = 0; i < ARRAY_SIZE(actmon_devices); i++) {
+ struct actmon_dev *dev = actmon_devices[i];
+ spin_lock_irqsave(&dev->lock, flags);
+ actmon_dev_wmark_set(dev);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ }
+ actmon_wmb();
+ return 0;
+ }
+ return -EINVAL;
+}
+DEFINE_SIMPLE_ATTRIBUTE(period_fops, period_get, period_set, "%llu\n");
+
+
+static int actmon_debugfs_create_dev(struct actmon_dev *dev)
+{
+ struct dentry *dir, *d;
+
+ if (dev->state == ACTMON_UNINITIALIZED)
+ return 0;
+
+ dir = debugfs_create_dir(dev->clk_name, clk_debugfs_root);
+ if (!dir)
+ return -ENOMEM;
+
+ d = debugfs_create_file(
+ "actv_type", RO_MODE, dir, dev, &type_fops);
+ if (!d)
+ return -ENOMEM;
+
+ d = debugfs_create_file(
+ "avg_activity", RO_MODE, dir, dev, &actv_fops);
+ if (!d)
+ return -ENOMEM;
+
+ d = debugfs_create_file(
+ "boost_step", RW_MODE, dir, dev, &step_fops);
+ if (!d)
+ return -ENOMEM;
+
+ d = debugfs_create_u32(
+ "boost_rate_dec", RW_MODE, dir, (u32 *)&dev->boost_down_coef);
+ if (!d)
+ return -ENOMEM;
+
+ d = debugfs_create_u32(
+ "boost_rate_inc", RW_MODE, dir, (u32 *)&dev->boost_up_coef);
+ if (!d)
+ return -ENOMEM;
+
+ d = debugfs_create_file(
+ "boost_threshold_dn", RW_MODE, dir, dev, &down_threshold_fops);
+ if (!d)
+ return -ENOMEM;
+
+ d = debugfs_create_file(
+ "boost_threshold_up", RW_MODE, dir, dev, &up_threshold_fops);
+ if (!d)
+ return -ENOMEM;
+
+ d = debugfs_create_file(
+ "state", RW_MODE, dir, dev, &state_fops);
+ if (!d)
+ return -ENOMEM;
+
+ d = debugfs_create_file(
+ "cnt_wt", RW_MODE, dir, dev, &cnt_wt_fops);
+ if (!d)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int actmon_debugfs_init(struct nvadsp_drv_data *drv)
+{
+ int i;
+ int ret = -ENOMEM;
+ struct dentry *d;
+
+ if (!drv->adsp_debugfs_root)
+ return ret;
+ d = debugfs_create_dir("adsp_actmon", drv->adsp_debugfs_root);
+ if (!d)
+ return ret;
+ clk_debugfs_root = d;
+
+ d = debugfs_create_file("period", RW_MODE, d, NULL, &period_fops);
+ if (!d)
+ goto err_out;
+
+ for (i = 0; i < ARRAY_SIZE(actmon_devices); i++) {
+ ret = actmon_debugfs_create_dev(actmon_devices[i]);
+ if (ret)
+ goto err_out;
+ }
+ return 0;
+
+err_out:
+ debugfs_remove_recursive(clk_debugfs_root);
+ return ret;
+}
+
+#endif
+
+/* freq in KHz */
+void actmon_rate_change(unsigned long freq, bool override)
+{
+ struct actmon_dev *dev = &actmon_dev_adsp;
+ unsigned long flags;
+
+ if (override) {
+ actmon_dev_disable(dev);
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->cur_freq = freq;
+ dev->avg_count = freq * apemon->sampling_period;
+ actmon_writel(dev->avg_count, offs(ACTMON_DEV_INIT_AVG));
+ actmon_dev_avg_wmark_set(dev);
+ actmon_dev_wmark_set(dev);
+ actmon_wmb();
+ spin_unlock_irqrestore(&dev->lock, flags);
+ actmon_dev_enable(dev);
+ } else {
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->cur_freq = freq;
+ if (dev->state == ACTMON_ON) {
+ actmon_dev_wmark_set(dev);
+ actmon_wmb();
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+ }
+ /*
+ * Set the APE rate to half the ADSP rate
+ * (freq is in kHz, clk_set_rate() takes Hz).
+ */
+ clk_set_rate(apemon->clk, freq * 500);
+}
+
+int ape_actmon_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(actmon_devices); i++) {
+ actmon_devices[i]->device = &pdev->dev;
+ ret = actmon_dev_probe(actmon_devices[i]);
+ dev_dbg(&pdev->dev, "%s actmon: %s probe (%d)\n",
+ actmon_devices[i]->clk_name, ret ? "Failed" : "Completed", ret);
+ }
+ return ret;
+}
+
+static int ape_actmon_rc_cb(
+ struct notifier_block *nb, unsigned long rate, void *v)
+{
+ struct actmon_dev *dev = &actmon_dev_adsp;
+ unsigned long flags;
+ u32 init_cnt;
+
+ if (dev->state != ACTMON_ON) {
+ dev_dbg(dev->device, "adsp actmon is not ON\n");
+ goto exit_out;
+ }
+
+ actmon_dev_disable(dev);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ init_cnt = actmon_readl(offs(ACTMON_DEV_AVG_COUNT));
+ /* recompute the sample period register for the new APE clock rate */
+ apemon->freq = rate / 1000; /* in KHz */
+ actmon_update_sample_period(ACTMON_DEFAULT_SAMPLING_PERIOD);
+ actmon_writel(init_cnt, offs(ACTMON_DEV_INIT_AVG));
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ actmon_dev_enable(dev);
+exit_out:
+ return NOTIFY_OK;
+}
+int ape_actmon_init(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
+ static void __iomem *amisc_base;
+ u32 sample_period_in_clks;
+ struct clk *p;
+ u32 val = 0;
+ int i, ret;
+
+ if (drv->actmon_initialized)
+ return 0;
+
+ apemon = &ape_actmon;
+ apemon->base = drv->base_regs[AMISC] + ACTMON_REG_OFFSET;
+ amisc_base = drv->base_regs[AMISC];
+
+ apemon->clk = clk_get_sys(NULL, "adsp.ape");
+ if (!apemon->clk) {
+ dev_err(&pdev->dev, "Failed to find actmon clock\n");
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ ret = clk_prepare_enable(apemon->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to enable actmon clock\n");
+ ret = -EINVAL;
+ goto err_out;
+ }
+ apemon->clk_rc_nb.notifier_call = ape_actmon_rc_cb;
+
+ /*
+ * "adsp.ape" clk is shared bus user clock and "ape" is bus clock
+ * but rate change notification should come from bus clock itself.
+ */
+ p = clk_get_parent(apemon->clk);
+ if (!p) {
+ dev_err(&pdev->dev, "Failed to find actmon parent clock\n");
+ ret = -EINVAL;
+ goto clk_err_out;
+ }
+
+ ret = tegra_register_clk_rate_notifier(p, &apemon->clk_rc_nb);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register %s rate change notifier for %s\n",
+ p->name, apemon->clk->name);
+ goto clk_err_out;
+ }
+ apemon->freq = clk_get_rate(apemon->clk) / 1000; /* in KHz */
+
+ apemon->sampling_period = ACTMON_DEFAULT_SAMPLING_PERIOD;
+
+ /*
+ * Sample period expressed as a number of actmon clocks.
+ * Actmon is derived from the APE clk. For example, with APE at
+ * 204 MHz (204000 kHz), a 10 ms sample contains 204000 * 10 APE
+ * cycles (204000 cycles per ms).
+ * SAMPLE_MS_DIVIDER is an arbitrary scaling factor.
+ */
+ sample_period_in_clks = (apemon->freq * apemon->sampling_period)
+ / SAMPLE_MS_DIVIDER;
+
+ /* set ms mode */
+ actmon_writel(ACTMON_DEV_SAMPLE_CTRL_TICK_65536,
+ ACTMON_DEV_SAMPLE_CTRL);
+ val = actmon_readl(ACTMON_DEV_CTRL);
+ val &= ~ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK;
+ val |= (sample_period_in_clks <<
+ ACTMON_DEV_CTRL_SAMPLE_PERIOD_VAL_SHIFT)
+ & ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK;
+ actmon_writel(val, ACTMON_DEV_CTRL);
+
+ /* Enable AMISC_ACTMON */
+ val = __raw_readl(amisc_base + AMISC_ACTMON_0);
+ val |= AMISC_ACTMON_CNT_TARGET_ENABLE;
+ __raw_writel(val, amisc_base + AMISC_ACTMON_0);
+
+ actmon_writel(0xffffffff, ACTMON_DEV_INTR_STATUS); /* clr all */
+
+ for (i = 0; i < ARRAY_SIZE(actmon_devices); i++) {
+ ret = actmon_dev_init(actmon_devices[i]);
+ dev_dbg(&pdev->dev, "%s actmon device: %s initialization (%d)\n",
+ actmon_devices[i]->clk_name, ret ? "Failed" : "Completed", ret);
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ actmon_debugfs_init(drv);
+#endif
+
+ drv->actmon_initialized = true;
+
+ dev_dbg(&pdev->dev, "adsp actmon initialized ....\n");
+ return 0;
+clk_err_out:
+ if (apemon->clk)
+ clk_disable_unprepare(apemon->clk);
+err_out:
+ if (!IS_ERR_OR_NULL(apemon->clk))
+ clk_put(apemon->clk);
+ return ret;
+}
+
+int ape_actmon_exit(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
+ struct actmon_dev *dev;
+ status_t ret = 0;
+ int i;
+
+ /* return if actmon is not initialized */
+ if (!drv->actmon_initialized)
+ return -ENODEV;
+
+ for (i = 0; i < ARRAY_SIZE(actmon_devices); i++) {
+ dev = actmon_devices[i];
+ actmon_dev_disable(dev);
+ disable_irq(dev->irq);
+ clk_disable_unprepare(dev->clk);
+ clk_put(dev->clk);
+ }
+
+ tegra_unregister_clk_rate_notifier(clk_get_parent(apemon->clk),
+ &apemon->clk_rc_nb);
+
+ clk_disable_unprepare(apemon->clk);
+ clk_put(apemon->clk);
+
+ drv->actmon_initialized = false;
+
+ dev_dbg(&pdev->dev, "adsp actmon has exited ....\n");
+
+ return ret;
+}
diff --git a/drivers/platform/tegra/nvadsp/ape_actmon.h b/drivers/platform/tegra/nvadsp/ape_actmon.h
new file mode 100644
index 00000000..ae7d1723
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/ape_actmon.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __APE_ACTMON_H
+#define __APE_ACTMON_H
+#include <linux/platform_device.h>
+
+enum actmon_type {
+ ACTMON_LOAD_SAMPLER,
+ ACTMON_FREQ_SAMPLER,
+};
+
+enum actmon_state {
+ ACTMON_UNINITIALIZED = -1,
+ ACTMON_OFF = 0,
+ ACTMON_ON = 1,
+ ACTMON_SUSPENDED = 2,
+};
+/* Units:
+ * - frequency in kHz
+ * - coefficients, and thresholds in %
+ * - sampling period in ms
+ * - window in sample periods (value = setting + 1)
+ */
+struct actmon_dev {
+ u32 reg;
+ int irq;
+ struct device *device;
+
+ const char *dev_id;
+ const char *con_id;
+ const char *clk_name;
+ struct clk *clk;
+
+ unsigned long max_freq;
+ unsigned long target_freq;
+ unsigned long cur_freq;
+ unsigned long suspend_freq;
+
+ unsigned long avg_actv_freq;
+ unsigned long avg_band_freq;
+ unsigned int avg_sustain_coef;
+ u32 avg_count;
+
+ unsigned long boost_freq;
+ unsigned long boost_freq_step;
+ unsigned int boost_up_coef;
+ unsigned int boost_down_coef;
+ unsigned int boost_up_threshold;
+ unsigned int boost_down_threshold;
+
+ u8 up_wmark_window;
+ u8 down_wmark_window;
+ u8 avg_window_log2;
+ u32 count_weight;
+
+ enum actmon_type type;
+ enum actmon_state state;
+ enum actmon_state saved_state;
+
+ spinlock_t lock;
+
+};
+
+struct actmon {
+ struct clk *clk;
+ unsigned long freq;
+ unsigned long sampling_period;
+ struct notifier_block clk_rc_nb;
+ void __iomem *base;
+};
+
+int ape_actmon_init(struct platform_device *pdev);
+int ape_actmon_exit(struct platform_device *pdev);
+void actmon_rate_change(unsigned long freq, bool override);
+#endif
diff --git a/drivers/platform/tegra/nvadsp/app.c b/drivers/platform/tegra/nvadsp/app.c
new file mode 100644
index 00000000..3cfb5f37
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/app.c
@@ -0,0 +1,1031 @@
+/*
+ * app.c
+ *
+ * ADSP OS App management
+ *
+ * Copyright (C) 2014-2022, NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/platform_device.h>
+#include <linux/tegra_nvadsp.h>
+
+#include "aram_manager.h"
+#include "os.h"
+#include "dev.h"
+#include "adsp_shared_struct.h"
+
+#define DYN_APP_EXTN ".elf"
+#define ADSP_APP_INIT_TIMEOUT 2000 /* in ms */
+
+/*
+ * Structure to hold the list of loaded app binaries and their
+ * associated instances.
+ */
+struct nvadsp_app_service {
+ char name[NVADSP_NAME_SZ];
+ struct list_head node;
+ int instance;
+ struct mutex lock;
+ struct list_head app_head;
+ const uint32_t token;
+ const struct app_mem_size *mem_size;
+ int generated_instance_id;
+ struct adsp_module *mod;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+#endif
+};
+
+/* nvadsp app loader private structure */
+struct nvadsp_app_priv_struct {
+ struct platform_device *pdev;
+ struct completion os_load_complete;
+ struct nvadsp_mbox mbox;
+ struct list_head service_list;
+ struct mutex service_lock_list;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *adsp_app_debugfs_root;
+#endif
+};
+
+static struct nvadsp_app_priv_struct priv;
+
+static void delete_app_instance(nvadsp_app_info_t *);
+
+#ifdef CONFIG_DEBUG_FS
+static int dump_binary_in_2bytes_app_file_node(struct seq_file *s, void *data)
+{
+ struct nvadsp_app_service *ser = s->private;
+ struct adsp_module *mod = ser->mod;
+ u32 adsp_ptr;
+ u16 *ptr;
+ int i;
+
+ adsp_ptr = mod->adsp_module_ptr;
+ ptr = (u16 *)mod->module_ptr;
+ for (i = 0; i < mod->size; i += 2)
+ seq_printf(s, "0x%x : 0x%04x\n", adsp_ptr + i, *(ptr + i));
+
+ return 0;
+}
+
+
+static int dump_binary_in_words_app_file_node(struct seq_file *s, void *data)
+{
+ struct nvadsp_app_service *ser = s->private;
+ struct adsp_module *mod = ser->mod;
+ u32 adsp_ptr;
+ u32 *ptr;
+ int i;
+
+ adsp_ptr = mod->adsp_module_ptr;
+ ptr = (u32 *)mod->module_ptr;
+ for (i = 0; i < mod->size; i += 4)
+ seq_printf(s, "0x%x : 0x%08x\n", adsp_ptr + i, *(ptr + i));
+
+ return 0;
+}
+
+static int host_load_addr_app_file_node(struct seq_file *s, void *data)
+{
+ struct nvadsp_app_service *ser = s->private;
+ struct adsp_module *mod = ser->mod;
+
+ seq_printf(s, "%p\n", mod->module_ptr);
+
+ return 0;
+}
+
+static int adsp_load_addr_app_file_node(struct seq_file *s, void *data)
+{
+ struct nvadsp_app_service *ser = s->private;
+ struct adsp_module *mod = ser->mod;
+
+ seq_printf(s, "0x%x\n", mod->adsp_module_ptr);
+
+ return 0;
+}
+
+static int size_app_file_node(struct seq_file *s, void *data)
+{
+ struct nvadsp_app_service *ser = s->private;
+ struct adsp_module *mod = ser->mod;
+
+ seq_printf(s, "%lu\n", mod->size);
+
+ return 0;
+}
+
+static int version_app_file_node(struct seq_file *s, void *data)
+{
+ struct nvadsp_app_service *ser = s->private;
+ struct adsp_module *mod = ser->mod;
+
+ seq_printf(s, "%s\n", strcmp(mod->version, "") ? mod->version : "unavailable");
+
+ return 0;
+}
+
+static int dram_app_file_node(struct seq_file *s, void *data)
+{
+ const struct app_mem_size *mem_size = s->private;
+
+ seq_printf(s, "%llu\n", mem_size->dram);
+
+ return 0;
+}
+
+static int dram_shared_app_file_node(struct seq_file *s, void *data)
+{
+ const struct app_mem_size *mem_size = s->private;
+
+ seq_printf(s, "%llu\n", mem_size->dram_shared);
+
+ return 0;
+}
+
+static int dram_shared_wc_app_file_node(struct seq_file *s, void *data)
+{
+ const struct app_mem_size *mem_size = s->private;
+
+ seq_printf(s, "%llu\n", mem_size->dram_shared_wc);
+
+ return 0;
+}
+
+static int aram_app_file_node(struct seq_file *s, void *data)
+{
+ const struct app_mem_size *mem_size = s->private;
+
+ seq_printf(s, "%llu\n", mem_size->aram);
+
+ return 0;
+}
+
+static int aram_exclusive_app_file_node(struct seq_file *s, void *data)
+{
+ const struct app_mem_size *mem_size = s->private;
+
+ seq_printf(s, "%llu\n", mem_size->aram_x);
+
+ return 0;
+}
+
+#define ADSP_APP_CREATE_FOLDER(x, root) \
+ do {\
+ x = debugfs_create_dir(#x, root); \
+ if (IS_ERR_OR_NULL(x)) { \
+ dev_err(dev, "unable to create app %s folder\n", #x); \
+ ret = -ENOENT; \
+ goto rm_debug_root; \
+ } \
+ } while (0)
+
+#define ADSP_APP_CREATE_FILE(x, priv, root) \
+ do { \
+ if (IS_ERR_OR_NULL(debugfs_create_file(#x, S_IRUSR, root, \
+ priv, &x##_node_operations))) { \
+ dev_err(dev, "unable to create app %s file\n", #x); \
+ ret = -ENOENT; \
+ goto rm_debug_root; \
+ } \
+ } while (0)
+
+#define ADSP_APP_FILE_OPERATION(x) \
+static int x##_open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, x##_app_file_node, inode->i_private); \
+} \
+\
+static const struct file_operations x##_node_operations = { \
+ .open = x##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+};
+
+ADSP_APP_FILE_OPERATION(dump_binary_in_2bytes);
+ADSP_APP_FILE_OPERATION(dump_binary_in_words);
+ADSP_APP_FILE_OPERATION(host_load_addr);
+ADSP_APP_FILE_OPERATION(adsp_load_addr);
+ADSP_APP_FILE_OPERATION(size);
+ADSP_APP_FILE_OPERATION(version);
+
+ADSP_APP_FILE_OPERATION(dram);
+ADSP_APP_FILE_OPERATION(dram_shared);
+ADSP_APP_FILE_OPERATION(dram_shared_wc);
+ADSP_APP_FILE_OPERATION(aram);
+ADSP_APP_FILE_OPERATION(aram_exclusive);
+
+static int create_adsp_app_debugfs(struct nvadsp_app_service *ser)
+{
+ struct app_mem_size *mem_size = (struct app_mem_size *)ser->mem_size;
+ struct device *dev = &priv.pdev->dev;
+ struct dentry *instance_mem_sizes;
+ struct dentry *root;
+ int ret = 0;
+
+ root = debugfs_create_dir(ser->name,
+ priv.adsp_app_debugfs_root);
+ if (IS_ERR_OR_NULL(root)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ ADSP_APP_CREATE_FILE(dump_binary_in_2bytes, ser, root);
+ ADSP_APP_CREATE_FILE(dump_binary_in_words, ser, root);
+ ADSP_APP_CREATE_FILE(host_load_addr, ser, root);
+ ADSP_APP_CREATE_FILE(adsp_load_addr, ser, root);
+ ADSP_APP_CREATE_FILE(size, ser, root);
+ ADSP_APP_CREATE_FILE(version, ser, root);
+ ADSP_APP_CREATE_FOLDER(instance_mem_sizes, root);
+ ADSP_APP_CREATE_FILE(dram, mem_size, instance_mem_sizes);
+ ADSP_APP_CREATE_FILE(dram_shared, mem_size, instance_mem_sizes);
+ ADSP_APP_CREATE_FILE(dram_shared_wc, mem_size, instance_mem_sizes);
+ ADSP_APP_CREATE_FILE(aram, mem_size, instance_mem_sizes);
+ ADSP_APP_CREATE_FILE(aram_exclusive, mem_size, instance_mem_sizes);
+
+ ser->debugfs = root;
+ return 0;
+rm_debug_root:
+ debugfs_remove_recursive(root);
+err_out:
+ return ret;
+}
+
+static int __init adsp_app_debug_init(struct dentry *root)
+{
+ priv.adsp_app_debugfs_root = debugfs_create_dir("adsp_apps", root);
+ return IS_ERR_OR_NULL(priv.adsp_app_debugfs_root) ? -ENOMEM : 0;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static struct nvadsp_app_service *get_loaded_service(const char *appfile)
+{
+ struct device *dev = &priv.pdev->dev;
+ struct nvadsp_app_service *ser;
+
+ list_for_each_entry(ser, &priv.service_list, node) {
+ if (!strcmp(appfile, ser->name)) {
+ dev_dbg(dev, "module %s already loaded\n", appfile);
+ return ser;
+ }
+ }
+ dev_dbg(dev, "module %s will be loaded\n", appfile);
+ return NULL;
+}
+
+static inline void extract_appname(char *appname, const char *appfile)
+{
+ char *token = strstr(appfile, DYN_APP_EXTN);
+ int len = token ? token - appfile : strlen(appfile);
+
+ strncpy(appname, appfile, len);
+ appname[len] = '\0';
+}
+
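+/*
+ * Return the already-loaded service for @appfile, or load it: dynamic apps
+ * are pulled in through the ELF loader (refused when the ADSP OS image is
+ * secure-loaded), static apps are described by the shared_app record.
+ */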
+static nvadsp_app_handle_t app_load(const char *appfile,
+ struct adsp_shared_app *shared_app, bool dynamic)
+{
+ struct nvadsp_drv_data *drv_data;
+ struct device *dev = &priv.pdev->dev;
+ char appname[NVADSP_NAME_SZ] = { };
+ struct nvadsp_app_service *ser;
+
+ drv_data = platform_get_drvdata(priv.pdev);
+ extract_appname(appname, appfile);
+ mutex_lock(&priv.service_lock_list);
+ ser = get_loaded_service(appname);
+ if (!ser) {
+
+ /* dynamic loading is disabled when running in secure mode */
+ if (drv_data->adsp_os_secload && dynamic)
+ goto err;
+ dev_dbg(dev, "loading app %s %s\n", appfile, appname);
+ ser = devm_kzalloc(dev, sizeof(*ser), GFP_KERNEL);
+ if (!ser)
+ goto err;
+ strlcpy(ser->name, appname, NVADSP_NAME_SZ);
+
+ /* load the module into memory */
+ ser->mod = dynamic ?
+ load_adsp_dynamic_module(appfile, appfile, dev) :
+ load_adsp_static_module(appfile, shared_app, dev);
+ if (IS_ERR_OR_NULL(ser->mod))
+ goto err_free_service;
+ ser->mem_size = &ser->mod->mem_size;
+
+ mutex_init(&ser->lock);
+ INIT_LIST_HEAD(&ser->app_head);
+
+ /* add the app instance service to the list */
+ list_add_tail(&ser->node, &priv.service_list);
+#ifdef CONFIG_DEBUG_FS
+ create_adsp_app_debugfs(ser);
+#endif
+ dev_dbg(dev, "loaded app %s\n", ser->name);
+ }
+ mutex_unlock(&priv.service_lock_list);
+
+ return ser;
+
+err_free_service:
+ devm_kfree(dev, ser);
+err:
+ mutex_unlock(&priv.service_lock_list);
+ return NULL;
+}
+
+
+nvadsp_app_handle_t nvadsp_app_load(const char *appname, const char *appfile)
+{
+ struct nvadsp_drv_data *drv_data;
+
+ if (IS_ERR_OR_NULL(priv.pdev)) {
+ pr_err("ADSP Driver is not initialized\n");
+ return NULL;
+ }
+
+ drv_data = platform_get_drvdata(priv.pdev);
+
+ if (!drv_data->adsp_os_running)
+ return NULL;
+
+ return app_load(appfile, NULL, true);
+}
+EXPORT_SYMBOL(nvadsp_app_load);
+
+static void free_instance_memory(nvadsp_app_info_t *app,
+ const struct app_mem_size *sz)
+{
+ adsp_app_mem_t *mem = &app->mem;
+ adsp_app_iova_mem_t *iova_mem = &app->iova_mem;
+
+ if (mem->dram) {
+ nvadsp_free_coherent(sz->dram, mem->dram, iova_mem->dram);
+ mem->dram = NULL;
+ iova_mem->dram = 0;
+ }
+
+ if (mem->shared) {
+ nvadsp_free_coherent(sz->dram_shared, mem->shared,
+ iova_mem->shared);
+ mem->shared = NULL;
+ iova_mem->shared = 0;
+ }
+
+ if (mem->shared_wc) {
+ nvadsp_free_coherent(sz->dram_shared_wc, mem->shared_wc,
+ iova_mem->shared_wc);
+ mem->shared_wc = NULL;
+ iova_mem->shared_wc = 0;
+ }
+
+ if (mem->aram_flag)
+ nvadsp_aram_release(mem->aram);
+ else if (mem->aram)
+ nvadsp_free_coherent(sz->aram, mem->aram, iova_mem->aram);
+ mem->aram = NULL;
+ iova_mem->aram = 0;
+ mem->aram_flag = 0;
+
+ if (mem->aram_x_flag) {
+ nvadsp_aram_release(mem->aram_x);
+ mem->aram_x = NULL;
+ iova_mem->aram_x = 0;
+ mem->aram_x_flag = 0;
+ }
+
+}
+
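+/*
+ * Allocate the per-instance DRAM, shared DRAM and ARAM pools described by
+ * the service's mem_size table. Plain ARAM requests fall back to DRAM-backed
+ * coherent memory when the ARAM pool cannot satisfy them; exclusive ARAM
+ * (aram_x) requests are best-effort and only log an error on failure.
+ */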
+static int create_instance_memory(nvadsp_app_info_t *app,
+ const struct app_mem_size *sz)
+{
+ adsp_app_iova_mem_t *iova_mem = &app->iova_mem;
+ struct device *dev = &priv.pdev->dev;
+ adsp_app_mem_t *mem = &app->mem;
+ char name[NVADSP_NAME_SZ];
+ void *aram_handle;
+ dma_addr_t da;
+ int ret;
+
+ ret = snprintf(name, NVADSP_NAME_SZ, "%s:%d", app->name, app->instance_id);
+ if (ret < 0 || ret >= NVADSP_NAME_SZ) {
+ dev_err(dev, "Invalid App name %s\n", app->name);
+ return -EINVAL;
+ }
+
+ if (sz->dram) {
+ mem->dram = nvadsp_alloc_coherent(sz->dram, &da, GFP_KERNEL);
+ iova_mem->dram = (uint32_t)da;
+ if (!mem->dram) {
+ dev_err(dev, "app %s dram alloc failed\n",
+ name);
+ goto end;
+ }
+ dev_dbg(dev, "%s :: mem.dram %p 0x%x\n", name,
+ mem->dram, iova_mem->dram);
+ }
+
+ if (sz->dram_shared) {
+ mem->shared = nvadsp_alloc_coherent(sz->dram_shared,
+ &da, GFP_KERNEL);
+ if (!mem->shared) {
+ dev_err(dev, "app %s shared dram alloc failed\n",
+ name);
+ goto end;
+ }
+ iova_mem->shared = (uint32_t)da;
+ dev_dbg(dev, "%s :: mem.shared %p 0x%x\n", name,
+ mem->shared, iova_mem->shared);
+ }
+
+ if (sz->dram_shared_wc) {
+ mem->shared_wc = nvadsp_alloc_coherent(sz->dram_shared_wc,
+ &da, GFP_KERNEL);
+ if (!mem->shared_wc) {
+ dev_err(dev, "app %s shared dram wc alloc failed\n",
+ name);
+ goto end;
+ }
+ iova_mem->shared_wc = (uint32_t)da;
+ dev_dbg(dev, "%s :: mem.shared_wc %p 0x%x\n", name,
+ mem->shared_wc, iova_mem->shared_wc);
+ }
+
+ if (sz->aram) {
+ aram_handle = nvadsp_aram_request(name, sz->aram);
+ if (!IS_ERR_OR_NULL(aram_handle)) {
+ iova_mem->aram = nvadsp_aram_get_address(aram_handle);
+ mem->aram = aram_handle;
+ iova_mem->aram_flag = mem->aram_flag = 1;
+ dev_dbg(dev, "%s aram %x\n", name, iova_mem->aram);
+ } else {
+ dev_dbg(dev, "app %s no ARAM memory ! using DRAM\n",
+ name);
+ mem->aram = nvadsp_alloc_coherent(sz->aram,
+ &da, GFP_KERNEL);
+ if (!mem->aram) {
+ iova_mem->aram_flag = mem->aram_flag = 0;
+ dev_err(dev,
+ "app %s aram memory alloc failed\n",
+ name);
+ goto end;
+ }
+ iova_mem->aram = (uint32_t)da;
+ dev_dbg(dev, "%s :: mem.aram %p 0x%x\n", name,
+ mem->aram, iova_mem->aram);
+ }
+ }
+
+ if (sz->aram_x) {
+ aram_handle = nvadsp_aram_request(name, sz->aram_x);
+ if (!IS_ERR_OR_NULL(aram_handle)) {
+ iova_mem->aram_x = nvadsp_aram_get_address(aram_handle);
+ mem->aram_x = aram_handle;
+ iova_mem->aram_x_flag = mem->aram_x_flag = 1;
+ dev_dbg(dev, "aram_x %x\n", iova_mem->aram_x);
+ } else {
+ iova_mem->aram_x = 0;
+ iova_mem->aram_x_flag = mem->aram_x_flag = 0;
+ dev_err(dev, "app %s aram x memory alloc failed\n",
+ name);
+ }
+ }
+ return 0;
+
+end:
+ free_instance_memory(app, sz);
+ return -ENOMEM;
+}
+
+static void fill_app_instance_data(nvadsp_app_info_t *app,
+ struct nvadsp_app_service *ser, nvadsp_app_args_t *app_args,
+ struct run_app_instance_data *data, uint32_t stack_sz)
+{
+ adsp_app_iova_mem_t *iova_mem = &app->iova_mem;
+
+ data->adsp_mod_ptr = ser->mod->adsp_module_ptr;
+ /* copy the iova address to adsp so that adsp can access the memory */
+ data->dram_data_ptr = iova_mem->dram;
+ data->dram_shared_ptr = iova_mem->shared;
+ data->dram_shared_wc_ptr = iova_mem->shared_wc;
+ data->aram_ptr = iova_mem->aram;
+ data->aram_flag = iova_mem->aram_flag;
+ data->aram_x_ptr = iova_mem->aram_x;
+ data->aram_x_flag = iova_mem->aram_x_flag;
+
+ if (app_args)
+ memcpy(&data->app_args, app_args, sizeof(nvadsp_app_args_t));
+ /*
+ * app on adsp holds the reference of host app instance to communicate
+ * back when completed. This way we do not need to iterate through the
+ * list to find the instance.
+ */
+ data->host_ref = (uint64_t)app;
+
+ /* copy instance mem_size */
+ memcpy(&data->mem_size, ser->mem_size, sizeof(struct app_mem_size));
+}
+
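+/*
+ * Allocate and initialize a host-side instance of the service: assign a
+ * unique instance id, create its instance memory, add it to the service's
+ * instance list and fill the shared run data consumed by the ADSP-side
+ * loader.
+ */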
+static nvadsp_app_info_t *create_app_instance(nvadsp_app_handle_t handle,
+ nvadsp_app_args_t *app_args, struct run_app_instance_data *data,
+ app_complete_status_notifier notifier, uint32_t stack_size,
+ uint32_t core_id)
+{
+ struct nvadsp_app_service *ser = (void *)handle;
+ struct device *dev = &priv.pdev->dev;
+ nvadsp_app_info_t *app;
+ int *state;
+ int *id;
+
+ app = kzalloc(sizeof(*app), GFP_KERNEL);
+ if (unlikely(!app)) {
+ dev_err(dev, "cannot allocate memory for app %s instance\n",
+ ser->name);
+ goto err_value;
+ }
+ /* set the instance name with the app name */
+ app->name = ser->name;
+ /* associate a unique id */
+ id = (int *)&app->instance_id;
+ *id = ser->generated_instance_id++;
+ /*
+ * hold the pointer to the service, to dereference later during deinit
+ */
+ app->handle = ser;
+
+ /* create the instance memory required by the app instance */
+ if (create_instance_memory(app, ser->mem_size)) {
+ dev_err(dev, "instance creation failed for app %s:%d\n",
+ app->name, app->instance_id);
+ goto free_app;
+ }
+
+ /* assign the stack that is needed by the app */
+ data->stack_size = stack_size;
+ /* assign the core that is needed by the app */
+ data->core_id = core_id;
+
+ /* set the state to INITIALIZED. No need to do it in a spin lock */
+ state = (int *)&app->state;
+ *state = NVADSP_APP_STATE_INITIALIZED;
+
+ /* increment instance count and add the app instance to service list */
+ mutex_lock(&ser->lock);
+ list_add_tail(&app->node, &ser->app_head);
+ ser->instance++;
+ mutex_unlock(&ser->lock);
+
+ fill_app_instance_data(app, ser, app_args, data, stack_size);
+
+ init_completion(&app->wait_for_app_start);
+ init_completion(&app->wait_for_app_complete);
+ set_app_complete_notifier(app, notifier);
+
+ dev_dbg(dev, "app %s instance %d initialized\n",
+ app->name, app->instance_id);
+ dev_dbg(dev, "app %s has %d instances\n", ser->name, ser->instance);
+ goto end;
+
+free_app:
+ kfree(app);
+err_value:
+ app = ERR_PTR(-ENOMEM);
+end:
+ return app;
+}
+
+nvadsp_app_info_t __must_check *nvadsp_app_init(nvadsp_app_handle_t handle,
+ nvadsp_app_args_t *args)
+{
+ struct nvadsp_app_shared_msg_pool *msg_pool;
+ struct nvadsp_shared_mem *shared_mem;
+ union app_loader_message *message;
+ struct nvadsp_drv_data *drv_data;
+ struct app_loader_data *data;
+ nvadsp_app_info_t *app;
+ msgq_t *msgq_send;
+ int *state;
+ unsigned long flags, ret = 0;
+
+ if (IS_ERR_OR_NULL(priv.pdev)) {
+ pr_err("ADSP Driver is not initialized\n");
+ goto err;
+ }
+
+ drv_data = platform_get_drvdata(priv.pdev);
+
+ if (!drv_data->adsp_os_running) {
+ pr_err("ADSP is not running\n");
+ goto err;
+ }
+
+ if (IS_ERR_OR_NULL(handle)) {
+ pr_err("ADSP APP handle is NULL\n");
+ goto err;
+ }
+
+ message = kzalloc(sizeof(*message), GFP_KERNEL);
+ if (!message) {
+ pr_err("Failed to allocate memory for ADSP msg\n");
+ goto err;
+ }
+
+ shared_mem = drv_data->shared_adsp_os_data;
+ msg_pool = &shared_mem->app_shared_msg_pool;
+ msgq_send = &msg_pool->app_loader_send_message.msgq;
+ data = &message->data;
+
+ /* Pinning app to core 0 by default */
+ app = create_app_instance(handle, args, &data->app_init, NULL, 0, 0);
+ if (IS_ERR_OR_NULL(app)) {
+ pr_err("Failed to create APP instance\n");
+ kfree(message);
+ goto err;
+ }
+ app->priv = data;
+ data->app_init.message = ADSP_APP_INIT;
+
+ message->msgq_msg.size = MSGQ_MSG_PAYLOAD_WSIZE(*message);
+
+ spin_lock_irqsave(&drv_data->mbox_lock, flags);
+ msgq_queue_message(msgq_send, &message->msgq_msg);
+ spin_unlock_irqrestore(&drv_data->mbox_lock, flags);
+
+ if (app->return_status) {
+ state = (int *)&app->state;
+ *state = NVADSP_APP_STATE_STARTED;
+ }
+
+ nvadsp_mbox_send(&priv.mbox, 0, NVADSP_MBOX_SMSG, false, 0);
+
+ ret = wait_for_completion_timeout(&app->wait_for_app_start,
+ msecs_to_jiffies(ADSP_APP_INIT_TIMEOUT));
+ if (!ret) {
+ delete_app_instance(app);
+ return NULL;
+ }
+ init_completion(&app->wait_for_app_start);
+ return app;
+err:
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(nvadsp_app_init);
+
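+/*
+ * Queue the prepared loader message to the shared message queue, kick the
+ * app-loader mailbox and, if @block is set, wait for the ADSP to report the
+ * start status.
+ */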
+static int start_app_on_adsp(nvadsp_app_info_t *app,
+ union app_loader_message *message, bool block)
+{
+ struct nvadsp_app_shared_msg_pool *msg_pool;
+ struct device *dev = &priv.pdev->dev;
+ struct nvadsp_shared_mem *shared_mem;
+ struct nvadsp_drv_data *drv_data;
+ msgq_t *msgq_send;
+ int *state;
+ unsigned long flags;
+
+ drv_data = platform_get_drvdata(priv.pdev);
+ shared_mem = drv_data->shared_adsp_os_data;
+ msg_pool = &shared_mem->app_shared_msg_pool;
+ msgq_send = &msg_pool->app_loader_send_message.msgq;
+
+ message->msgq_msg.size = MSGQ_MSG_PAYLOAD_WSIZE(*message);
+
+ spin_lock_irqsave(&drv_data->mbox_lock, flags);
+ msgq_queue_message(msgq_send, &message->msgq_msg);
+ spin_unlock_irqrestore(&drv_data->mbox_lock, flags);
+
+ state = (int *)&app->state;
+ *state = NVADSP_APP_STATE_STARTED;
+
+ nvadsp_mbox_send(&priv.mbox, 0, NVADSP_MBOX_SMSG, false, 0);
+
+ if (block) {
+ wait_for_completion(&app->wait_for_app_start);
+ if (app->return_status) {
+ dev_err(dev, "%s app instance %d failed to start\n",
+ app->name, app->instance_id);
+ state = (int *)&app->state;
+ *state = NVADSP_APP_STATE_INITIALIZED;
+ }
+ }
+
+ return app->return_status;
+}
+
+int nvadsp_app_start(nvadsp_app_info_t *app)
+{
+ union app_loader_message *message;
+ struct app_loader_data *data;
+ struct nvadsp_drv_data *drv_data;
+ int ret = -EINVAL;
+
+ if (IS_ERR_OR_NULL(app))
+ return -EINVAL;
+
+ message = app->priv;
+ data = &message->data;
+
+ if (IS_ERR_OR_NULL(priv.pdev)) {
+ pr_err("ADSP Driver is not initialized\n");
+ goto err;
+ }
+
+ drv_data = platform_get_drvdata(priv.pdev);
+
+ if (!drv_data->adsp_os_running)
+ goto err;
+
+ data->app_init.message = ADSP_APP_START;
+ data->app_init.adsp_ref = app->token;
+ data->app_init.stack_size = app->stack_size;
+ ret = start_app_on_adsp(app, app->priv, true);
+err:
+ return ret;
+}
+EXPORT_SYMBOL(nvadsp_app_start);
+
+nvadsp_app_info_t *nvadsp_run_app(nvadsp_os_handle_t os_handle,
+ const char *appfile, nvadsp_app_args_t *app_args,
+ app_complete_status_notifier notifier, uint32_t stack_sz,
+ uint32_t core_id, bool block)
+{
+ union app_loader_message message = {};
+ nvadsp_app_handle_t service_handle;
+ struct nvadsp_drv_data *drv_data;
+ nvadsp_app_info_t *info = NULL;
+ struct app_loader_data *data;
+ struct device *dev;
+ int ret;
+
+ if (IS_ERR_OR_NULL(priv.pdev)) {
+ pr_err("ADSP Driver is not initialized\n");
+ info = ERR_PTR(-EINVAL);
+ goto end;
+ }
+
+ drv_data = platform_get_drvdata(priv.pdev);
+ dev = &priv.pdev->dev;
+
+ if (!drv_data->adsp_os_running)
+ goto end;
+
+ if (IS_ERR_OR_NULL(appfile))
+ goto end;
+
+ data = &message.data;
+ service_handle = app_load(appfile, NULL, true);
+ if (!service_handle) {
+ dev_err(dev, "unable to load the app %s\n", appfile);
+ goto end;
+ }
+
+ info = create_app_instance(service_handle, app_args,
+ &data->app_init, notifier, stack_sz, core_id);
+ if (IS_ERR_OR_NULL(info)) {
+ dev_err(dev, "unable to create instance for app %s\n", appfile);
+ goto end;
+ }
+ data->app_init.message = RUN_ADSP_APP;
+
+ ret = start_app_on_adsp(info, &message, block);
+ if (ret) {
+ delete_app_instance(info);
+ info = NULL;
+ }
+end:
+ return info;
+}
+EXPORT_SYMBOL(nvadsp_run_app);
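+
+/*
+ * Illustrative client flow (names are placeholders, not shipped apps;
+ * error checks omitted):
+ *
+ *   nvadsp_app_handle_t h = nvadsp_app_load("example", "example.elf");
+ *   nvadsp_app_info_t *app = nvadsp_app_init(h, NULL);
+ *   nvadsp_app_start(app);
+ *   ...
+ *   nvadsp_exit_app(app, false);
+ *   nvadsp_app_unload(h);
+ */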
+
+static void delete_app_instance(nvadsp_app_info_t *app)
+{
+ struct nvadsp_app_service *ser =
+ (struct nvadsp_app_service *)app->handle;
+ struct device *dev = &priv.pdev->dev;
+
+ dev_dbg(dev, "%s:freeing app %s:%d\n",
+ __func__, app->name, app->instance_id);
+
+ /* update the service app instance manager atomically */
+ mutex_lock(&ser->lock);
+ ser->instance--;
+ list_del(&app->node);
+ mutex_unlock(&ser->lock);
+
+ /* free instance memory */
+ free_instance_memory(app, ser->mem_size);
+ kfree(app->priv);
+ kfree(app);
+}
+
+void nvadsp_exit_app(nvadsp_app_info_t *app, bool terminate)
+{
+ int *state;
+
+ if (IS_ERR_OR_NULL(priv.pdev)) {
+ pr_err("ADSP Driver is not initialized\n");
+ return;
+ }
+
+ if (IS_ERR_OR_NULL(app))
+ return;
+
+ /* TODO: add termination if possible to kill thread on adsp */
+ if (app->state == NVADSP_APP_STATE_STARTED) {
+ wait_for_completion(&app->wait_for_app_complete);
+ state = (int *)&app->state;
+ *state = NVADSP_APP_STATE_INITIALIZED;
+ }
+ delete_app_instance(app);
+}
+EXPORT_SYMBOL(nvadsp_exit_app);
+
+int nvadsp_app_deinit(nvadsp_app_info_t *app)
+{
+ nvadsp_exit_app(app, false);
+ return 0;
+}
+EXPORT_SYMBOL(nvadsp_app_deinit);
+
+int nvadsp_app_stop(nvadsp_app_info_t *app)
+{
+ return -ENOENT;
+}
+EXPORT_SYMBOL(nvadsp_app_stop);
+
+void nvadsp_app_unload(nvadsp_app_handle_t handle)
+{
+ struct nvadsp_drv_data *drv_data;
+ struct nvadsp_app_service *ser;
+ struct device *dev;
+
+ if (!priv.pdev) {
+ pr_err("ADSP Driver is not initialized\n");
+ return;
+ }
+
+ drv_data = platform_get_drvdata(priv.pdev);
+ dev = &priv.pdev->dev;
+
+ if (!drv_data->adsp_os_running)
+ return;
+
+ if (IS_ERR_OR_NULL(handle))
+ return;
+
+ ser = (struct nvadsp_app_service *)handle;
+ if (!ser->mod->dynamic)
+ return;
+
+ mutex_lock(&priv.service_lock_list);
+ if (ser->instance) {
+ dev_err(dev, "cannot unload app %s, has instances %d\n",
+ ser->name, ser->instance);
+ mutex_unlock(&priv.service_lock_list);
+ return;
+ }
+
+ list_del(&ser->node);
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(ser->debugfs);
+#endif
+ unload_adsp_module(ser->mod);
+ devm_kfree(dev, ser);
+ mutex_unlock(&priv.service_lock_list);
+}
+EXPORT_SYMBOL(nvadsp_app_unload);
+
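+/*
+ * Mailbox handler for app status messages: dequeue the status record from
+ * the shared message queue, note the return status and ADSP-side token,
+ * run the optional completion notifier and wake start/complete waiters.
+ */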
+static status_t nvadsp_app_receive_handler(uint32_t msg, void *hdata)
+{
+ union app_complete_status_message message = { };
+ struct nvadsp_app_shared_msg_pool *msg_pool;
+ struct app_complete_status_data *data;
+ struct nvadsp_shared_mem *shared_mem;
+ struct nvadsp_drv_data *drv_data;
+ struct platform_device *pdev;
+ nvadsp_app_info_t *app;
+ struct device *dev;
+ msgq_t *msgq_recv;
+ uint32_t *token;
+
+ pdev = hdata;
+ dev = &pdev->dev;
+ drv_data = platform_get_drvdata(pdev);
+ shared_mem = drv_data->shared_adsp_os_data;
+ msg_pool = &shared_mem->app_shared_msg_pool;
+ msgq_recv = &msg_pool->app_loader_recv_message.msgq;
+ data = &message.complete_status_data;
+
+ message.msgq_msg.size = MSGQ_MSG_PAYLOAD_WSIZE(*data);
+ if (msgq_dequeue_message(msgq_recv, &message.msgq_msg)) {
+ dev_err(dev, "unable to dequeue app status message\n");
+ return 0;
+ }
+
+ app = (nvadsp_app_info_t *)data->host_ref;
+ app->return_status = data->status;
+ app->status_msg = data->header.message;
+ token = (uint32_t *)&app->token;
+ *token = data->adsp_ref;
+
+ if (app->complete_status_notifier) {
+ app->complete_status_notifier(app,
+ app->status_msg, app->return_status);
+ }
+
+ switch (data->header.message) {
+ case ADSP_APP_START_STATUS:
+ complete_all(&app->wait_for_app_start);
+ break;
+ case ADSP_APP_COMPLETE_STATUS:
+ complete_all(&app->wait_for_app_complete);
+ break;
+ }
+
+ return 0;
+}
+
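+/*
+ * Drain the static-app records shared by the ADSP OS at boot: the list is
+ * terminated by an entry with a zero mod_ptr, and apps flagged
+ * ADSP_APP_FLAG_START_ON_BOOT are skipped.
+ */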
+int load_adsp_static_apps(void)
+{
+ struct nvadsp_app_shared_msg_pool *msg_pool;
+ struct nvadsp_shared_mem *shared_mem;
+ struct nvadsp_drv_data *drv_data;
+ struct platform_device *pdev;
+ struct device *dev;
+ msgq_t *msgq_recv;
+
+ pdev = priv.pdev;
+ dev = &pdev->dev;
+ drv_data = platform_get_drvdata(pdev);
+ shared_mem = drv_data->shared_adsp_os_data;
+ msg_pool = &shared_mem->app_shared_msg_pool;
+ msgq_recv = &msg_pool->app_loader_recv_message.msgq;
+
+ while (1) {
+ union app_complete_status_message message = { };
+ struct adsp_static_app_data *data;
+ struct adsp_shared_app *shared_app;
+ char *name;
+
+ data = &message.static_app_data;
+ message.msgq_msg.size = MSGQ_MSG_PAYLOAD_WSIZE(*data);
+ if (msgq_dequeue_message(msgq_recv, &message.msgq_msg)) {
+ dev_err(dev, "dequeue of static apps failed\n");
+ return -EINVAL;
+ }
+ shared_app = &data->shared_app;
+ name = shared_app->name;
+ if (!shared_app->mod_ptr)
+ break;
+ /* Skip Start on boot apps */
+ if (shared_app->flags & ADSP_APP_FLAG_START_ON_BOOT)
+ continue;
+ app_load(name, shared_app, false);
+ }
+ return 0;
+}
+
+int __init nvadsp_app_module_probe(struct platform_device *pdev)
+{
+#ifdef CONFIG_DEBUG_FS
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+#endif
+ uint16_t mbox_id = APP_LOADER_MBOX_ID;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ dev_info(dev, "%s\n", __func__);
+
+ ret = nvadsp_mbox_open(&priv.mbox, &mbox_id,
+ "app_service", nvadsp_app_receive_handler, pdev);
+ if (ret) {
+ dev_err(dev, "unable to open mailbox\n");
+ goto end;
+ }
+ priv.pdev = pdev;
+ INIT_LIST_HEAD(&priv.service_list);
+ init_completion(&priv.os_load_complete);
+ mutex_init(&priv.service_lock_list);
+
+#ifdef CONFIG_DEBUG_FS
+ if (adsp_app_debug_init(drv_data->adsp_debugfs_root))
+ dev_err(&pdev->dev, "unable to create adsp apps debugfs\n");
+#endif
+end:
+ return ret;
+}
diff --git a/drivers/platform/tegra/nvadsp/app_loader_linker.c b/drivers/platform/tegra/nvadsp/app_loader_linker.c
new file mode 100644
index 00000000..cfd8d193
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/app_loader_linker.c
@@ -0,0 +1,972 @@
+/*
+ * app_loader_linker.c
+ *
+ * ADSP OS App management
+ *
+ * Copyright (C) 2014-2022 NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/elf.h>
+#include <linux/firmware.h>
+#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
+#include <linux/tegra_nvadsp.h>
+
+#include "os.h"
+#include "dram_app_mem_manager.h"
+#include "adsp_shared_struct.h"
+
+#ifdef CONFIG_DEBUG_SET_MODULE_RONX
+# define debug_align(X) ALIGN(X, PAGE_SIZE)
+#else
+# define debug_align(X) (X)
+#endif
+
+
+#ifndef ARCH_SHF_SMALL
+#define ARCH_SHF_SMALL 0
+#endif
+
+#define BITS_PER_INT 32
+#define INIT_OFFSET_MASK (1U << (BITS_PER_INT-1))
+
+
+#define HWCAP_SWP (1 << 0)
+#define HWCAP_HALF (1 << 1)
+#define HWCAP_THUMB (1 << 2)
+#define HWCAP_26BIT (1 << 3) /* Play it safe */
+#define HWCAP_FAST_MULT (1 << 4)
+#define HWCAP_FPA (1 << 5)
+#define HWCAP_VFP (1 << 6)
+#define HWCAP_EDSP (1 << 7)
+#define HWCAP_JAVA (1 << 8)
+#define HWCAP_IWMMXT (1 << 9)
+#define HWCAP_CRUNCH (1 << 10)
+#define HWCAP_THUMBEE (1 << 11)
+#define HWCAP_NEON (1 << 12)
+#define HWCAP_VFPv3 (1 << 13)
+#define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */
+#define HWCAP_TLS (1 << 15)
+#define HWCAP_VFPv4 (1 << 16)
+#define HWCAP_IDIVA (1 << 17)
+#define HWCAP_IDIVT (1 << 18)
+#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
+#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
+
+#define HWCAP_LPAE (1 << 20)
+#define HWCAP_EVTSTRM_32 (1 << 21)
+
+
+#define EF_ARM_EABI_MASK 0xff000000
+#define EF_ARM_EABI_UNKNOWN 0x00000000
+#define EF_ARM_EABI_VER1 0x01000000
+#define EF_ARM_EABI_VER2 0x02000000
+#define EF_ARM_EABI_VER3 0x03000000
+#define EF_ARM_EABI_VER4 0x04000000
+#define EF_ARM_EABI_VER5 0x05000000
+
+#define EF_ARM_BE8 0x00800000 /* ABI 4,5 */
+#define EF_ARM_LE8 0x00400000 /* ABI 4,5 */
+#define EF_ARM_MAVERICK_FLOAT 0x00000800 /* ABI 0 */
+#define EF_ARM_VFP_FLOAT 0x00000400 /* ABI 0 */
+#define EF_ARM_SOFT_FLOAT 0x00000200 /* ABI 0 */
+#define EF_ARM_OLD_ABI 0x00000100 /* ABI 0 */
+#define EF_ARM_NEW_ABI 0x00000080 /* ABI 0 */
+#define EF_ARM_ALIGN8 0x00000040 /* ABI 0 */
+#define EF_ARM_PIC 0x00000020 /* ABI 0 */
+#define EF_ARM_MAPSYMSFIRST 0x00000010 /* ABI 2 */
+#define EF_ARM_APCS_FLOAT 0x00000010 /* ABI 0, floats in fp regs */
+#define EF_ARM_DYNSYMSUSESEGIDX 0x00000008 /* ABI 2 */
+#define EF_ARM_APCS_26 0x00000008 /* ABI 0 */
+#define EF_ARM_SYMSARESORTED 0x00000004 /* ABI 1,2 */
+#define EF_ARM_INTERWORK 0x00000004 /* ABI 0 */
+#define EF_ARM_HASENTRY 0x00000002 /* All */
+#define EF_ARM_RELEXEC 0x00000001 /* All */
+
+
+#define R_ARM_NONE 0
+#define R_ARM_PC24 1
+#define R_ARM_ABS32 2
+#define R_ARM_CALL 28
+#define R_ARM_JUMP24 29
+#define R_ARM_TARGET1 38
+#define R_ARM_V4BX 40
+#define R_ARM_PREL31 42
+#define R_ARM_MOVW_ABS_NC 43
+#define R_ARM_MOVT_ABS 44
+
+#define R_ARM_THM_CALL 10
+#define R_ARM_THM_JUMP24 30
+#define R_ARM_THM_MOVW_ABS_NC 47
+#define R_ARM_THM_MOVT_ABS 48
+
+
+struct load_info {
+ const char *name;
+ struct elf32_hdr *hdr;
+ unsigned long len;
+ struct elf32_shdr *sechdrs;
+ char *secstrings, *strtab;
+ unsigned long symoffs, stroffs;
+ unsigned int num_debug;
+ bool sig_ok;
+ struct device *dev;
+ struct {
+ unsigned int sym, str, mod, vers, info, pcpu;
+ } index;
+};
+
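+/*
+ * Apply ARM ELF REL relocations for one section. Patching is done in the
+ * host-side copy of the module while branch targets are computed against
+ * the ADSP-side load address (adsp_loc), with ARM<->Thumb mode fix-ups for
+ * function symbols.
+ */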
+static int
+apply_relocate(const struct load_info *info, Elf32_Shdr *sechdrs,
+ const char *strtab, unsigned int symindex,
+ unsigned int relindex, struct adsp_module *module)
+{
+ Elf32_Shdr *symsec = sechdrs + symindex;
+ Elf32_Shdr *relsec = sechdrs + relindex;
+ Elf32_Shdr *dstsec = sechdrs + relsec->sh_info;
+ Elf32_Rel *rel = (void *)info->hdr + relsec->sh_offset;
+ struct device *dev = info->dev;
+ unsigned int i;
+
+ dev_dbg(dev, "the relative section is %s dst %s sym %s\n",
+ info->secstrings + relsec->sh_name,
+ info->secstrings + dstsec->sh_name,
+ info->secstrings + symsec->sh_name);
+
+ for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) {
+ void *loc;
+ Elf32_Sym *sym;
+ const char *symname;
+ s32 offset;
+ u32 upper, lower, sign, j1, j2;
+ uint32_t adsp_loc;
+ bool switch_mode = false;
+ int h_bit = 0;
+
+ offset = ELF32_R_SYM(rel->r_info);
+ if (offset < 0 || (offset >
+ (symsec->sh_size / sizeof(Elf32_Sym)))) {
+ dev_err(dev, "%s: section %u reloc %u: bad relocation sym offset\n",
+ module->name, relindex, i);
+ return -ENOEXEC;
+ }
+
+ sym = ((Elf32_Sym *)(module->module_ptr
+ + symsec->sh_addr)) + offset;
+ symname = info->strtab + sym->st_name;
+
+ dev_dbg(dev, "%s\n", symname);
+
+ if (rel->r_offset < 0 ||
+ rel->r_offset > dstsec->sh_size - sizeof(u32)) {
+ dev_err(dev,
+ "%s: section %u reloc %u sym '%s': out of bounds relocation, offset %d size %u\n",
+ module->name, relindex, i, symname,
+ rel->r_offset, dstsec->sh_size);
+ return -ENOEXEC;
+ }
+
+ loc = module->module_ptr + dstsec->sh_addr + rel->r_offset;
+ adsp_loc = module->adsp_module_ptr +
+ dstsec->sh_addr + rel->r_offset;
+ dev_dbg(dev, "%p 0x%x\n", loc, adsp_loc);
+
+ if (ELF_ST_BIND(sym->st_info) == STB_WEAK
+ && sym->st_shndx == SHN_UNDEF) {
+ dev_dbg(dev, "STB_WEAK %s\n", symname);
+ continue;
+ }
+
+ switch (ELF32_R_TYPE(rel->r_info)) {
+ case R_ARM_NONE:
+ dev_dbg(dev, "R_ARM_NONE\n");
+ /* ignore */
+ break;
+
+ case R_ARM_ABS32:
+ case R_ARM_TARGET1:
+ dev_dbg(dev, "R_ARM_ABS32\n");
+ *(u32 *)loc += sym->st_value;
+ dev_dbg(dev, "addrs: 0x%x %p values: 0x%x 0x%x\n",
+ adsp_loc, loc, sym->st_value,
+ *(u32 *)loc);
+ break;
+
+ case R_ARM_PC24:
+ case R_ARM_CALL:
+ case R_ARM_JUMP24:
+ dev_dbg(dev, "R_ARM_CALL R_ARM_JUMP24\n");
+ offset = (*(u32 *)loc & 0x00ffffff) << 2;
+ if (offset & 0x02000000)
+ offset -= 0x04000000;
+
+ offset += sym->st_value - adsp_loc;
+ if ((ELF32_ST_TYPE(sym->st_info) == STT_FUNC)
+ && (offset & 3)) {
+ dev_dbg(dev, "switching the mode from ARM to THUMB\n");
+ switch_mode = true;
+ h_bit = (offset & 2);
+ dev_dbg(dev,
+ "%s offset 0x%x hbit %d",
+ symname, offset, h_bit);
+ }
+
+ if (offset <= (s32)0xfe000000 ||
+ offset >= (s32)0x02000000) {
+ dev_err(dev,
+ "%s: section %u reloc %u sym '%s': relocation %u out of range (%p -> %#x)\n",
+ module->name, relindex, i, symname,
+ ELF32_R_TYPE(rel->r_info), loc,
+ sym->st_value);
+ return -ENOEXEC;
+ }
+
+ offset >>= 2;
+
+ *(u32 *)loc &= 0xff000000;
+ *(u32 *)loc |= offset & 0x00ffffff;
+ if (switch_mode) {
+ *(u32 *)loc &= ~(0xff000000);
+ if (h_bit)
+ *(u32 *)loc |= 0xfb000000;
+ else
+ *(u32 *)loc |= 0xfa000000;
+ }
+
+ dev_dbg(dev,
+ "%s address 0x%x instruction 0x%x\n",
+ symname, adsp_loc, *(u32 *)loc);
+ break;
+
+ case R_ARM_V4BX:
+ dev_dbg(dev, "R_ARM_V4BX\n");
+ /* Preserve Rm and the condition code. Alter
+ * other bits to re-code instruction as
+ * MOV PC,Rm.
+ */
+ *(u32 *)loc &= 0xf000000f;
+ *(u32 *)loc |= 0x01a0f000;
+ break;
+
+ case R_ARM_PREL31:
+ dev_dbg(dev, "R_ARM_PREL31\n");
+ offset = *(u32 *)loc + sym->st_value - adsp_loc;
+ *(u32 *)loc = offset & 0x7fffffff;
+ break;
+
+ case R_ARM_MOVW_ABS_NC:
+ case R_ARM_MOVT_ABS:
+ dev_dbg(dev, "R_ARM_MOVT_ABS\n");
+ offset = *(u32 *)loc;
+ offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff);
+ offset = (offset ^ 0x8000) - 0x8000;
+
+ offset += sym->st_value;
+ if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS)
+ offset >>= 16;
+
+ *(u32 *)loc &= 0xfff0f000;
+ *(u32 *)loc |= ((offset & 0xf000) << 4) |
+ (offset & 0x0fff);
+ break;
+
+ case R_ARM_THM_CALL:
+ case R_ARM_THM_JUMP24:
+ dev_dbg(dev, "R_ARM_THM_CALL R_ARM_THM_JUMP24\n");
+ upper = *(u16 *)loc;
+ lower = *(u16 *)(loc + 2);
+
+ /*
+ * 25 bit signed address range (Thumb-2 BL and B.W
+ * instructions):
+ * S:I1:I2:imm10:imm11:0
+ * where:
+ * S = upper[10] = offset[24]
+ * I1 = ~(J1 ^ S) = offset[23]
+ * I2 = ~(J2 ^ S) = offset[22]
+ * imm10 = upper[9:0] = offset[21:12]
+ * imm11 = lower[10:0] = offset[11:1]
+ * J1 = lower[13]
+ * J2 = lower[11]
+ */
+ sign = (upper >> 10) & 1;
+ j1 = (lower >> 13) & 1;
+ j2 = (lower >> 11) & 1;
+ offset = (sign << 24) | ((~(j1 ^ sign) & 1) << 23) |
+ ((~(j2 ^ sign) & 1) << 22) |
+ ((upper & 0x03ff) << 12) |
+ ((lower & 0x07ff) << 1);
+ if (offset & 0x01000000)
+ offset -= 0x02000000;
+ offset += sym->st_value - adsp_loc;
+
+ /*
+ * For function symbols, only Thumb addresses are
+ * allowed (no interworking).
+ *
+ * For non-function symbols, the destination
+ * has no specific ARM/Thumb disposition, so
+ * the branch is resolved under the assumption
+ * that interworking is not required.
+ */
+ if (ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
+ !(offset & 1)) {
+ dev_dbg(dev,
+ "switching the mode from THUMB to ARM\n");
+ switch_mode = true;
+ offset = ALIGN(offset, 4);
+ }
+
+ if (offset <= (s32)0xff000000 ||
+ offset >= (s32)0x01000000) {
+ dev_err(dev,
+ "%s: section %u reloc %u sym '%s': relocation %u out of range (%p -> %#x)\n",
+ module->name, relindex, i, symname,
+ ELF32_R_TYPE(rel->r_info), loc,
+ sym->st_value);
+ return -ENOEXEC;
+ }
+
+ sign = (offset >> 24) & 1;
+ j1 = sign ^ (~(offset >> 23) & 1);
+ j2 = sign ^ (~(offset >> 22) & 1);
+ *(u16 *)loc = (u16)((upper & 0xf800) | (sign << 10) |
+ ((offset >> 12) & 0x03ff));
+ *(u16 *)(loc + 2) = (u16)((lower & 0xd000) |
+ (j1 << 13) | (j2 << 11) |
+ ((offset >> 1) & 0x07ff));
+
+ if (switch_mode) {
+ lower = *(u16 *)(loc + 2);
+ lower &= (~(1 << 12));
+ *(u16 *)(loc + 2) = lower;
+ }
+
+ dev_dbg(dev,
+ "%s address 0x%x upper instruction 0x%x\n",
+ symname, adsp_loc, *(u16 *)loc);
+ dev_dbg(dev,
+ "%s address 0x%x lower instruction 0x%x\n",
+ symname, adsp_loc, *(u16 *)(loc + 2));
+ break;
+
+ case R_ARM_THM_MOVW_ABS_NC:
+ case R_ARM_THM_MOVT_ABS:
+ dev_dbg(dev, "in R_ARM_THM_MOVT_ABS\n");
+ upper = *(u16 *)loc;
+ lower = *(u16 *)(loc + 2);
+
+ /*
+ * MOVT/MOVW instructions encoding in Thumb-2:
+ *
+ * i = upper[10]
+ * imm4 = upper[3:0]
+ * imm3 = lower[14:12]
+ * imm8 = lower[7:0]
+ *
+ * imm16 = imm4:i:imm3:imm8
+ */
+ offset = ((upper & 0x000f) << 12) |
+ ((upper & 0x0400) << 1) |
+ ((lower & 0x7000) >> 4) | (lower & 0x00ff);
+ offset = (offset ^ 0x8000) - 0x8000;
+ offset += sym->st_value;
+
+ if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS)
+ offset >>= 16;
+
+ *(u16 *)loc = (u16)((upper & 0xfbf0) |
+ ((offset & 0xf000) >> 12) |
+ ((offset & 0x0800) >> 1));
+ *(u16 *)(loc + 2) = (u16)((lower & 0x8f00) |
+ ((offset & 0x0700) << 4) |
+ (offset & 0x00ff));
+ break;
+
+ default:
+ dev_err(dev, "%s: unknown relocation: %u\n",
+ module->name, ELF32_R_TYPE(rel->r_info));
+ return -ENOEXEC;
+ }
+ }
+ return 0;
+}
+
+static int
+apply_relocations(struct adsp_module *mod,
+ const struct load_info *info)
+{
+ unsigned int i;
+ int err = 0;
+
+ /* Now do relocations. */
+ for (i = 1; i < info->hdr->e_shnum; i++) {
+ unsigned int infosec = info->sechdrs[i].sh_info;
+
+ /* Not a valid relocation section? */
+ if (infosec >= info->hdr->e_shnum)
+ continue;
+
+ /* Don't bother with non-allocated sections */
+ if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
+ continue;
+
+ if (info->sechdrs[i].sh_type == SHT_REL)
+ err = apply_relocate(info, info->sechdrs, info->strtab,
+ info->index.sym, i, mod);
+ else if (info->sechdrs[i].sh_type == SHT_RELA)
+ return -EINVAL;
+ if (err < 0)
+ break;
+ }
+ return err;
+}
+
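+/*
+ * Resolve the module's symbol table: undefined symbols are looked up in the
+ * global ADSP symbol table, weak undefined symbols are tolerated, and
+ * section-relative symbols are rebased to the module's ADSP load address.
+ */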
+static int
+simplify_symbols(struct adsp_module *mod,
+ const struct load_info *info)
+{
+ Elf32_Shdr *symsec = &info->sechdrs[info->index.sym];
+ Elf32_Sym *sym = mod->module_ptr + symsec->sh_addr;
+ unsigned int secbase;
+ unsigned int i;
+ int ret = 0;
+ struct global_sym_info *sym_info;
+ struct device *dev = info->dev;
+
+ for (i = 1; i < symsec->sh_size / sizeof(Elf32_Sym); i++) {
+ const char *name = info->strtab + sym[i].st_name;
+ dev_dbg(dev, "%s\n", name);
+ switch (sym[i].st_shndx) {
+ case SHN_COMMON:
+ /* We compiled with -fno-common. These are not
+ supposed to happen. */
+ dev_err(dev, "Common symbol: '%s'\n", name);
+ dev_err(dev,
+ "please compile module %s with -fno-common\n",
+ mod->name);
+ ret = -ENOEXEC;
+ goto end;
+
+ case SHN_ABS:
+ /* Don't need to do anything */
+ dev_dbg(dev, "Absolute symbol: 0x%08lx\n",
+ (long)sym[i].st_value);
+ break;
+
+ case SHN_UNDEF:
+ sym_info = find_global_symbol(name);
+
+ /* Ok if resolved. */
+ if (sym_info) {
+ dev_dbg(dev, "SHN_UNDEF sym '%s':0x%x\n",
+ name, sym_info->addr);
+ sym[i].st_value = sym_info->addr;
+ sym[i].st_info = sym_info->info;
+ break;
+ }
+
+ if (ELF_ST_BIND(sym[i].st_info) == STB_WEAK) {
+ dev_dbg(dev, "WEAK SYM %s not resolved\n",
+ name);
+ break;
+ }
+
+ dev_err(dev, "No symbol '%s' found\n", name);
+ ret = -ENOEXEC;
+ goto end;
+
+ default:
+ /* Divert to percpu allocation if a percpu var. */
+ dev_dbg(dev, "default\n");
+ secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
+ sym[i].st_value += secbase + mod->adsp_module_ptr;
+ dev_dbg(dev, "symbol %s is 0x%x\n",
+ name, sym[i].st_value);
+ break;
+ }
+ }
+end:
+ return ret;
+}
+
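+/*
+ * Reserve DRAM for the app through the dram_app_mem manager and copy every
+ * SHF_ALLOC section into the host-side mapping of that region, recording
+ * each section's offset in sh_addr.
+ */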
+static int move_module(struct adsp_module *mod, struct load_info *info)
+{
+ struct device *dev = info->dev;
+ int i;
+
+ mod->handle = dram_app_mem_request(info->name, mod->size);
+ if (!mod->handle) {
+ dev_err(dev, "cannot allocate memory for app %s\n", info->name);
+ return -ENOMEM;
+ }
+ mod->adsp_module_ptr = dram_app_mem_get_address(mod->handle);
+ mod->module_ptr = nvadsp_da_to_va_mappings(mod->adsp_module_ptr,
+ mod->size);
+ dev_info(dev, "module %s Load address %p 0x%x\n", info->name,
+ mod->module_ptr, mod->adsp_module_ptr);
+ /* Transfer each section which specifies SHF_ALLOC */
+ dev_dbg(dev, "final section addresses:\n");
+ for (i = 0; i < info->hdr->e_shnum; i++) {
+ void *dest;
+ struct elf32_shdr *shdr = &info->sechdrs[i];
+
+ if (!(shdr->sh_flags & SHF_ALLOC))
+ continue;
+
+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
+ dev_dbg(dev, "%s %d\n",
+ info->secstrings + shdr->sh_name,
+ shdr->sh_entsize);
+ dest = mod->module_ptr
+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
+ } else {
+ dev_dbg(dev, "%s %d\n",
+ info->secstrings + shdr->sh_name,
+ shdr->sh_entsize);
+ dest = mod->module_ptr + shdr->sh_entsize;
+ }
+
+ if (shdr->sh_type != SHT_NOBITS)
+ memcpy(dest,
+ (void *)info->hdr + shdr->sh_offset,
+ shdr->sh_size);
+ /* Update sh_addr to point to copy in image. */
+ shdr->sh_addr = (uint32_t)(dest - mod->module_ptr);
+ dev_dbg(dev, "name %s 0x%x %p 0x%x 0x%x\n",
+ info->secstrings + shdr->sh_name, shdr->sh_addr,
+ dest, shdr->sh_addr + mod->adsp_module_ptr,
+ shdr->sh_size);
+ }
+
+ return 0;
+}
+
+static int get_offset(struct adsp_module *mod, size_t *size,
+ struct elf32_shdr *sechdr, unsigned int section)
+{
+ int ret;
+
+ ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
+ *size = ret + sechdr->sh_size;
+ return ret;
+}
+
+static bool
+is_core_symbol(const struct elf32_sym *src,
+ const struct elf32_shdr *sechdrs, unsigned int shnum)
+{
+ const struct elf32_shdr *sec;
+
+ if (src->st_shndx == SHN_UNDEF
+ || src->st_shndx >= shnum
+ || !src->st_name)
+ return false;
+
+ sec = sechdrs + src->st_shndx;
+ if (!(sec->sh_flags & SHF_ALLOC)
+#ifndef CONFIG_KALLSYMS_ALL
+ || !(sec->sh_flags & SHF_EXECINSTR)
+#endif
+ || (sec->sh_entsize & INIT_OFFSET_MASK))
+ return false;
+
+ return true;
+}
+
+static void layout_sections(struct adsp_module *mod, struct load_info *info)
+{
+ static unsigned long const masks[][2] = {
+ /* NOTE: all executable code must be the first section
+ * in this array; otherwise modify the text_size
+ * finder in the two loops below */
+ { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
+ { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
+ { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
+ { ARCH_SHF_SMALL | SHF_ALLOC, 0 }
+ };
+ unsigned int m, i;
+ struct device *dev = info->dev;
+
+ for (i = 0; i < info->hdr->e_shnum; i++)
+ info->sechdrs[i].sh_entsize = ~0U;
+
+ dev_dbg(dev, "Core section allocation order:\n");
+ for (m = 0; m < ARRAY_SIZE(masks); ++m) {
+ for (i = 0; i < info->hdr->e_shnum; ++i) {
+ struct elf32_shdr *s = &info->sechdrs[i];
+ const char *sname = info->secstrings + s->sh_name;
+
+ if ((s->sh_flags & masks[m][0]) != masks[m][0]
+ || (s->sh_flags & masks[m][1])
+ || s->sh_entsize != ~0U
+ || strstarts(sname, ".init"))
+ continue;
+
+ s->sh_entsize = get_offset(mod, &mod->size, s, i);
+ dev_dbg(dev, "\t%s %d\n", sname, s->sh_entsize);
+ }
+ }
+
+ dev_dbg(dev, "Init section allocation order:\n");
+ for (m = 0; m < ARRAY_SIZE(masks); ++m) {
+ for (i = 0; i < info->hdr->e_shnum; ++i) {
+ struct elf32_shdr *s = &info->sechdrs[i];
+ const char *sname = info->secstrings + s->sh_name;
+
+ if ((s->sh_flags & masks[m][0]) != masks[m][0]
+ || (s->sh_flags & masks[m][1])
+ || s->sh_entsize != ~0U
+ || !strstarts(sname, ".init"))
+ continue;
+ s->sh_entsize = (get_offset(mod, &mod->size, s, i)
+ | INIT_OFFSET_MASK);
+ dev_dbg(dev, "\t%s %d\n", sname, s->sh_entsize);
+ }
+ }
+}
+
+static int rewrite_section_headers(struct load_info *info)
+{
+ unsigned int i;
+ struct device *dev = info->dev;
+
+ /* This should always be true, but let's be sure. */
+ info->sechdrs[0].sh_addr = 0;
+
+ for (i = 1; i < info->hdr->e_shnum; i++) {
+ struct elf32_shdr *shdr = &info->sechdrs[i];
+ if (shdr->sh_type != SHT_NOBITS
+ && info->len < shdr->sh_offset + shdr->sh_size) {
+ dev_err(dev, "Module len %lu truncated\n", info->len);
+ return -ENOEXEC;
+ }
+
+ /* Mark all sections sh_addr with their address in the
+ temporary image. */
+ shdr->sh_addr = shdr->sh_offset;
+
+ }
+ return 0;
+}
+
+
+static struct adsp_module *setup_load_info(struct load_info *info)
+{
+ unsigned int i;
+ int err;
+ struct adsp_module *mod;
+ struct device *dev = info->dev;
+
+ /* Set up the convenience variables */
+ info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
+ info->secstrings = (void *)info->hdr
+ + info->sechdrs[info->hdr->e_shstrndx].sh_offset;
+
+ err = rewrite_section_headers(info);
+ if (err)
+ return ERR_PTR(err);
+
+ /* Find internal symbols and strings. */
+ for (i = 1; i < info->hdr->e_shnum; i++) {
+ if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
+ info->index.sym = i;
+ info->index.str = info->sechdrs[i].sh_link;
+ info->strtab = (char *)info->hdr
+ + info->sechdrs[info->index.str].sh_offset;
+ break;
+ }
+ }
+
+ /* This is temporary: point mod into copy of data. */
+ mod = kzalloc(sizeof(struct adsp_module), GFP_KERNEL);
+ if (!mod) {
+ dev_err(dev, "Unable to create module\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (info->index.sym == 0) {
+ dev_warn(dev, "%s: module has no symbols (stripped?)\n",
+ info->name);
+ kfree(mod);
+ return ERR_PTR(-ENOEXEC);
+ }
+
+ return mod;
+}
+
+
+static void layout_symtab(struct adsp_module *mod, struct load_info *info)
+{
+ struct elf32_shdr *symsect = info->sechdrs + info->index.sym;
+ struct elf32_shdr *strsect = info->sechdrs + info->index.str;
+ const struct elf32_sym *src;
+ unsigned int i, nsrc, ndst, strtab_size = 0;
+ struct device *dev = info->dev;
+
+ /* Put symbol section at end of init part of module. */
+ symsect->sh_flags |= SHF_ALLOC;
+ symsect->sh_entsize = get_offset(mod, &mod->size, symsect,
+ info->index.sym) | INIT_OFFSET_MASK;
+ dev_dbg(dev, "\t%s %d\n", info->secstrings + symsect->sh_name,
+ symsect->sh_entsize);
+
+ src = (void *)info->hdr + symsect->sh_offset;
+ nsrc = symsect->sh_size / sizeof(*src);
+
+ /* Compute total space required for the core symbols' strtab. */
+ for (ndst = i = 0; i < nsrc; i++) {
+ if (i == 0 ||
+ is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
+ strtab_size += strlen(&info->strtab[src[i].st_name])+1;
+ ndst++;
+ }
+ }
+
+ /* Append room for core symbols at end of core part. */
+ info->symoffs = ALIGN(mod->size, symsect->sh_addralign ?: 1);
+ info->stroffs = mod->size = info->symoffs + ndst * sizeof(Elf32_Sym);
+ mod->size += strtab_size;
+
+ /* Put string table section at end of init part of module. */
+ strsect->sh_flags |= SHF_ALLOC;
+ strsect->sh_entsize = get_offset(mod, &mod->size, strsect,
+ info->index.str) | INIT_OFFSET_MASK;
+ dev_dbg(dev, "\t%s %d\n",
+ info->secstrings + strsect->sh_name,
+ strsect->sh_entsize);
+}
+
+static struct adsp_module *layout_and_allocate(struct load_info *info)
+{
+ /* Module within temporary copy. */
+ struct adsp_module *mod;
+ int err;
+
+ mod = setup_load_info(info);
+ if (IS_ERR(mod))
+ return mod;
+
+ mod->name = info->name;
+
+ /* Determine total sizes, and put offsets in sh_entsize. For now
+ this is done generically; there doesn't appear to be any
+ special cases for the architectures. */
+ layout_sections(mod, info);
+ layout_symtab(mod, info);
+
+ /* Allocate and move to the final place */
+ err = move_module(mod, info);
+ if (err) {
+ /* TODO: need to handle error path more generically */
+ kfree(mod);
+ return ERR_PTR(err);
+ }
+
+ return mod;
+}
+
+static int elf_check_arch_arm32(const struct elf32_hdr *x)
+{
+ unsigned int eflags;
+
+ /* Make sure it's an ARM executable */
+ if (x->e_machine != EM_ARM)
+ return 0;
+
+ /* Make sure the entry address is reasonable */
+ if (x->e_entry & 1) {
+ if (!(ELF_HWCAP & HWCAP_THUMB))
+ return 0;
+ } else if (x->e_entry & 3)
+ return 0;
+
+ eflags = x->e_flags;
+ if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
+ unsigned int flt_fmt;
+
+ /* APCS26 is only allowed if the CPU supports it */
+ if ((eflags & EF_ARM_APCS_26) && !(ELF_HWCAP & HWCAP_26BIT))
+ return 0;
+
+ flt_fmt = eflags & (EF_ARM_VFP_FLOAT | EF_ARM_SOFT_FLOAT);
+
+ /* VFP requires the supporting code */
+ if (flt_fmt == EF_ARM_VFP_FLOAT && !(ELF_HWCAP & HWCAP_VFP))
+ return 0;
+ }
+ return 1;
+}
+
+static int elf_header_check(struct load_info *info)
+{
+ if (info->len < sizeof(*(info->hdr)))
+ return -ENOEXEC;
+
+ if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
+ || info->hdr->e_type != ET_REL
+ || !elf_check_arch_arm32(info->hdr)
+ || info->hdr->e_shentsize != sizeof(Elf32_Shdr))
+ return -ENOEXEC;
+
+ if (info->hdr->e_shoff >= info->len
+ || (info->hdr->e_shnum * sizeof(Elf32_Shdr) >
+ info->len - info->hdr->e_shoff))
+ return -ENOEXEC;
+
+ return 0;
+}
+
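+/*
+ * Static apps are fully described by the shared_app record exported by the
+ * ADSP OS (load address, memory sizes, version); no ELF parsing is needed
+ * on the host.
+ */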
+struct adsp_module *load_adsp_static_module(const char *appname,
+ struct adsp_shared_app *shared_app, struct device *dev)
+{
+ struct adsp_module *mod = NULL;
+
+ mod = kzalloc(sizeof(struct adsp_module), GFP_KERNEL);
+ if (!mod)
+ return NULL;
+
+ memcpy((struct app_mem_size *)&mod->mem_size,
+ &shared_app->mem_size, sizeof(shared_app->mem_size));
+
+ mod->adsp_module_ptr = shared_app->mod_ptr;
+ mod->dynamic = false;
+ memcpy(mod->version, shared_app->version, sizeof(shared_app->version));
+
+ return mod;
+}
+
+struct adsp_module *load_adsp_dynamic_module(const char *appname,
+ const char *appfile, struct device *dev)
+{
+ struct load_info info = { };
+ struct adsp_module *mod = NULL;
+ const struct firmware *fw;
+ struct elf32_shdr *data_shdr;
+ struct elf32_shdr *shared_shdr;
+ struct elf32_shdr *shared_wc_shdr;
+ struct elf32_shdr *aram_shdr;
+ struct elf32_shdr *aram_x_shdr;
+ struct app_mem_size *mem_size;
+ void *buf;
+ int ret;
+
+ ret = request_firmware(&fw, appfile, dev);
+ if (ret < 0) {
+ dev_err(dev,
+ "request firmware for %s(%s) failed with %d\n",
+ appname, appfile, ret);
+ return ERR_PTR(ret);
+ }
+
+ buf = kzalloc(fw->size, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto release_firmware;
+ }
+
+ memcpy(buf, fw->data, fw->size);
+
+ info.hdr = (struct elf32_hdr *)buf;
+ info.len = fw->size;
+ info.dev = dev;
+ info.name = appname;
+
+ ret = elf_header_check(&info);
+ if (ret) {
+ dev_err(dev,
+ "%s is not an elf file\n", appfile);
+ goto error_free_memory;
+ }
+
+ /* Figure out module layout, and allocate all the memory. */
+ mod = layout_and_allocate(&info);
+ if (IS_ERR_OR_NULL(mod))
+ goto error_free_memory;
+
+ /* update adsp specific sections */
+ data_shdr = nvadsp_get_section(fw, ".dram_data");
+ shared_shdr = nvadsp_get_section(fw, ".dram_shared");
+ shared_wc_shdr = nvadsp_get_section(fw, ".dram_shared_wc");
+ aram_shdr = nvadsp_get_section(fw, ".aram_data");
+ aram_x_shdr = nvadsp_get_section(fw, ".aram_x_data");
+
+ mem_size = (void *)&mod->mem_size;
+
+ if (data_shdr) {
+ dev_dbg(dev, "mem_size.dram_data %d\n",
+ data_shdr->sh_size);
+ mem_size->dram = data_shdr->sh_size;
+ }
+
+ if (shared_shdr) {
+ dev_dbg(dev, "mem_size.dram_shared %d\n",
+ shared_shdr->sh_size);
+ mem_size->dram_shared =
+ shared_shdr->sh_size;
+ }
+
+ if (shared_wc_shdr) {
+ dev_dbg(dev, "shared_wc_shdr->sh_size %d\n",
+ shared_wc_shdr->sh_size);
+ mem_size->dram_shared_wc =
+ shared_wc_shdr->sh_size;
+ }
+
+ if (aram_shdr) {
+ dev_dbg(dev, "aram_shdr->sh_size %d\n", aram_shdr->sh_size);
+ mem_size->aram = aram_shdr->sh_size;
+ }
+
+ if (aram_x_shdr) {
+ dev_dbg(dev,
+ "aram_x_shdr->sh_size %d\n", aram_x_shdr->sh_size);
+ mem_size->aram_x = aram_x_shdr->sh_size;
+ }
+
+ /* Fix up syms, so that st_value is a pointer to location. */
+ ret = simplify_symbols(mod, &info);
+ if (ret) {
+ dev_err(dev, "Unable to simplify symbols\n");
+ goto unload_module;
+ }
+
+ dev_dbg(dev, "applying relocation\n");
+ ret = apply_relocations(mod, &info);
+ if (ret) {
+ dev_err(dev, "relocation failed\n");
+ goto unload_module;
+ }
+
+ mod->dynamic = true;
+
+ error_free_memory:
+ kfree(buf);
+ release_firmware:
+ release_firmware(fw);
+ return ret ? ERR_PTR(ret) : mod;
+
+ unload_module:
+ kfree(buf);
+ unload_adsp_module(mod);
+ release_firmware(fw);
+ return ERR_PTR(ret);
+}
+
+void unload_adsp_module(struct adsp_module *mod)
+{
+ dram_app_mem_release(mod->handle);
+ kfree(mod);
+}
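
For context, a minimal sketch of how a caller might drive the loader above. This is not part of the patch: the app and file names are illustrative, and the declarations of struct adsp_module and the load/unload helpers are assumed to come from the driver's internal headers.

/* Hypothetical caller of the ELF app loader defined above (illustrative only). */
static int example_load_and_unload(struct device *dev)
{
	struct adsp_module *mod;

	/* "example_app" / "example_app.elf" are placeholder names */
	mod = load_adsp_dynamic_module("example_app", "example_app.elf", dev);
	if (IS_ERR_OR_NULL(mod))
		return mod ? PTR_ERR(mod) : -ENOMEM;

	/* ... map and start the app via the rest of the app framework ... */

	unload_adsp_module(mod);
	return 0;
}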
diff --git a/drivers/platform/tegra/nvadsp/aram_manager.c b/drivers/platform/tegra/nvadsp/aram_manager.c
new file mode 100644
index 00000000..e575037b
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/aram_manager.c
@@ -0,0 +1,96 @@
+/*
+ * aram_manager.c
+ *
+ * ARAM manager
+ *
+ * Copyright (C) 2014-2022, NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s : %d, " fmt, __func__, __LINE__
+
+#include
+#include
+
+#include "aram_manager.h"
+
+static void *aram_handle;
+
+static LIST_HEAD(aram_alloc_list);
+static LIST_HEAD(aram_free_list);
+
+void nvadsp_aram_print(void)
+{
+ mem_print(aram_handle);
+}
+
+void *nvadsp_aram_request(const char *name, size_t size)
+{
+ return mem_request(aram_handle, name, size);
+}
+
+bool nvadsp_aram_release(void *handle)
+{
+ return mem_release(aram_handle, handle);
+}
+
+unsigned long nvadsp_aram_get_address(void *handle)
+{
+ return mem_get_address(handle);
+}
+
+static struct dentry *aram_dump_debugfs_file;
+
+static int nvadsp_aram_dump(struct seq_file *s, void *data)
+{
+ mem_dump(aram_handle, s);
+ return 0;
+}
+
+static int nvadsp_aram_dump_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nvadsp_aram_dump, inode->i_private);
+}
+
+static const struct file_operations aram_dump_fops = {
+ .open = nvadsp_aram_dump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int nvadsp_aram_init(unsigned long addr, unsigned long size)
+{
+ aram_handle = create_mem_manager("ARAM", addr, size);
+ if (IS_ERR(aram_handle)) {
+ pr_err("ERROR: failed to create aram memory_manager");
+ return PTR_ERR(aram_handle);
+ }
+
+ if (debugfs_initialized()) {
+ aram_dump_debugfs_file = debugfs_create_file("aram_dump",
+ S_IRUSR, NULL, NULL, &aram_dump_fops);
+ if (!aram_dump_debugfs_file) {
+ pr_err("ERROR: failed to create aram_dump debugfs");
+ destroy_mem_manager(aram_handle);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+void nvadsp_aram_exit(void)
+{
+ debugfs_remove(aram_dump_debugfs_file);
+ destroy_mem_manager(aram_handle);
+}
+
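A hedged usage sketch of the ARAM allocator wrappers above (not part of the patch). It assumes mem_request() returns NULL when no free block is available; the buffer name and size are illustrative.

/* Hypothetical ARAM client using the helpers defined above. */
static int example_aram_client(void)
{
	void *handle;
	unsigned long addr;

	handle = nvadsp_aram_request("example_buf", 1024);
	if (!handle)	/* assumes mem_request() returns NULL on failure */
		return -ENOMEM;

	addr = nvadsp_aram_get_address(handle);
	pr_info("example_buf placed at ARAM address 0x%lx\n", addr);

	nvadsp_aram_release(handle);
	return 0;
}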
diff --git a/drivers/platform/tegra/nvadsp/aram_manager.h b/drivers/platform/tegra/nvadsp/aram_manager.h
new file mode 100644
index 00000000..18e8f887
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/aram_manager.h
@@ -0,0 +1,23 @@
+/*
+ * Header file for aram manager
+ *
+ * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TEGRA_NVADSP_ARAM_MANAGER_H
+#define __TEGRA_NVADSP_ARAM_MANAGER_H
+
+#include "mem_manager.h"
+
+int nvadsp_aram_init(unsigned long addr, unsigned long size);
+void nvadsp_aram_exit(void);
+#endif /* __TEGRA_NVADSP_ARAM_MANAGER_H */
diff --git a/drivers/platform/tegra/nvadsp/dev-t18x.c b/drivers/platform/tegra/nvadsp/dev-t18x.c
new file mode 100644
index 00000000..2e70ebd0
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/dev-t18x.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2015-2021, NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include
+#if KERNEL_VERSION(4, 15, 0) > LINUX_VERSION_CODE
+#include
+#else
+#include
+#endif
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#ifdef CONFIG_TEGRA_VIRT_AUDIO_IVC
+#include "tegra_virt_alt_ivc_common.h"
+#include "tegra_virt_alt_ivc.h"
+#endif
+
+#include "dev.h"
+#include "dev-t18x.h"
+
+#ifdef CONFIG_PM
+static int nvadsp_t18x_clocks_disable(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ /* The APE and APB2APE clocks required by NVADSP are controlled
+ * by the parent ACONNECT bus driver
+ */
+ if (drv_data->adsp_clk) {
+ clk_disable_unprepare(drv_data->adsp_clk);
+ dev_dbg(dev, "adsp clocks disabled\n");
+ drv_data->adsp_clk = NULL;
+ }
+
+ if (drv_data->aclk_clk) {
+ clk_disable_unprepare(drv_data->aclk_clk);
+ dev_dbg(dev, "aclk clock disabled\n");
+ drv_data->aclk_clk = NULL;
+ }
+
+ if (drv_data->adsp_neon_clk) {
+ clk_disable_unprepare(drv_data->adsp_neon_clk);
+ dev_dbg(dev, "adsp_neon clocks disabled\n");
+ drv_data->adsp_neon_clk = NULL;
+ }
+
+ return 0;
+}
+
+static int nvadsp_t18x_clocks_enable(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int ret = 0;
+ /* The APE and APB2APE clocks required by NVADSP are controlled
+ * by the parent ACONNECT bus driver
+ */
+ drv_data->adsp_clk = devm_clk_get(dev, "adsp");
+ if (IS_ERR_OR_NULL(drv_data->adsp_clk)) {
+ dev_err(dev, "unable to find adsp clock\n");
+ ret = PTR_ERR(drv_data->adsp_clk);
+ goto end;
+ }
+ ret = clk_prepare_enable(drv_data->adsp_clk);
+ if (ret) {
+ dev_err(dev, "unable to enable adsp clock\n");
+ goto end;
+ }
+
+ drv_data->aclk_clk = devm_clk_get(dev, "aclk");
+ if (IS_ERR_OR_NULL(drv_data->aclk_clk)) {
+ dev_err(dev, "unable to find aclk clock\n");
+ ret = PTR_ERR(drv_data->aclk_clk);
+ goto end;
+ }
+ ret = clk_prepare_enable(drv_data->aclk_clk);
+ if (ret) {
+ dev_err(dev, "unable to enable aclk clock\n");
+ goto end;
+ }
+
+ drv_data->adsp_neon_clk = devm_clk_get(dev, "adspneon");
+ if (IS_ERR_OR_NULL(drv_data->adsp_neon_clk)) {
+ dev_err(dev, "unable to find adsp neon clock\n");
+ ret = PTR_ERR(drv_data->adsp_neon_clk);
+ goto end;
+ }
+ ret = clk_prepare_enable(drv_data->adsp_neon_clk);
+ if (ret) {
+ dev_err(dev, "unable to enable adsp neon clock\n");
+ goto end;
+ }
+ dev_dbg(dev, "adsp neon clock enabled\n");
+
+ dev_dbg(dev, "all clocks enabled\n");
+ return 0;
+ end:
+ nvadsp_t18x_clocks_disable(pdev);
+ return ret;
+}
+
+static int __nvadsp_t18x_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+
+ dev_dbg(dev, "at %s:%d\n", __func__, __LINE__);
+
+ ret = nvadsp_t18x_clocks_enable(pdev);
+ if (ret) {
+ dev_dbg(dev, "failed in nvadsp_t18x_clocks_enable\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int __nvadsp_t18x_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ dev_dbg(dev, "at %s:%d\n", __func__, __LINE__);
+
+ return nvadsp_t18x_clocks_disable(pdev);
+}
+
+static int __nvadsp_t18x_runtime_idle(struct device *dev)
+{
+ dev_dbg(dev, "at %s:%d\n", __func__, __LINE__);
+ return 0;
+}
+
+int nvadsp_pm_t18x_init(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *d = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ dev_dbg(dev, "at %s:%d\n", __func__, __LINE__);
+
+ d->runtime_suspend = __nvadsp_t18x_runtime_suspend;
+ d->runtime_resume = __nvadsp_t18x_runtime_resume;
+ d->runtime_idle = __nvadsp_t18x_runtime_idle;
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static int __assert_t18x_adsp(struct nvadsp_drv_data *d)
+{
+ struct platform_device *pdev = d->pdev;
+ struct device *dev = &pdev->dev;
+ int ret = 0;
+
+ /*
+ * The ADSP_ALL reset in BPMP-FW is overloaded to assert
+ * all 7 resets i.e. ADSP, ADSPINTF, ADSPDBG, ADSPNEON,
+ * ADSPPERIPH, ADSPSCU and ADSPWDT resets. So resetting
+ * only ADSP reset is sufficient to reset all ADSP sub-modules.
+ */
+ ret = reset_control_assert(d->adspall_rst);
+ if (ret) {
+ dev_err(dev, "failed to assert adsp\n");
+ goto end;
+ }
+
+ /* APE_TKE reset */
+ if (d->ape_tke_rst) {
+ ret = reset_control_assert(d->ape_tke_rst);
+ if (ret)
+ dev_err(dev, "failed to assert ape_tke\n");
+ }
+
+end:
+ return ret;
+}
+
+static int __deassert_t18x_adsp(struct nvadsp_drv_data *d)
+{
+ struct platform_device *pdev = d->pdev;
+ struct device *dev = &pdev->dev;
+ int ret = 0;
+
+ /* APE_TKE reset */
+ if (d->ape_tke_rst) {
+ ret = reset_control_deassert(d->ape_tke_rst);
+ if (ret) {
+ dev_err(dev, "failed to deassert ape_tke\n");
+ goto end;
+ }
+ }
+
+ /*
+ * The ADSP_ALL reset in BPMP-FW is overloaded to de-assert
+ * all 7 resets i.e. ADSP, ADSPINTF, ADSPDBG, ADSPNEON, ADSPPERIPH,
+ * ADSPSCU and ADSPWDT resets. The BPMP-FW also takes care
+ * of specific de-assert sequence and delays between them.
+ * So de-resetting only ADSP reset is sufficient to de-reset
+ * all ADSP sub-modules.
+ */
+ ret = reset_control_deassert(d->adspall_rst);
+ if (ret)
+ dev_err(dev, "failed to deassert adsp\n");
+
+end:
+ return ret;
+}
+
+#ifdef CONFIG_TEGRA_VIRT_AUDIO_IVC
+static int __virt_assert_t18x_adsp(struct nvadsp_drv_data *d)
+{
+ int err;
+ struct nvaudio_ivc_msg msg;
+ struct nvaudio_ivc_ctxt *hivc_client = nvaudio_get_ivc_alloc_ctxt();
+
+ if (!hivc_client) {
+ pr_err("%s: Failed to allocate IVC context\n", __func__);
+ return -ENODEV;
+ }
+
+ memset(&msg, 0, sizeof(struct nvaudio_ivc_msg));
+ msg.cmd = NVAUDIO_ADSP_RESET;
+ msg.params.adsp_reset_info.reset_req = ASSERT;
+ msg.ack_required = true;
+
+ err = nvaudio_ivc_send_receive(hivc_client,
+ &msg,
+ sizeof(struct nvaudio_ivc_msg));
+ if (err < 0)
+ pr_err("%s: error on ivc_send_receive\n", __func__);
+
+ return 0;
+}
+
+static int __virt_deassert_t18x_adsp(struct nvadsp_drv_data *d)
+{
+ int err;
+ struct nvaudio_ivc_msg msg;
+ struct nvaudio_ivc_ctxt *hivc_client = nvaudio_get_ivc_alloc_ctxt();
+
+ if (!hivc_client) {
+ pr_err("%s: Failed to allocate IVC context\n", __func__);
+ return -ENODEV;
+ }
+
+ memset(&msg, 0, sizeof(struct nvaudio_ivc_msg));
+ msg.cmd = NVAUDIO_ADSP_RESET;
+ msg.params.adsp_reset_info.reset_req = DEASSERT;
+ msg.ack_required = true;
+
+ err = nvaudio_ivc_send_receive(hivc_client,
+ &msg,
+ sizeof(struct nvaudio_ivc_msg));
+ if (err < 0)
+ pr_err("%s: error on ivc_send_receive\n", __func__);
+
+ return 0;
+}
+#endif
+
+int nvadsp_reset_t18x_init(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *d = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int ret = 0;
+
+#ifdef CONFIG_TEGRA_VIRT_AUDIO_IVC
+
+ if (is_tegra_hypervisor_mode()) {
+ d->assert_adsp = __virt_assert_t18x_adsp;
+ d->deassert_adsp = __virt_deassert_t18x_adsp;
+ d->adspall_rst = NULL;
+ return 0;
+ }
+#endif
+
+ d->assert_adsp = __assert_t18x_adsp;
+ d->deassert_adsp = __deassert_t18x_adsp;
+ d->adspall_rst = devm_reset_control_get(dev, "adspall");
+ if (IS_ERR(d->adspall_rst)) {
+ dev_err(dev, "can not get adspall reset\n");
+ ret = PTR_ERR(d->adspall_rst);
+ goto end;
+ }
+
+ d->ape_tke_rst = devm_reset_control_get(dev, "ape_tke");
+ if (IS_ERR(d->ape_tke_rst))
+ d->ape_tke_rst = NULL;
+
+end:
+ return ret;
+}
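
For reference, a sketch of how the assert/deassert callbacks installed by nvadsp_reset_t18x_init() are expected to be used. This is not part of the patch: the real call sites live in the OS-loader files, which are not in this hunk, and the function name here is illustrative.

/* Illustrative reset cycle through the callbacks installed above. */
static int example_cycle_adsp_reset(struct nvadsp_drv_data *d)
{
	int ret;

	ret = d->assert_adsp(d);	/* native reset_control or IVC request */
	if (ret)
		return ret;

	/* ... reload firmware / reprogram EVP here ... */

	return d->deassert_adsp(d);	/* BPMP-FW sequences the de-assert */
}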
diff --git a/drivers/platform/tegra/nvadsp/dev-t18x.h b/drivers/platform/tegra/nvadsp/dev-t18x.h
new file mode 100644
index 00000000..fe0dd8fb
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/dev-t18x.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2015-2021, NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __TEGRA_NVADSP_DEV_T18X_H
+#define __TEGRA_NVADSP_DEV_T18X_H
+
+int nvadsp_acast_t18x_init(struct platform_device *pdev);
+int nvadsp_reset_t18x_init(struct platform_device *pdev);
+int nvadsp_os_t18x_init(struct platform_device *pdev);
+int nvadsp_pm_t18x_init(struct platform_device *pdev);
+
+#endif /* __TEGRA_NVADSP_DEV_T18X_H */
diff --git a/drivers/platform/tegra/nvadsp/dev-t21x.c b/drivers/platform/tegra/nvadsp/dev-t21x.c
new file mode 100644
index 00000000..6ae797e2
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/dev-t21x.c
@@ -0,0 +1,306 @@
+/*
+ * dev-t21x.c
+ *
+ * A device driver for ADSP and APE
+ *
+ * Copyright (C) 2014-2017, NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "dev.h"
+#include "amc.h"
+#include "dev-t21x.h"
+
+#ifdef CONFIG_PM
+static void nvadsp_clocks_disable(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ if (drv_data->adsp_clk) {
+ clk_disable_unprepare(drv_data->adsp_clk);
+ dev_dbg(dev, "adsp clocks disabled\n");
+ drv_data->adsp_clk = NULL;
+ }
+
+ if (drv_data->adsp_cpu_abus_clk) {
+ clk_disable_unprepare(drv_data->adsp_cpu_abus_clk);
+ dev_dbg(dev, "adsp cpu abus clock disabled\n");
+ drv_data->adsp_cpu_abus_clk = NULL;
+ }
+
+ if (drv_data->adsp_neon_clk) {
+ clk_disable_unprepare(drv_data->adsp_neon_clk);
+ dev_dbg(dev, "adsp_neon clocks disabled\n");
+ drv_data->adsp_neon_clk = NULL;
+ }
+
+ if (drv_data->ape_clk) {
+ clk_disable_unprepare(drv_data->ape_clk);
+ dev_dbg(dev, "ape clock disabled\n");
+ drv_data->ape_clk = NULL;
+ }
+
+ if (drv_data->apb2ape_clk) {
+ clk_disable_unprepare(drv_data->apb2ape_clk);
+ dev_dbg(dev, "apb2ape clock disabled\n");
+ drv_data->apb2ape_clk = NULL;
+ }
+}
+
+static int nvadsp_clocks_enable(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int ret = 0;
+
+ drv_data->ape_clk = devm_clk_get(dev, "adsp.ape");
+ if (IS_ERR_OR_NULL(drv_data->ape_clk)) {
+ dev_err(dev, "unable to find adsp.ape clock\n");
+ ret = PTR_ERR(drv_data->ape_clk);
+ goto end;
+ }
+ ret = clk_prepare_enable(drv_data->ape_clk);
+ if (ret) {
+ dev_err(dev, "unable to enable adsp.ape clock\n");
+ goto end;
+ }
+ dev_dbg(dev, "ape clock enabled\n");
+
+ drv_data->adsp_clk = devm_clk_get(dev, "adsp");
+ if (IS_ERR_OR_NULL(drv_data->adsp_clk)) {
+ dev_err(dev, "unable to find adsp clock\n");
+ ret = PTR_ERR(drv_data->adsp_clk);
+ goto end;
+ }
+ ret = clk_prepare_enable(drv_data->adsp_clk);
+ if (ret) {
+ dev_err(dev, "unable to enable adsp clock\n");
+ goto end;
+ }
+
+ drv_data->adsp_cpu_abus_clk = devm_clk_get(dev, "adsp_cpu_abus");
+ if (IS_ERR_OR_NULL(drv_data->adsp_cpu_abus_clk)) {
+ dev_err(dev, "unable to find adsp cpu abus clock\n");
+ ret = PTR_ERR(drv_data->adsp_cpu_abus_clk);
+ goto end;
+ }
+ ret = clk_prepare_enable(drv_data->adsp_cpu_abus_clk);
+ if (ret) {
+ dev_err(dev, "unable to enable adsp cpu abus clock\n");
+ goto end;
+ }
+
+ drv_data->adsp_neon_clk = devm_clk_get(dev, "adspneon");
+ if (IS_ERR_OR_NULL(drv_data->adsp_neon_clk)) {
+ dev_err(dev, "unable to find adsp neon clock\n");
+ ret = PTR_ERR(drv_data->adsp_neon_clk);
+ goto end;
+ }
+ ret = clk_prepare_enable(drv_data->adsp_neon_clk);
+ if (ret) {
+ dev_err(dev, "unable to enable adsp neon clock\n");
+ goto end;
+ }
+ dev_dbg(dev, "adsp cpu clock enabled\n");
+
+ drv_data->apb2ape_clk = devm_clk_get(dev, "adsp.apb2ape");
+ if (IS_ERR_OR_NULL(drv_data->apb2ape_clk)) {
+ dev_err(dev, "unable to find adsp.apb2ape clk\n");
+ ret = PTR_ERR(drv_data->apb2ape_clk);
+ goto end;
+ }
+ ret = clk_prepare_enable(drv_data->apb2ape_clk);
+ if (ret) {
+ dev_err(dev, "unable to enable adsp.apb2ape clock\n");
+ goto end;
+ }
+
+ /* The AHUB and UART clocks are not enabled here, as UART is
+ * disabled by default on T210
+ */
+ dev_dbg(dev, "all clocks enabled\n");
+ return 0;
+ end:
+ nvadsp_clocks_disable(pdev);
+ return ret;
+}
+
+static inline bool nvadsp_amsic_skip_reg(u32 offset)
+{
+ if (offset == AMISC_ADSP_L2_REGFILEBASE ||
+ offset == AMISC_SHRD_SMP_STA ||
+ (offset >= AMISC_SEM_REG_START && offset <= AMISC_SEM_REG_END) ||
+ offset == AMISC_TSC ||
+ offset == AMISC_ACTMON_AVG_CNT) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static int nvadsp_amisc_save(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *d = platform_get_drvdata(pdev);
+ u32 val, offset;
+ int i = 0;
+
+ offset = AMISC_REG_START_OFFSET;
+ while (offset <= AMISC_REG_MBOX_OFFSET) {
+ if (nvadsp_amsic_skip_reg(offset)) {
+ offset += 4;
+ continue;
+ }
+ val = readl(d->base_regs[AMISC] + offset);
+ d->state.amisc_regs[i++] = val;
+ offset += 4;
+ }
+
+ offset = ADSP_ACTMON_REG_START_OFFSET;
+ while (offset <= ADSP_ACTMON_REG_END_OFFSET) {
+ if (nvadsp_amsic_skip_reg(offset)) {
+ offset += 4;
+ continue;
+ }
+ val = readl(d->base_regs[AMISC] + offset);
+ d->state.amisc_regs[i++] = val;
+ offset += 4;
+ }
+ return 0;
+}
+
+static int nvadsp_amisc_restore(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *d = platform_get_drvdata(pdev);
+ u32 val, offset;
+ int i = 0;
+
+ offset = AMISC_REG_START_OFFSET;
+ while (offset <= AMISC_REG_MBOX_OFFSET) {
+ if (nvadsp_amsic_skip_reg(offset)) {
+ offset += 4;
+ continue;
+ }
+ val = d->state.amisc_regs[i++];
+ writel(val, d->base_regs[AMISC] + offset);
+ offset += 4;
+ }
+
+ offset = ADSP_ACTMON_REG_START_OFFSET;
+ while (offset <= ADSP_ACTMON_REG_END_OFFSET) {
+ if (nvadsp_amsic_skip_reg(offset)) {
+ offset += 4;
+ continue;
+ }
+ val = d->state.amisc_regs[i++];
+ writel(val, d->base_regs[AMISC] + offset);
+ offset += 4;
+ }
+ return 0;
+}
+
+static int __nvadsp_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ dev_dbg(dev, "restoring adsp base regs\n");
+ drv_data->base_regs = drv_data->base_regs_saved;
+
+ dev_dbg(dev, "enabling clocks\n");
+ ret = nvadsp_clocks_enable(pdev);
+ if (ret) {
+ dev_err(dev, "nvadsp_clocks_enable failed\n");
+ goto skip;
+ }
+
+ if (!drv_data->adsp_os_suspended) {
+ dev_dbg(dev, "%s: adsp os is not suspended\n", __func__);
+ goto skip;
+ }
+
+ dev_dbg(dev, "restoring ape state\n");
+ nvadsp_amc_restore(pdev);
+ nvadsp_aram_restore(pdev);
+ nvadsp_amisc_restore(pdev);
+ skip:
+ return ret;
+}
+
+static int __nvadsp_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ if (!drv_data->adsp_os_suspended) {
+ dev_dbg(dev, "%s: adsp os is not suspended\n", __func__);
+ goto clocks;
+ }
+
+ dev_dbg(dev, "saving amsic\n");
+ nvadsp_amisc_save(pdev);
+
+ dev_dbg(dev, "saving aram\n");
+ nvadsp_aram_save(pdev);
+
+ dev_dbg(dev, "saving amc\n");
+ nvadsp_amc_save(pdev);
+ clocks:
+ dev_dbg(dev, "disabling clocks\n");
+ nvadsp_clocks_disable(pdev);
+
+ dev_dbg(dev, "locking out adsp base regs\n");
+ drv_data->base_regs = NULL;
+
+ return ret;
+}
+
+static int __nvadsp_runtime_idle(struct device *dev)
+{
+ return 0;
+}
+
+int nvadsp_pm_t21x_init(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+
+ drv_data->runtime_suspend = __nvadsp_runtime_suspend;
+ drv_data->runtime_resume = __nvadsp_runtime_resume;
+ drv_data->runtime_idle = __nvadsp_runtime_idle;
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+int nvadsp_reset_t21x_init(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int ret = 0;
+
+ drv_data->adspall_rst = devm_reset_control_get(dev, "adspall");
+ if (IS_ERR_OR_NULL(drv_data->adspall_rst)) {
+ ret = PTR_ERR(drv_data->adspall_rst);
+ dev_err(dev, "unable to get adspall reset %d\n", ret);
+ }
+
+ return ret;
+}
diff --git a/drivers/platform/tegra/nvadsp/dev-t21x.h b/drivers/platform/tegra/nvadsp/dev-t21x.h
new file mode 100644
index 00000000..1f4c2352
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/dev-t21x.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2015-2017, NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __TEGRA_NVADSP_DEV_T21X_H
+#define __TEGRA_NVADSP_DEV_T21X_H
+
+int nvadsp_reset_t21x_init(struct platform_device *pdev);
+int nvadsp_os_t21x_init(struct platform_device *pdev);
+int nvadsp_pm_t21x_init(struct platform_device *pdev);
+
+#endif /* __TEGRA_NVADSP_DEV_T21X_H */
diff --git a/drivers/platform/tegra/nvadsp/dev.c b/drivers/platform/tegra/nvadsp/dev.c
new file mode 100644
index 00000000..6f8f8abf
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/dev.c
@@ -0,0 +1,674 @@
+/*
+ * dev.c
+ *
+ * A device driver for ADSP and APE
+ *
+ * Copyright (C) 2014-2022, NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#if KERNEL_VERSION(4, 15, 0) > LINUX_VERSION_CODE
+#include
+#else
+#include
+#endif
+#include
+#if KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE
+#include
+#endif
+#include
+#include
+#include
+#include
+
+#include "dev.h"
+#include "os.h"
+#include "amc.h"
+#include "ape_actmon.h"
+#include "aram_manager.h"
+
+#include "dev-t21x.h"
+#include "dev-t18x.h"
+
+static struct nvadsp_drv_data *nvadsp_drv_data;
+
+#ifdef CONFIG_DEBUG_FS
+static int __init adsp_debug_init(struct nvadsp_drv_data *drv_data)
+{
+ drv_data->adsp_debugfs_root = debugfs_create_dir("tegra_ape", NULL);
+ if (!drv_data->adsp_debugfs_root)
+ return -ENOMEM;
+ return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+
+#ifdef CONFIG_PM
+static int nvadsp_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ int ret = -EINVAL;
+
+ if (drv_data->runtime_resume)
+ ret = drv_data->runtime_resume(dev);
+
+ return ret;
+}
+
+static int nvadsp_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ int ret = -EINVAL;
+
+ if (drv_data->runtime_suspend)
+ ret = drv_data->runtime_suspend(dev);
+
+ return ret;
+}
+
+static int nvadsp_runtime_idle(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ if (drv_data->runtime_idle)
+ ret = drv_data->runtime_idle(dev);
+
+ return ret;
+}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_SLEEP
+static int nvadsp_suspend(struct device *dev)
+{
+ if (pm_runtime_status_suspended(dev))
+ return 0;
+
+ return nvadsp_runtime_suspend(dev);
+}
+
+static int nvadsp_resume(struct device *dev)
+{
+ if (pm_runtime_status_suspended(dev))
+ return 0;
+
+ return nvadsp_runtime_resume(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops nvadsp_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(nvadsp_suspend, nvadsp_resume)
+ SET_RUNTIME_PM_OPS(nvadsp_runtime_suspend, nvadsp_runtime_resume,
+ nvadsp_runtime_idle)
+};
+
+uint64_t nvadsp_get_timestamp_counter(void)
+{
+#if KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE
+ return arch_counter_get_cntvct();
+#else
+ return __arch_counter_get_cntvct_stable();
+#endif
+}
+EXPORT_SYMBOL(nvadsp_get_timestamp_counter);
+
+int nvadsp_set_bw(struct nvadsp_drv_data *drv_data, u32 efreq)
+{
+ int ret = -EINVAL;
+
+ if (drv_data->bwmgr)
+ ret = tegra_bwmgr_set_emc(drv_data->bwmgr, efreq * 1000,
+ TEGRA_BWMGR_SET_EMC_FLOOR);
+#if KERNEL_VERSION(5, 9, 0) <= LINUX_VERSION_CODE
+ else if (drv_data->icc_path_handle)
+ ret = icc_set_bw(drv_data->icc_path_handle, 0,
+ (unsigned long)FREQ2ICC(efreq * 1000));
+#endif
+ if (ret)
+ dev_err(&drv_data->pdev->dev,
+ "failed to set emc freq rate:%d\n", ret);
+
+ return ret;
+}
+
+static void nvadsp_bw_register(struct nvadsp_drv_data *drv_data)
+{
+ struct device *dev = &drv_data->pdev->dev;
+
+ switch (tegra_get_chip_id()) {
+ case TEGRA210:
+ case TEGRA186:
+ case TEGRA194:
+ drv_data->bwmgr = tegra_bwmgr_register(
+ TEGRA_BWMGR_CLIENT_APE_ADSP);
+ if (IS_ERR(drv_data->bwmgr)) {
+ dev_err(dev, "unable to register bwmgr\n");
+ drv_data->bwmgr = NULL;
+ }
+ break;
+ default:
+#if KERNEL_VERSION(5, 9, 0) <= LINUX_VERSION_CODE
+ if (!is_tegra_hypervisor_mode()) {
+ /* Interconnect Support */
+#ifdef CONFIG_ARCH_TEGRA_23x_SOC
+ drv_data->icc_path_handle = icc_get(dev, TEGRA_ICC_APE,
+ TEGRA_ICC_PRIMARY);
+#endif
+ if (IS_ERR(drv_data->icc_path_handle)) {
+ dev_err(dev,
+ "%s: Failed to register Interconnect err=%ld\n",
+ __func__, PTR_ERR(drv_data->icc_path_handle));
+ drv_data->icc_path_handle = NULL;
+ }
+ }
+#endif
+ break;
+ }
+}
+
+static void nvadsp_bw_unregister(struct nvadsp_drv_data *drv_data)
+{
+ nvadsp_set_bw(drv_data, 0);
+
+ if (drv_data->bwmgr) {
+ tegra_bwmgr_unregister(drv_data->bwmgr);
+ drv_data->bwmgr = NULL;
+ }
+
+#if KERNEL_VERSION(5, 9, 0) <= LINUX_VERSION_CODE
+ if (drv_data->icc_path_handle) {
+ icc_put(drv_data->icc_path_handle);
+ drv_data->icc_path_handle = NULL;
+ }
+#endif
+}
+
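A hedged sketch of how the bandwidth helper above is meant to be used (not part of the patch): nvadsp_set_bw() takes an EMC frequency in kHz and either sets a bwmgr floor or an ICC bandwidth, and passing 0 drops the request, exactly as nvadsp_bw_unregister() does.

/* Illustrative EMC floor request/release around an ADSP use case. */
static void example_emc_floor(struct nvadsp_drv_data *drv_data)
{
	/* ape_emc_freq (kHz) comes from the optional "nvidia,ape_emc_freq" DT property */
	if (drv_data->ape_emc_freq)
		nvadsp_set_bw(drv_data, drv_data->ape_emc_freq);

	/* ... ADSP workload runs with the floor in place ... */

	nvadsp_set_bw(drv_data, 0);	/* drop the request */
}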
+static int __init nvadsp_parse_co_mem(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct device_node *node;
+ int err = 0;
+
+ node = of_parse_phandle(dev->of_node, "nvidia,adsp_co", 0);
+ if (!node)
+ return 0;
+
+ if (!of_device_is_available(node))
+ goto exit;
+
+ err = of_address_to_resource(node, 0, &drv_data->co_mem);
+ if (err) {
+ dev_err(dev, "cannot get adsp CO memory (%d)\n", err);
+ goto exit;
+ }
+
+ drv_data->adsp_mem[ADSP_OS_SIZE] = resource_size(&drv_data->co_mem);
+
+exit:
+ of_node_put(node);
+
+ return err;
+}
+
+static void __init nvadsp_parse_clk_entries(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ u32 val32 = 0;
+
+
+ /* Optional properties, should come from platform dt files */
+ if (of_property_read_u32(dev->of_node, "nvidia,adsp_freq", &val32))
+ dev_dbg(dev, "adsp_freq dt not found\n");
+ else {
+ drv_data->adsp_freq = val32;
+ drv_data->adsp_freq_hz = val32 * 1000;
+ }
+
+ if (of_property_read_u32(dev->of_node, "nvidia,ape_freq", &val32))
+ dev_dbg(dev, "ape_freq dt not found\n");
+ else
+ drv_data->ape_freq = val32;
+
+ if (of_property_read_u32(dev->of_node, "nvidia,ape_emc_freq", &val32))
+ dev_dbg(dev, "ape_emc_freq dt not found\n");
+ else
+ drv_data->ape_emc_freq = val32;
+}
+
+static int __init nvadsp_parse_dt(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ const char *adsp_elf;
+ u32 *adsp_reset;
+ u32 *adsp_mem;
+ int iter;
+
+ adsp_reset = drv_data->unit_fpga_reset;
+ adsp_mem = drv_data->adsp_mem;
+
+ for (iter = 0; iter < ADSP_MEM_END; iter++) {
+ if (of_property_read_u32_index(dev->of_node, "nvidia,adsp_mem",
+ iter, &adsp_mem[iter])) {
+ dev_err(dev, "adsp memory dt %d not found\n", iter);
+ return -EINVAL;
+ }
+ }
+
+ for (iter = 0; iter < ADSP_EVP_END; iter++) {
+ if (of_property_read_u32_index(dev->of_node,
+ "nvidia,adsp-evp-base",
+ iter, &drv_data->evp_base[iter])) {
+ dev_err(dev, "adsp memory dt %d not found\n", iter);
+ return -EINVAL;
+ }
+ }
+
+ if (!of_property_read_string(dev->of_node,
+ "nvidia,adsp_elf", &adsp_elf)) {
+ if (strlen(adsp_elf) < MAX_FW_STR)
+ strcpy(drv_data->adsp_elf, adsp_elf);
+ else {
+ dev_err(dev, "invalid string in nvidia,adsp_elf\n");
+ return -EINVAL;
+ }
+ } else
+ strcpy(drv_data->adsp_elf, NVADSP_ELF);
+
+ drv_data->adsp_unit_fpga = of_property_read_bool(dev->of_node,
+ "nvidia,adsp_unit_fpga");
+
+ drv_data->adsp_os_secload = of_property_read_bool(dev->of_node,
+ "nvidia,adsp_os_secload");
+
+ if (of_property_read_u32(dev->of_node, "nvidia,tegra_platform",
+ &drv_data->tegra_platform))
+ dev_dbg(dev, "tegra_platform dt not found\n");
+
+ if (of_property_read_u32(dev->of_node, "nvidia,adsp_load_timeout",
+ &drv_data->adsp_load_timeout))
+ dev_dbg(dev, "adsp_load_timeout dt not found\n");
+
+ if (drv_data->adsp_unit_fpga) {
+ for (iter = 0; iter < ADSP_UNIT_FPGA_RESET_END; iter++) {
+ if (of_property_read_u32_index(dev->of_node,
+ "nvidia,adsp_unit_fpga_reset", iter,
+ &adsp_reset[iter])) {
+ dev_err(dev, "adsp reset dt %d not found\n",
+ iter);
+ return -EINVAL;
+ }
+ }
+ }
+ nvadsp_parse_clk_entries(pdev);
+
+ if (nvadsp_parse_co_mem(pdev))
+ return -ENOMEM;
+
+ drv_data->state.evp = devm_kzalloc(dev,
+ drv_data->evp_base[ADSP_EVP_SIZE], GFP_KERNEL);
+ if (!drv_data->state.evp)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int __init nvadsp_probe(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv_data;
+ struct device *dev = &pdev->dev;
+ struct resource *res = NULL;
+ void __iomem *base = NULL;
+ uint32_t aram_addr;
+ uint32_t aram_size;
+ int dram_iter;
+ int irq_iter;
+ int ret = 0;
+ int iter;
+
+ dev_info(dev, "in probe()...\n");
+
+ drv_data = devm_kzalloc(dev, sizeof(*drv_data),
+ GFP_KERNEL);
+ if (!drv_data) {
+ dev_err(&pdev->dev, "Failed to allocate driver data");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ platform_set_drvdata(pdev, drv_data);
+ drv_data->pdev = pdev;
+ drv_data->chip_data = of_device_get_match_data(dev);
+
+ ret = nvadsp_parse_dt(pdev);
+ if (ret)
+ goto out;
+
+#ifdef CONFIG_PM
+ ret = nvadsp_pm_init(pdev);
+ if (ret) {
+ dev_err(dev, "Failed in pm init");
+ goto out;
+ }
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+ if (adsp_debug_init(drv_data))
+ dev_err(dev,
+ "unable to create tegra_ape debug fs directory\n");
+#endif
+
+ drv_data->base_regs =
+ devm_kzalloc(dev, sizeof(void *) * APE_MAX_REG,
+ GFP_KERNEL);
+ if (!drv_data->base_regs) {
+ dev_err(dev, "Failed to allocate regs");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (iter = 0; iter < APE_MAX_REG; iter++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, iter);
+ if (!res) {
+ dev_err(dev,
+ "Failed to get resource with ID %d\n",
+ iter);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!drv_data->adsp_unit_fpga && iter == UNIT_FPGA_RST)
+ continue;
+
+ /*
+ * Skip if the particular module is not present in this chip
+ * generation, which is indicated by a register start address
+ * of 0 in the device tree.
+ */
+ if (res->start == 0)
+ continue;
+
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base)) {
+ dev_err(dev, "Failed to iomap resource reg[%d]\n",
+ iter);
+ ret = PTR_ERR(base);
+ goto out;
+ }
+ drv_data->base_regs[iter] = base;
+ nvadsp_add_load_mappings(res->start, (void __force *)base,
+ resource_size(res));
+ }
+
+ drv_data->base_regs_saved = drv_data->base_regs;
+
+ for (dram_iter = 0; dram_iter < ADSP_MAX_DRAM_MAP; dram_iter++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, iter++);
+ if (!res) {
+ dev_err(dev,
+ "Failed to get DRAM map with ID %d\n", iter);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ drv_data->dram_region[dram_iter] = res;
+ }
+
+ for (irq_iter = 0; irq_iter < NVADSP_VIRQ_MAX; irq_iter++) {
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, irq_iter);
+ if (!res) {
+ dev_err(dev, "Failed to get irq number for index %d\n",
+ irq_iter);
+ ret = -EINVAL;
+ goto out;
+ }
+ drv_data->agic_irqs[irq_iter] = res->start;
+ }
+
+ nvadsp_drv_data = drv_data;
+
+#ifdef CONFIG_PM
+#if KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE
+ tegra_pd_add_device(dev);
+#endif
+
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto out;
+#endif
+ ret = nvadsp_hwmbox_init(pdev);
+ if (ret)
+ goto err;
+
+ ret = nvadsp_mbox_init(pdev);
+ if (ret)
+ goto err;
+
+#ifdef CONFIG_TEGRA_ADSP_ACTMON
+ ret = ape_actmon_probe(pdev);
+ if (ret)
+ goto err;
+#endif
+
+ ret = nvadsp_os_probe(pdev);
+ if (ret)
+ goto err;
+
+ ret = nvadsp_reset_init(pdev);
+ if (ret) {
+ dev_err(dev, "Failed initialize resets\n");
+ goto err;
+ }
+
+ ret = nvadsp_app_module_probe(pdev);
+ if (ret)
+ goto err;
+
+ aram_addr = drv_data->adsp_mem[ARAM_ALIAS_0_ADDR];
+ aram_size = drv_data->adsp_mem[ARAM_ALIAS_0_SIZE];
+ ret = nvadsp_aram_init(aram_addr, aram_size);
+ if (ret)
+ dev_err(dev, "Failed to init aram\n");
+
+ nvadsp_bw_register(drv_data);
+
+ if (!drv_data->adsp_os_secload) {
+ ret = nvadsp_acast_init(pdev);
+ if (ret)
+ goto err;
+ }
+
+err:
+#ifdef CONFIG_PM
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0)
+ dev_err(dev, "pm_runtime_put_sync failed\n");
+#endif
+out:
+ return ret;
+}
+
+static int nvadsp_remove(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+
+ nvadsp_bw_unregister(drv_data);
+
+ nvadsp_aram_exit();
+
+ pm_runtime_disable(&pdev->dev);
+
+#ifdef CONFIG_PM
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ nvadsp_runtime_suspend(&pdev->dev);
+#endif
+
+#if KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE
+ tegra_pd_remove_device(&pdev->dev);
+#endif
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static struct nvadsp_chipdata tegra210_adsp_chipdata = {
+ .hwmb = {
+ .reg_idx = AMISC,
+ .hwmbox0_reg = 0x58,
+ .hwmbox1_reg = 0X5C,
+ .hwmbox2_reg = 0x60,
+ .hwmbox3_reg = 0x64,
+ },
+ .adsp_state_hwmbox = 0,
+ .adsp_thread_hwmbox = 0,
+ .adsp_irq_hwmbox = 0,
+ .adsp_shared_mem_hwmbox = 0,
+ .adsp_os_config_hwmbox = 0,
+ .reset_init = nvadsp_reset_t21x_init,
+ .os_init = nvadsp_os_t21x_init,
+#ifdef CONFIG_PM
+ .pm_init = nvadsp_pm_t21x_init,
+#endif
+ .wdt_irq = INT_T210_ADSP_WDT,
+ .start_irq = INT_T210_AGIC_START,
+ .end_irq = INT_T210_AGIC_END,
+
+ .amc_err_war = true,
+};
+
+static struct nvadsp_chipdata tegrat18x_adsp_chipdata = {
+ .hwmb = {
+ .reg_idx = AHSP,
+ .hwmbox0_reg = 0x00000,
+ .hwmbox1_reg = 0X08000,
+ .hwmbox2_reg = 0X10000,
+ .hwmbox3_reg = 0X18000,
+ .hwmbox4_reg = 0X20000,
+ .hwmbox5_reg = 0X28000,
+ .hwmbox6_reg = 0X30000,
+ .hwmbox7_reg = 0X38000,
+ .empty_int_ie = 0x8,
+ },
+ .adsp_shared_mem_hwmbox = 0x18000, /* HWMBOX3 */
+ .adsp_thread_hwmbox = 0x20000, /* HWMBOX4 */
+ .adsp_os_config_hwmbox = 0X28000, /* HWMBOX5 */
+ .adsp_state_hwmbox = 0x30000, /* HWMBOX6 */
+ .adsp_irq_hwmbox = 0x38000, /* HWMBOX7 */
+ .acast_init = nvadsp_acast_t18x_init,
+ .reset_init = nvadsp_reset_t18x_init,
+ .os_init = nvadsp_os_t18x_init,
+#ifdef CONFIG_PM
+ .pm_init = nvadsp_pm_t18x_init,
+#endif
+ .wdt_irq = INT_T18x_ATKE_WDT_IRQ,
+ .start_irq = INT_T18x_AGIC_START,
+ .end_irq = INT_T18x_AGIC_END,
+
+ .amc_err_war = true,
+};
+
+static struct nvadsp_chipdata tegra239_adsp_chipdata = {
+ .hwmb = {
+ .reg_idx = AHSP,
+ .hwmbox0_reg = 0x00000,
+ .hwmbox1_reg = 0X08000,
+ .hwmbox2_reg = 0X10000,
+ .hwmbox3_reg = 0X18000,
+ .hwmbox4_reg = 0X20000,
+ .hwmbox5_reg = 0X28000,
+ .hwmbox6_reg = 0X30000,
+ .hwmbox7_reg = 0X38000,
+ .empty_int_ie = 0x8,
+ },
+ .adsp_shared_mem_hwmbox = 0x18000, /* HWMBOX3 */
+ .adsp_thread_hwmbox = 0x20000, /* HWMBOX4 */
+ .adsp_os_config_hwmbox = 0X28000, /* HWMBOX5 */
+ .adsp_state_hwmbox = 0x30000, /* HWMBOX6 */
+ .adsp_irq_hwmbox = 0x38000, /* HWMBOX7 */
+ .acast_init = nvadsp_acast_t18x_init,
+ .reset_init = nvadsp_reset_t18x_init,
+ .os_init = nvadsp_os_t18x_init,
+#ifdef CONFIG_PM
+ .pm_init = nvadsp_pm_t18x_init,
+#endif
+ .wdt_irq = INT_T18x_ATKE_WDT_IRQ,
+ .start_irq = INT_T18x_AGIC_START,
+ .end_irq = INT_T18x_AGIC_END,
+
+ .amc_err_war = false,
+
+ /* Populate Chip ID Major Revision as well */
+ .chipid_ext = true,
+};
+
+static const struct of_device_id nvadsp_of_match[] = {
+ {
+ .compatible = "nvidia,tegra210-adsp",
+ .data = &tegra210_adsp_chipdata,
+ }, {
+ .compatible = "nvidia,tegra18x-adsp",
+ .data = &tegrat18x_adsp_chipdata,
+ }, {
+ .compatible = "nvidia,tegra239-adsp",
+ .data = &tegra239_adsp_chipdata,
+ }, {
+ },
+};
+#endif
+
+static struct platform_driver nvadsp_driver __refdata = {
+ .driver = {
+ .name = "nvadsp",
+ .owner = THIS_MODULE,
+ .pm = &nvadsp_pm_ops,
+ .of_match_table = of_match_ptr(nvadsp_of_match),
+ },
+ .probe = nvadsp_probe,
+ .remove = nvadsp_remove,
+};
+
+static int __init nvadsp_init(void)
+{
+ return platform_driver_register(&nvadsp_driver);
+}
+
+static void __exit nvadsp_exit(void)
+{
+ platform_driver_unregister(&nvadsp_driver);
+}
+
+module_init(nvadsp_init);
+module_exit(nvadsp_exit);
+
+MODULE_AUTHOR("NVIDIA");
+MODULE_DESCRIPTION("Tegra Host ADSP Driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/platform/tegra/nvadsp/dev.h b/drivers/platform/tegra/nvadsp/dev.h
new file mode 100644
index 00000000..bba98892
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/dev.h
@@ -0,0 +1,341 @@
+/*
+ * dev.h
+ *
+ * A header file for Host driver for ADSP and APE
+ *
+ * Copyright (C) 2014-2022, NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __TEGRA_NVADSP_DEV_H
+#define __TEGRA_NVADSP_DEV_H
+
+#include
+#include
+#include
+#include
+
+#include
+#if KERNEL_VERSION(5, 9, 0) <= LINUX_VERSION_CODE
+#ifdef CONFIG_ARCH_TEGRA_23x_SOC
+#include
+#include
+#endif
+#include
+#endif
+
+#include "hwmailbox.h"
+#include "amc.h"
+
+/*
+ * Note: These enums should be aligned to the regs mentioned in the
+ * device tree
+*/
+enum {
+ AMC,
+ AMISC,
+ ABRIDGE,
+ UNIT_FPGA_RST,
+ AHSP,
+ APE_MAX_REG
+};
+
+enum {
+ ADSP_DRAM1,
+ ADSP_DRAM2,
+ ADSP_MAX_DRAM_MAP
+};
+
+/*
+ * Note: These enums should be aligned to the adsp_mem node mentioned in the
+ * device tree
+*/
+enum adsp_mem_dt {
+ ADSP_OS_ADDR,
+ ADSP_OS_SIZE,
+ ADSP_APP_ADDR,
+ ADSP_APP_SIZE,
+ ARAM_ALIAS_0_ADDR,
+ ARAM_ALIAS_0_SIZE,
+ ACSR_ADDR, /* ACSR: ADSP CPU SHARED REGION */
+ ACSR_SIZE,
+ ADSP_MEM_END,
+};
+
+enum adsp_evp_dt {
+ ADSP_EVP_BASE,
+ ADSP_EVP_SIZE,
+ ADSP_EVP_END,
+};
+
+enum adsp_unit_fpga_reset {
+ ADSP_ASSERT,
+ ADSP_DEASSERT,
+ ADSP_UNIT_FPGA_RESET_END,
+};
+
+#define AMISC_REGS 0x2000
+
+#define AMISC_ADSP_L2_REGFILEBASE 0x10
+#define AMISC_SHRD_SMP_STA 0x14
+#define AMISC_SEM_REG_START 0x1c
+#define AMISC_SEM_REG_END 0x44
+#define AMISC_TSC 0x48
+#define AMISC_ACTMON_AVG_CNT 0x81c
+
+#define AMISC_REG_START_OFFSET 0x0
+#define AMISC_REG_MBOX_OFFSET 0x64
+#define ADSP_ACTMON_REG_START_OFFSET 0x800
+#define ADSP_ACTMON_REG_END_OFFSET 0x828
+#if KERNEL_VERSION(5, 9, 0) <= LINUX_VERSION_CODE
+#ifdef CONFIG_ARCH_TEGRA_23x_SOC
+#define FREQ2ICC(x) (Bps_to_icc(emc_freq_to_bw(x)))
+#else
+#define FREQ2ICC(x) 0UL
+#endif
+#endif
+
+#define NVADSP_ELF "adsp.elf"
+#define MAX_FW_STR 30
+
+enum nvadsp_virqs {
+ MBOX_SEND_VIRQ,
+ MBOX_RECV_VIRQ,
+ WDT_VIRQ,
+ WFI_VIRQ,
+ AMC_ERR_VIRQ,
+ ACTMON_VIRQ,
+ NVADSP_VIRQ_MAX,
+};
+
+struct nvadsp_pm_state {
+ u32 aram[AMC_ARAM_WSIZE];
+ uint32_t amc_regs[AMC_REGS];
+ uint32_t amisc_regs[AMISC_REGS];
+ u32 *evp;
+ void *evp_ptr;
+};
+
+struct nvadsp_hwmb {
+ u32 reg_idx;
+ u32 hwmbox0_reg;
+ u32 hwmbox1_reg;
+ u32 hwmbox2_reg;
+ u32 hwmbox3_reg;
+ u32 hwmbox4_reg;
+ u32 hwmbox5_reg;
+ u32 hwmbox6_reg;
+ u32 hwmbox7_reg;
+ u32 empty_int_ie;
+};
+
+
+typedef int (*acast_init) (struct platform_device *pdev);
+typedef int (*reset_init) (struct platform_device *pdev);
+typedef int (*os_init) (struct platform_device *pdev);
+#ifdef CONFIG_PM
+typedef int (*pm_init) (struct platform_device *pdev);
+#endif
+
+struct nvadsp_chipdata {
+ struct nvadsp_hwmb hwmb;
+ u32 adsp_state_hwmbox;
+ u32 adsp_thread_hwmbox;
+ u32 adsp_irq_hwmbox;
+ u32 adsp_shared_mem_hwmbox;
+ u32 adsp_os_config_hwmbox;
+ acast_init acast_init;
+ reset_init reset_init;
+ os_init os_init;
+#ifdef CONFIG_PM
+ pm_init pm_init;
+#endif
+ int wdt_irq;
+ int start_irq;
+ int end_irq;
+
+ bool amc_err_war;
+ bool chipid_ext;
+};
+
+struct nvadsp_drv_data {
+ void __iomem **base_regs;
+ void __iomem **base_regs_saved;
+ struct platform_device *pdev;
+ struct resource *dram_region[ADSP_MAX_DRAM_MAP];
+ struct hwmbox_queue hwmbox_send_queue;
+
+ struct nvadsp_mbox **mboxes;
+ unsigned long *mbox_ids;
+ spinlock_t mbox_lock;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *adsp_debugfs_root;
+#endif
+ struct clk *ape_clk;
+ struct clk *apb2ape_clk;
+ struct clk *adsp_clk;
+ struct clk *aclk_clk;
+ struct clk *adsp_cpu_abus_clk;
+ struct clk *adsp_neon_clk;
+ struct clk *uartape_clk;
+ struct clk *ahub_clk;
+ unsigned long adsp_freq; /* in KHz*/
+ unsigned long adsp_freq_hz; /* in Hz*/
+ unsigned long ape_freq; /* in KHz*/
+ unsigned long ape_emc_freq; /* in KHz*/
+
+ int (*runtime_suspend)(struct device *dev);
+ int (*runtime_resume)(struct device *dev);
+ int (*runtime_idle)(struct device *dev);
+ int (*assert_adsp)(struct nvadsp_drv_data *drv_data);
+ int (*deassert_adsp)(struct nvadsp_drv_data *drv_data);
+ struct reset_control *adspall_rst;
+ struct reset_control *ape_tke_rst;
+
+ struct nvadsp_pm_state state;
+ bool adsp_os_running;
+ bool adsp_os_suspended;
+ bool adsp_os_secload;
+
+ void *shared_adsp_os_data;
+ dma_addr_t shared_adsp_os_data_iova;
+
+#ifdef CONFIG_TEGRA_ADSP_DFS
+ bool dfs_initialized;
+#endif
+
+#ifdef CONFIG_TEGRA_ADSP_ACTMON
+ bool actmon_initialized;
+#endif
+
+#ifdef CONFIG_TEGRA_ADSP_CPUSTAT
+ bool cpustat_initialized;
+#endif
+
+#if defined(CONFIG_TEGRA_ADSP_FILEIO)
+ bool adspff_init;
+#endif
+
+#ifdef CONFIG_TEGRA_ADSP_LPTHREAD
+ bool lpthread_initialized;
+#endif
+
+ wait_queue_head_t adsp_health_waitq;
+ bool adsp_crashed;
+
+ u32 adsp_mem[ADSP_MEM_END];
+ bool adsp_unit_fpga;
+ u32 unit_fpga_reset[ADSP_UNIT_FPGA_RESET_END];
+ u32 agic_irqs[NVADSP_VIRQ_MAX];
+
+ struct tegra_bwmgr_client *bwmgr;
+#if KERNEL_VERSION(5, 9, 0) <= LINUX_VERSION_CODE
+ struct icc_path *icc_path_handle; /* icc_path handle */
+#endif
+ u32 evp_base[ADSP_EVP_END];
+
+ const struct nvadsp_chipdata *chip_data;
+
+ /* CO mem in backdoor boot */
+ struct resource co_mem;
+
+ /* enum tegra_platform */
+ u32 tegra_platform;
+
+ /* "nvidia,adsp_load_timeout" (in ms) */
+ u32 adsp_load_timeout;
+
+ /* "nvidia,adsp_elf" (FW for backdoor boot) */
+ char adsp_elf[MAX_FW_STR];
+};
+
+#define ADSP_CONFIG 0x04
+#define MAXCLKLATENCY (3 << 29)
+#define UART_BAUD_RATE 9600
+
+status_t nvadsp_mbox_init(struct platform_device *pdev);
+
+int nvadsp_setup_amc_interrupts(struct platform_device *pdev);
+void nvadsp_free_amc_interrupts(struct platform_device *pdev);
+int nvadsp_set_bw(struct nvadsp_drv_data *drv, u32 efreq);
+
+#ifdef CONFIG_TEGRA_ADSP_DFS
+void adsp_cpu_set_rate(unsigned long freq);
+int adsp_dfs_core_init(struct platform_device *pdev);
+int adsp_dfs_core_exit(struct platform_device *pdev);
+u32 adsp_to_emc_freq(u32 adspfreq);
+#endif
+
+#ifdef CONFIG_TEGRA_ADSP_ACTMON
+int ape_actmon_probe(struct platform_device *pdev);
+#endif
+
+#ifdef CONFIG_TEGRA_ADSP_CPUSTAT
+int adsp_cpustat_init(struct platform_device *pdev);
+int adsp_cpustat_exit(struct platform_device *pdev);
+#endif
+
+#if defined(CONFIG_TEGRA_ADSP_FILEIO)
+int adspff_init(struct platform_device *pdev);
+void adspff_exit(void);
+#endif
+
+#ifdef CONFIG_TEGRA_EMC_APE_DFS
+status_t emc_dfs_init(struct platform_device *pdev);
+void emc_dfs_exit(void);
+#endif
+
+#ifdef CONFIG_PM
+static inline int __init nvadsp_pm_init(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+
+ if (drv_data->chip_data->pm_init)
+ return drv_data->chip_data->pm_init(pdev);
+
+ return -EINVAL;
+}
+#endif
+static inline int __init nvadsp_reset_init(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+
+ if (drv_data->chip_data->reset_init)
+ return drv_data->chip_data->reset_init(pdev);
+
+ return -EINVAL;
+}
+
+static inline int __init nvadsp_acast_init(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+
+ if (drv_data->chip_data->acast_init)
+ return drv_data->chip_data->acast_init(pdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_TEGRA_ADSP_LPTHREAD
+int adsp_lpthread_init(bool is_adsp_suspended);
+int adsp_lpthread_resume(void);
+int adsp_lpthread_pause(void);
+int adsp_lpthread_uninit(void);
+int adsp_lpthread_get_state(void);
+
+int adsp_lpthread_entry(struct platform_device *pdev);
+int adsp_lpthread_exit(struct platform_device *pdev);
+int adsp_lpthread_set_suspend(bool is_suspended);
+#endif
+
+#endif /* __TEGRA_NVADSP_DEV_H */
diff --git a/drivers/platform/tegra/nvadsp/dram_app_mem_manager.c b/drivers/platform/tegra/nvadsp/dram_app_mem_manager.c
new file mode 100644
index 00000000..df45f019
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/dram_app_mem_manager.c
@@ -0,0 +1,100 @@
+/*
+ * dram_app_mem_manager.c
+ *
+ * dram app memory manager for allocating memory for text, bss and data
+ *
+ * Copyright (C) 2014-2022, NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s : %d, " fmt, __func__, __LINE__
+
+#include
+#include
+
+#include "dram_app_mem_manager.h"
+
+#define ALIGN_TO_ADSP_PAGE(x) ALIGN(x, 4096)
+
+static void *dram_app_mem_handle;
+
+static LIST_HEAD(dram_app_mem_alloc_list);
+static LIST_HEAD(dram_app_mem_free_list);
+
+void dram_app_mem_print(void)
+{
+ mem_print(dram_app_mem_handle);
+}
+
+void *dram_app_mem_request(const char *name, size_t size)
+{
+ return mem_request(dram_app_mem_handle, name, ALIGN_TO_ADSP_PAGE(size));
+}
+
+bool dram_app_mem_release(void *handle)
+{
+ return mem_release(dram_app_mem_handle, handle);
+}
+
+unsigned long dram_app_mem_get_address(void *handle)
+{
+ return mem_get_address(handle);
+}
+
+static struct dentry *dram_app_mem_dump_debugfs_file;
+
+static int dram_app_mem_dump(struct seq_file *s, void *data)
+{
+ mem_dump(dram_app_mem_handle, s);
+ return 0;
+}
+
+static int dram_app_mem_dump_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dram_app_mem_dump, inode->i_private);
+}
+
+static const struct file_operations dram_app_mem_dump_fops = {
+ .open = dram_app_mem_dump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int dram_app_mem_init(unsigned long start, unsigned long size)
+{
+ dram_app_mem_handle =
+ create_mem_manager("DRAM_APP_MANAGER", start, size);
+ if (IS_ERR(dram_app_mem_handle)) {
+ pr_err("ERROR: failed to create aram memory_manager");
+ return PTR_ERR(dram_app_mem_handle);
+ }
+
+ if (debugfs_initialized()) {
+ dram_app_mem_dump_debugfs_file =
+ debugfs_create_file("dram_app_mem_dump",
+ S_IRUSR, NULL, NULL, &dram_app_mem_dump_fops);
+ if (!dram_app_mem_dump_debugfs_file) {
+ pr_err("ERROR: failed to create dram_app_mem_dump debugfs");
+ destroy_mem_manager(dram_app_mem_handle);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+void dram_app_mem_exit(void)
+{
+ debugfs_remove(dram_app_mem_dump_debugfs_file);
+ destroy_mem_manager(dram_app_mem_handle);
+}
+
diff --git a/drivers/platform/tegra/nvadsp/dram_app_mem_manager.h b/drivers/platform/tegra/nvadsp/dram_app_mem_manager.h
new file mode 100644
index 00000000..7f2ca78e
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/dram_app_mem_manager.h
@@ -0,0 +1,30 @@
+/*
+ * Header file for dram app memory manager
+ *
+ * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TEGRA_NVADSP_DRAM_APP_MEM_MANAGER_H
+#define __TEGRA_NVADSP_DRAM_APP_MEM_MANAGER_H
+
+#include "mem_manager.h"
+
+int dram_app_mem_init(unsigned long, unsigned long);
+void dram_app_mem_exit(void);
+
+void *dram_app_mem_request(const char *name, size_t size);
+bool dram_app_mem_release(void *handle);
+
+unsigned long dram_app_mem_get_address(void *handle);
+void dram_app_mem_print(void);
+
+#endif /* __TEGRA_NVADSP_DRAM_APP_MEM_MANAGER_H */
diff --git a/drivers/platform/tegra/nvadsp/emc_dfs.c b/drivers/platform/tegra/nvadsp/emc_dfs.c
new file mode 100644
index 00000000..e0964e78
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/emc_dfs.c
@@ -0,0 +1,472 @@
+/*
+ * emc_dfs.c
+ *
+ * Emc dynamic frequency scaling due to APE
+ *
+ * Copyright (C) 2014-2020, NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "dev.h"
+
+/* Register offsets */
+#define ABRIDGE_STATS_READ_0 0x04
+#define ABRIDGE_STATS_WRITE_0 0x0c
+#define ABRIDGE_STATS_CLEAR_0 0x1b
+#define ABRIDGE_STATS_HI_0FFSET 0x04
+
+/* Sample Period in usecs */
+#define DEFAULT_SAMPLE_PERIOD 500000
+#define INT_SHIFT 32
+#define make64(hi, low) ((((u64)hi) << INT_SHIFT) | (low))
+#define SCALING_DIVIDER 2
+#define BOOST_DOWN_COUNT 2
+#define DEFAULT_BOOST_UP_THRESHOLD 2000000
+#define DEFAULT_BOOST_STEP 2
+
+struct emc_dfs_info {
+ void __iomem *abridge_base;
+ struct timer_list cnt_timer;
+
+ u64 rd_cnt;
+ u64 wr_cnt;
+ bool enable;
+ u64 avg_cnt;
+
+ unsigned long timer_rate;
+ ktime_t prev_time;
+
+ u32 dn_count;
+ u32 boost_dn_count;
+
+ u64 boost_up_threshold;
+ u8 boost_step;
+
+ struct work_struct clk_set_work;
+ unsigned long cur_freq;
+ bool speed_change_flag;
+ unsigned long max_freq;
+
+ struct clk *emcclk;
+};
+
+static struct emc_dfs_info global_emc_info;
+static struct emc_dfs_info *einfo;
+static struct task_struct *speedchange_task;
+static spinlock_t speedchange_lock;
+
+static u64 read64(u32 offset)
+{
+ u32 low;
+ u32 hi;
+
+ low = readl(einfo->abridge_base + offset);
+ hi = readl(einfo->abridge_base + (offset + ABRIDGE_STATS_HI_0FFSET));
+ return make64(hi, low);
+}
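
read64() reconstructs a 64-bit ABRIDGE counter from two 32-bit registers via make64(). A standalone user-space check of the same combine (not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define EX_INT_SHIFT 32
#define ex_make64(hi, low) ((((uint64_t)(hi)) << EX_INT_SHIFT) | (low))

int main(void)
{
	/* hi = 0x1, low = 0x5 -> 0x100000005 */
	printf("0x%llx\n", (unsigned long long)ex_make64(0x1u, 0x5u));
	return 0;
}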
+static unsigned long count_to_emcfreq(void)
+{
+ unsigned long tfreq = 0;
+
+ if (!einfo->avg_cnt) {
+ if (einfo->dn_count >= einfo->boost_dn_count) {
+ tfreq = einfo->cur_freq / SCALING_DIVIDER;
+ einfo->dn_count = 0;
+ } else
+ einfo->dn_count++;
+ } else if (einfo->avg_cnt >= einfo->boost_up_threshold) {
+ if (einfo->boost_step)
+ tfreq = einfo->cur_freq * einfo->boost_step;
+ }
+
+ pr_debug("%s:avg_cnt: %llu current freq(kHz): %lu target freq(kHz): %lu\n",
+ __func__, einfo->avg_cnt, einfo->cur_freq, tfreq);
+
+ return tfreq;
+}
+
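To make the scaling policy above concrete, here is a standalone model of the count_to_emcfreq() decision using the default tunables (not part of the patch): a sample at or above the boost-up threshold multiplies the current frequency by boost_step, while the third consecutive zero-activity sample halves it.

#include <stdint.h>
#include <stdio.h>

/* Standalone model of count_to_emcfreq(); 0 means "leave the clock alone". */
static unsigned long model_target(uint64_t avg_cnt, unsigned long cur_khz,
				  unsigned int *dn_count)
{
	const uint64_t up_threshold = 2000000;	/* DEFAULT_BOOST_UP_THRESHOLD */
	const unsigned int boost_dn = 2;	/* BOOST_DOWN_COUNT */
	const unsigned int boost_step = 2;	/* DEFAULT_BOOST_STEP */

	if (!avg_cnt) {
		if (*dn_count >= boost_dn) {
			*dn_count = 0;
			return cur_khz / 2;	/* SCALING_DIVIDER */
		}
		(*dn_count)++;
		return 0;
	}
	if (avg_cnt >= up_threshold)
		return cur_khz * boost_step;
	return 0;
}

int main(void)
{
	unsigned int dn = 0;

	printf("%lu\n", model_target(2500000, 100000, &dn));	/* 200000: boost up */
	printf("%lu\n", model_target(0, 100000, &dn));		/* 0: first idle sample */
	printf("%lu\n", model_target(0, 100000, &dn));		/* 0: second idle sample */
	printf("%lu\n", model_target(0, 100000, &dn));		/* 50000: scale down */
	return 0;
}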
+static int clk_work(void *data)
+{
+ int ret;
+
+ if (einfo->emcclk && einfo->speed_change_flag && einfo->cur_freq) {
+ ret = clk_set_rate(einfo->emcclk, einfo->cur_freq * 1000);
+ if (ret) {
+ pr_err("failed to set ape.emc freq:%d\n", ret);
+ BUG_ON(ret);
+ }
+ einfo->cur_freq = clk_get_rate(einfo->emcclk) / 1000;
+ pr_info("ape.emc: setting emc clk: %lu\n", einfo->cur_freq);
+ }
+
+ mod_timer(&einfo->cnt_timer,
+ jiffies + usecs_to_jiffies(einfo->timer_rate));
+ return 0;
+}
+static void emc_dfs_timer(unsigned long data)
+{
+ u64 cur_cnt;
+ u64 delta_cnt;
+ u64 prev_cnt;
+ u64 delta_time;
+ ktime_t now;
+ unsigned long target_freq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&speedchange_lock, flags);
+
+ /* Return if emc dfs is disabled */
+ if (!einfo->enable) {
+ spin_unlock_irqrestore(&speedchange_lock, flags);
+ return;
+ }
+
+ prev_cnt = einfo->rd_cnt + einfo->wr_cnt;
+
+ einfo->rd_cnt = read64((u32)ABRIDGE_STATS_READ_0);
+ einfo->wr_cnt = read64((u32)ABRIDGE_STATS_WRITE_0);
+ pr_debug("einfo->rd_cnt: %llu einfo->wr_cnt: %llu\n",
+ einfo->rd_cnt, einfo->wr_cnt);
+
+ cur_cnt = einfo->rd_cnt + einfo->wr_cnt;
+ delta_cnt = cur_cnt - prev_cnt;
+
+ now = ktime_get();
+
+ delta_time = ktime_to_ns(ktime_sub(now, einfo->prev_time));
+ if (!delta_time) {
+ pr_err("%s: time interval to calculate emc scaling is zero\n",
+ __func__);
+ spin_unlock_irqrestore(&speedchange_lock, flags);
+ goto exit;
+ }
+
+ einfo->prev_time = now;
+ einfo->avg_cnt = delta_cnt / delta_time;
+
+ /* if 0: no scaling is required */
+ target_freq = count_to_emcfreq();
+ if (!target_freq) {
+ einfo->speed_change_flag = false;
+ } else {
+ einfo->cur_freq = target_freq;
+ einfo->speed_change_flag = true;
+ }
+
+ spin_unlock_irqrestore(&speedchange_lock, flags);
+ pr_info("einfo->avg_cnt: %llu delta_cnt: %llu delta_time %llu emc_freq:%lu\n",
+ einfo->avg_cnt, delta_cnt, delta_time, einfo->cur_freq);
+
+exit:
+ wake_up_process(speedchange_task);
+}
+
+static void emc_dfs_enable(void)
+{
+ einfo->rd_cnt = read64((u32)ABRIDGE_STATS_READ_0);
+ einfo->wr_cnt = read64((u32)ABRIDGE_STATS_WRITE_0);
+
+ einfo->prev_time = ktime_get();
+ mod_timer(&einfo->cnt_timer, jiffies + 2);
+}
+static void emc_dfs_disable(void)
+{
+ einfo->rd_cnt = read64((u32)ABRIDGE_STATS_READ_0);
+ einfo->wr_cnt = read64((u32)ABRIDGE_STATS_WRITE_0);
+
+ del_timer_sync(&einfo->cnt_timer);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *emc_dfs_root;
+
+#define RW_MODE (S_IWUSR | S_IRUSR)
+#define RO_MODE S_IRUSR
+
+/* Get emc dfs status: 0: disabled 1: enabled */
+static int dfs_enable_get(void *data, u64 *val)
+{
+ *val = einfo->enable;
+ return 0;
+}
+/* Enable/disable emc dfs */
+static int dfs_enable_set(void *data, u64 val)
+{
+ einfo->enable = (bool) val;
+ /*
+ * If enabling: arm the timer to fire within the next 2 jiffies,
+ * so that the scaled emc value takes effect immediately.
+ */
+ if (einfo->enable)
+ emc_dfs_enable();
+ else
+ emc_dfs_disable();
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(enable_fops, dfs_enable_get,
+ dfs_enable_set, "%llu\n");
+
+/* Get boost up threshold */
+static int boost_up_threshold_get(void *data, u64 *val)
+{
+ *val = einfo->boost_up_threshold;
+ return 0;
+}
+/* Set boost up threshold */
+static int boost_up_threshold_set(void *data, u64 val)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&speedchange_lock, flags);
+
+ if (!einfo->enable) {
+ pr_info("EMC dfs is not enabled\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (val)
+ einfo->boost_up_threshold = val;
+
+err:
+ spin_unlock_irqrestore(&speedchange_lock, flags);
+ return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(up_threshold_fops,
+ boost_up_threshold_get, boost_up_threshold_set, "%llu\n");
+
+/* Get boost step: emc freq is scaled by this factor on boost */
+static int boost_step_get(void *data, u64 *val)
+{
+ *val = einfo->boost_step;
+ return 0;
+}
+/* Set boost step */
+static int boost_step_set(void *data, u64 val)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&speedchange_lock, flags);
+
+ if (!einfo->enable) {
+ pr_info("EMC dfs is not enabled\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (!val)
+ einfo->boost_step = 1;
+ else
+ einfo->boost_step = (u8) val;
+err:
+ spin_unlock_irqrestore(&speedchange_lock, flags);
+ return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(boost_fops, boost_step_get,
+ boost_step_set, "%llu\n");
+
+/* Get boost down count: consecutive low-activity samples required before emc scales down */
+static int boost_down_count_get(void *data, u64 *val)
+{
+ *val = einfo->boost_dn_count;
+ return 0;
+}
+/* Set boost down count */
+static int boost_down_count_set(void *data, u64 val)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&speedchange_lock, flags);
+
+ if (!einfo->enable) {
+ pr_info("EMC dfs is not enabled\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (val)
+ einfo->boost_dn_count = (u32) val;
+ ret = 0;
+err:
+ spin_unlock_irqrestore(&speedchange_lock, flags);
+ return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(down_cnt_fops, boost_down_count_get,
+ boost_down_count_set, "%llu\n");
+
+static int period_get(void *data, u64 *val)
+{
+ *val = einfo->timer_rate;
+ return 0;
+}
+
+/* Set period in usec */
+static int period_set(void *data, u64 val)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&speedchange_lock, flags);
+
+ if (!einfo->enable) {
+ pr_info("EMC dfs is not enabled\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (val)
+ einfo->timer_rate = (unsigned long)val;
+
+err:
+ spin_unlock_irqrestore(&speedchange_lock, flags);
+ return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(period_fops, period_get, period_set, "%llu\n");
+
+
+static int emc_dfs_debugfs_init(struct nvadsp_drv_data *drv)
+{
+ int ret = -ENOMEM;
+ struct dentry *d;
+
+ if (!drv->adsp_debugfs_root)
+ return ret;
+
+ emc_dfs_root = debugfs_create_dir("emc_dfs", drv->adsp_debugfs_root);
+ if (!emc_dfs_root)
+ goto err_out;
+
+ d = debugfs_create_file("enable", RW_MODE, emc_dfs_root, NULL,
+ &enable_fops);
+ if (!d)
+ goto err_root;
+
+ d = debugfs_create_file("boost_up_threshold", RW_MODE, emc_dfs_root,
+ NULL, &up_threshold_fops);
+ if (!d)
+ goto err_root;
+
+ d = debugfs_create_file("boost_step", RW_MODE, emc_dfs_root, NULL,
+ &boost_fops);
+ if (!d)
+ goto err_root;
+
+ d = debugfs_create_file("boost_down_count", RW_MODE, emc_dfs_root,
+ NULL, &down_cnt_fops);
+ if (!d)
+ goto err_root;
+
+ d = debugfs_create_file("period", RW_MODE, emc_dfs_root, NULL,
+ &period_fops);
+ if (!d)
+ goto err_root;
+
+ return 0;
+
+err_root:
+ debugfs_remove_recursive(emc_dfs_root);
+
+err_out:
+ return ret;
+}
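+/*
+ * Illustrative shell usage of the knobs created above; the exact
+ * debugfs path depends on where adsp_debugfs_root is created and is
+ * an assumption here:
+ *
+ *   echo 1   > /sys/kernel/debug/<adsp-root>/emc_dfs/enable
+ *   echo 200 > /sys/kernel/debug/<adsp-root>/emc_dfs/boost_up_threshold
+ */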
+
+#endif
+
+status_t __init emc_dfs_init(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
+#if KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+#endif
+ int ret = 0;
+
+ einfo = &global_emc_info;
+ einfo->abridge_base = drv->base_regs[ABRIDGE];
+
+ einfo->emcclk = clk_get_sys("ape", "emc");
+ if (IS_ERR_OR_NULL(einfo->emcclk)) {
+ dev_info(&pdev->dev, "unable to find ape.emc clock\n");
+ return PTR_ERR(einfo->emcclk);
+ }
+
+ einfo->timer_rate = DEFAULT_SAMPLE_PERIOD;
+ einfo->boost_up_threshold = DEFAULT_BOOST_UP_THRESHOLD;
+ einfo->boost_step = DEFAULT_BOOST_STEP;
+ einfo->dn_count = 0;
+ einfo->boost_dn_count = BOOST_DOWN_COUNT;
+ einfo->enable = 1;
+
+ einfo->max_freq = clk_round_rate(einfo->emcclk, ULONG_MAX);
+ ret = clk_set_rate(einfo->emcclk, einfo->max_freq);
+ if (ret) {
+ dev_info(&pdev->dev, "failed to set ape.emc freq:%d\n", ret);
+ return ret;
+ }
+ einfo->max_freq /= 1000;
+ einfo->cur_freq = clk_get_rate(einfo->emcclk) / 1000;
+ if (!einfo->cur_freq) {
+ dev_info(&pdev->dev, "ape.emc freq is NULL:\n");
+ return PTR_ERR(einfo->emcclk);
+ }
+
+ dev_info(&pdev->dev, "einfo->cur_freq %lu\n", einfo->cur_freq);
+
+ spin_lock_init(&speedchange_lock);
+ init_timer(&einfo->cnt_timer);
+ einfo->cnt_timer.function = emc_dfs_timer;
+
+ speedchange_task = kthread_create(clk_work, NULL, "emc_dfs");
+ if (IS_ERR(speedchange_task))
+ return PTR_ERR(speedchange_task);
+
+#if KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE
+ sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
+#else
+ sched_set_fifo(speedchange_task);
+#endif
+
+ get_task_struct(speedchange_task);
+
+ /* NB: wake up so the thread does not look hung to the freezer */
+ wake_up_process(speedchange_task);
+
+ emc_dfs_enable();
+
+ dev_info(&pdev->dev, "APE EMC DFS is initialized\n");
+
+#ifdef CONFIG_DEBUG_FS
+ emc_dfs_debugfs_init(drv);
+#endif
+
+ return ret;
+}
+void __exit emc_dfs_exit(void)
+{
+ kthread_stop(speedchange_task);
+ put_task_struct(speedchange_task);
+}
diff --git a/drivers/platform/tegra/nvadsp/hwmailbox.c b/drivers/platform/tegra/nvadsp/hwmailbox.c
new file mode 100644
index 00000000..c6d90820
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/hwmailbox.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "dev.h"
+
+
+static struct platform_device *nvadsp_pdev;
+static struct nvadsp_drv_data *nvadsp_drv_data;
+/* Initialized to false by default */
+static bool is_hwmbox_busy;
+#ifdef CONFIG_MBOX_ACK_HANDLER
+static int hwmbox_last_msg;
+#endif
+
+/*
+ * Mailbox 0 is for receiving messages
+ * from ADSP i.e. CPU <-- ADSP.
+ */
+#define INT_RECV_HWMBOX INT_AMISC_MBOX_FULL0
+
+static inline u32 recv_hwmbox(void)
+{
+ return nvadsp_drv_data->chip_data->hwmb.hwmbox0_reg;
+}
+
+/*
+ * Mailbox 1 is for sending messages
+ * to ADSP i.e. CPU --> ADSP
+ */
+#define INT_SEND_HWMBOX INT_AMISC_MBOX_EMPTY1
+
+static inline u32 send_hwmbox(void)
+{
+ return nvadsp_drv_data->chip_data->hwmb.hwmbox1_reg;
+}
+
+
+u32 hwmb_reg_idx(void)
+{
+ return nvadsp_drv_data->chip_data->hwmb.reg_idx;
+}
+
+u32 hwmbox_readl(u32 reg)
+{
+ return readl(nvadsp_drv_data->base_regs[hwmb_reg_idx()] + reg);
+}
+
+void hwmbox_writel(u32 val, u32 reg)
+{
+ writel(val, nvadsp_drv_data->base_regs[hwmb_reg_idx()] + reg);
+}
+
+
+#define PRINT_HWMBOX(x) \
+ dev_info(&nvadsp_pdev->dev, "%s: 0x%x\n", #x, hwmbox_readl(x))
+
+void dump_mailbox_regs(void)
+{
+ dev_info(&nvadsp_pdev->dev, "dumping hwmailbox registers ...\n");
+ PRINT_HWMBOX(recv_hwmbox());
+ PRINT_HWMBOX(send_hwmbox());
+}
+
+static void hwmboxq_init(struct hwmbox_queue *queue)
+{
+ queue->head = 0;
+ queue->tail = 0;
+ queue->count = 0;
+ init_completion(&queue->comp);
+ spin_lock_init(&queue->lock);
+}
+
+/* Must be called with queue lock held in non-interrupt context */
+static inline bool
+is_hwmboxq_empty(struct hwmbox_queue *queue)
+{
+ return (queue->count == 0);
+}
+
+/* Must be called with queue lock held in non-interrupt context */
+static inline bool
+is_hwmboxq_full(struct hwmbox_queue *queue)
+{
+ return (queue->count == HWMBOX_QUEUE_SIZE);
+}
+
+/* Must be called with queue lock held in non-interrupt context */
+static status_t hwmboxq_enqueue(struct hwmbox_queue *queue,
+ uint32_t data)
+{
+ int ret = 0;
+
+ if (is_hwmboxq_full(queue)) {
+ ret = -EBUSY;
+ goto comp;
+ }
+ queue->array[queue->tail] = data;
+ queue->tail = (queue->tail + 1) & HWMBOX_QUEUE_SIZE_MASK;
+ queue->count++;
+
+ if (is_hwmboxq_full(queue))
+ goto comp;
+ else
+ goto out;
+
+ comp:
+ reinit_completion(&queue->comp);
+ out:
+ return ret;
+}
+
+status_t nvadsp_hwmbox_send_data(uint16_t mid, uint32_t data, uint32_t flags)
+{
+ spinlock_t *lock = &nvadsp_drv_data->hwmbox_send_queue.lock;
+ unsigned long lockflags;
+ int ret = 0;
+
+ if (flags & NVADSP_MBOX_SMSG) {
+ data = PREPARE_HWMBOX_SMSG(mid, data);
+ pr_debug("nvadsp_mbox_send: data: 0x%x\n", data);
+ }
+
+ /* TODO handle LMSG */
+
+ spin_lock_irqsave(lock, lockflags);
+
+ if (!is_hwmbox_busy) {
+ is_hwmbox_busy = true;
+ pr_debug("nvadsp_mbox_send: empty mailbox. write to mailbox.\n");
+#ifdef CONFIG_MBOX_ACK_HANDLER
+ hwmbox_last_msg = data;
+#endif
+ hwmbox_writel(data, send_hwmbox());
+ } else {
+ pr_debug("nvadsp_mbox_send: enqueue data\n");
+ ret = hwmboxq_enqueue(&nvadsp_drv_data->hwmbox_send_queue,
+ data);
+ }
+ spin_unlock_irqrestore(lock, lockflags);
+ return ret;
+}
+
+/* Must be called with queue lock held in non-interrupt context */
+static status_t hwmboxq_dequeue(struct hwmbox_queue *queue,
+ uint32_t *data)
+{
+ int ret = 0;
+
+ if (is_hwmboxq_empty(queue)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (is_hwmboxq_full(queue))
+ complete_all(&nvadsp_drv_data->hwmbox_send_queue.comp);
+
+ *data = queue->array[queue->head];
+ queue->head = (queue->head + 1) & HWMBOX_QUEUE_SIZE_MASK;
+ queue->count--;
+
+ out:
+ return ret;
+}
+
+static irqreturn_t hwmbox_send_empty_int_handler(int irq, void *devid)
+{
+ spinlock_t *lock = &nvadsp_drv_data->hwmbox_send_queue.lock;
+ struct device *dev = &nvadsp_pdev->dev;
+ unsigned long lockflags;
+ uint32_t data;
+ int ret;
+
+ if (!is_hwmbox_busy)
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(lock, lockflags);
+
+ data = hwmbox_readl(send_hwmbox());
+ if (data != PREPARE_HWMBOX_EMPTY_MSG())
+ dev_err(dev, "last mailbox sent failed with 0x%x\n", data);
+
+#ifdef CONFIG_MBOX_ACK_HANDLER
+ {
+ uint16_t last_mboxid = HWMBOX_SMSG_MID(hwmbox_last_msg);
+ struct nvadsp_mbox *mbox = nvadsp_drv_data->mboxes[last_mboxid];
+
+ if (mbox) {
+ nvadsp_mbox_handler_t ack_handler = mbox->ack_handler;
+
+ if (ack_handler) {
+ uint32_t msg = HWMBOX_SMSG_MSG(hwmbox_last_msg);
+
+ ack_handler(msg, mbox->hdata);
+ }
+ }
+ }
+#endif
+ ret = hwmboxq_dequeue(&nvadsp_drv_data->hwmbox_send_queue,
+ &data);
+ if (ret == 0) {
+#ifdef CONFIG_MBOX_ACK_HANDLER
+ hwmbox_last_msg = data;
+#endif
+ hwmbox_writel(data, send_hwmbox());
+ dev_dbg(dev, "Writing 0x%x to SEND_HWMBOX\n", data);
+ } else {
+ is_hwmbox_busy = false;
+ }
+ spin_unlock_irqrestore(lock, lockflags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t hwmbox_recv_full_int_handler(int irq, void *devid)
+{
+ uint32_t data;
+ int ret;
+
+ data = hwmbox_readl(recv_hwmbox());
+ hwmbox_writel(PREPARE_HWMBOX_EMPTY_MSG(), recv_hwmbox());
+
+ if (IS_HWMBOX_MSG_SMSG(data)) {
+ uint16_t mboxid = HWMBOX_SMSG_MID(data);
+ struct nvadsp_mbox *mbox = nvadsp_drv_data->mboxes[mboxid];
+
+ if (!mbox) {
+ dev_info(&nvadsp_pdev->dev,
+ "Failed to get mbox for mboxid: %u\n",
+ mboxid);
+ goto out;
+ }
+
+ if (mbox->handler) {
+ mbox->handler(HWMBOX_SMSG_MSG(data), mbox->hdata);
+ } else {
+ ret = nvadsp_mboxq_enqueue(&mbox->recv_queue,
+ HWMBOX_SMSG_MSG(data));
+ if (ret) {
+ dev_info(&nvadsp_pdev->dev,
+ "Failed to deliver msg 0x%x to"
+ " mbox id %u\n",
+ HWMBOX_SMSG_MSG(data), mboxid);
+ goto out;
+ }
+ }
+ } else if (IS_HWMBOX_MSG_LMSG(data)) {
+ /* TODO */
+ }
+ out:
+ return IRQ_HANDLED;
+}
+
+void nvadsp_free_hwmbox_interrupts(struct platform_device *pdev)
+{
+
+ struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int recv_virq, send_virq;
+
+ recv_virq = drv->agic_irqs[MBOX_RECV_VIRQ];
+ send_virq = drv->agic_irqs[MBOX_SEND_VIRQ];
+
+ devm_free_irq(dev, recv_virq, pdev);
+ devm_free_irq(dev, send_virq, pdev);
+}
+
+int nvadsp_setup_hwmbox_interrupts(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ u32 empty_int_ie = drv->chip_data->hwmb.empty_int_ie;
+ int recv_virq, send_virq;
+ int ret;
+
+ recv_virq = drv->agic_irqs[MBOX_RECV_VIRQ];
+ send_virq = drv->agic_irqs[MBOX_SEND_VIRQ];
+
+ ret = devm_request_irq(dev, recv_virq, hwmbox_recv_full_int_handler,
+ IRQF_TRIGGER_RISING, "hwmbox0_recv_full", pdev);
+ if (ret)
+ goto err;
+
+ if (empty_int_ie)
+ hwmbox_writel(0x0, send_hwmbox() + empty_int_ie);
+ ret = devm_request_irq(dev, send_virq, hwmbox_send_empty_int_handler,
+ IRQF_TRIGGER_RISING,
+ "hwmbox1_send_empty", pdev);
+ if (empty_int_ie)
+ hwmbox_writel(0x1, send_hwmbox() + empty_int_ie);
+ if (ret)
+ goto free_interrupts;
+
+ return ret;
+
+ free_interrupts:
+ nvadsp_free_hwmbox_interrupts(pdev);
+ err:
+ return ret;
+}
+
+int __init nvadsp_hwmbox_init(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ nvadsp_pdev = pdev;
+ nvadsp_drv_data = drv;
+
+ hwmboxq_init(&drv->hwmbox_send_queue);
+
+ return ret;
+}
diff --git a/drivers/platform/tegra/nvadsp/hwmailbox.h b/drivers/platform/tegra/nvadsp/hwmailbox.h
new file mode 100644
index 00000000..b0792961
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/hwmailbox.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __HWMAILBOX_H
+#define __HWMAILBOX_H
+
+#include
+#include
+#include
+#include
+
+/*
+ * The interpretation of hwmailbox content is:
+ * 31 30 29 0
+ * [TAG|TYPE|MESSAGE]
+ */
+#define HWMBOX_TAG_SHIFT 31
+#define HWMBOX_TAG_MASK 0x1
+#define HWMBOX_TAG_INVALID 0
+#define HWMBOX_TAG_VALID 1
+/* Set Invalid TAG */
+#define SET_HWMBOX_TAG_INVALID (HWMBOX_TAG_INVALID << HWMBOX_TAG_SHIFT)
+/* Set Valid TAG */
+#define SET_HWMBOX_TAG_VALID (HWMBOX_TAG_VALID << HWMBOX_TAG_SHIFT)
+/* Get current TAG */
+#define HWMBOX_TAG(val) ((val & HWMBOX_TAG_MASK) << HWMBOX_TAG_SHIFT)
+
+/*
+ * Mailbox can be used for sending short messages and long messages
+ */
+#define HWMBOX_MSG_TYPE_SHIFT 30
+#define HWMBOX_MSG_TYPE_MASK 0x1
+#define HWMBOX_MSG_SMSG 0
+#define HWMBOX_MSG_LMSG 1
+/* Set SMSG type */
+#define SET_HWMBOX_MSG_SMSG (HWMBOX_MSG_SMSG << HWMBOX_MSG_TYPE_SHIFT)
+/* Set LMSG type */
+#define SET_HWMBOX_MSG_LMSG (HWMBOX_MSG_LMSG << HWMBOX_MSG_TYPE_SHIFT)
+/* Get MSG type */
+#define HWMBOX_MSG_TYPE(val) \
+ ((val >> HWMBOX_MSG_TYPE_SHIFT) & HWMBOX_MSG_TYPE_MASK)
+/* Check if SMSG */
+#define IS_HWMBOX_MSG_SMSG(val) \
+ (!((val >> HWMBOX_MSG_TYPE_SHIFT) & HWMBOX_MSG_TYPE_MASK))
+/* Check if LMSG */
+#define IS_HWMBOX_MSG_LMSG(val) \
+ ((val >> HWMBOX_MSG_TYPE_SHIFT) & HWMBOX_MSG_TYPE_MASK)
+
+/*
+ * The format for a short message is:
+ * 31 30 29 20 19 0
+ * [TAG|TYPE|MBOX ID|SHORT MESSAGE]
+ * 1b 1b 10bits 20bits
+ */
+#define HWMBOX_SMSG_SHIFT 0
+#define HWMBOX_SMSG_MASK 0x3FFFFFFF
+#define HWMBOX_SMSG(val) ((val >> HWMBOX_SMSG_SHIFT) & HWMBOX_SMSG_MASK)
+#define HWMBOX_SMSG_MID_SHIFT 20
+#define HWMBOX_SMSG_MID_MASK 0x3FF
+#define HWMBOX_SMSG_MID(val) \
+ ((val >> HWMBOX_SMSG_MID_SHIFT) & HWMBOX_SMSG_MID_MASK)
+#define HWMBOX_SMSG_MSG_SHIFT 0
+#define HWMBOX_SMSG_MSG_MASK 0xFFFFF
+#define HWMBOX_SMSG_MSG(val) \
+ ((val >> HWMBOX_SMSG_MSG_SHIFT) & HWMBOX_SMSG_MSG_MASK)
+/* Set mailbox id for a short message */
+#define SET_HWMBOX_SMSG_MID(val) \
+ ((val & HWMBOX_SMSG_MID_MASK) << HWMBOX_SMSG_MID_SHIFT)
+/* Set msg value in a short message */
+#define SET_HWMBOX_SMSG_MSG(val) \
+ ((val & HWMBOX_SMSG_MSG_MASK) << HWMBOX_SMSG_MSG_SHIFT)
+
+/* Prepare a short message with mailbox id and data */
+#define PREPARE_HWMBOX_SMSG(mid, data) (SET_HWMBOX_TAG_VALID | \
+ SET_HWMBOX_MSG_SMSG | \
+ SET_HWMBOX_SMSG_MID(mid) | \
+ SET_HWMBOX_SMSG_MSG(data))
+/* Prepare empty mailbox value */
+#define PREPARE_HWMBOX_EMPTY_MSG() (HWMBOX_TAG_INVALID | 0x0)
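+
+/*
+ * Worked example (values are arbitrary): PREPARE_HWMBOX_SMSG(0x205, 0xABCDE)
+ * evaluates to
+ *   (1 << 31) | (0 << 30) | (0x205 << 20) | 0xABCDE == 0xA05ABCDE
+ * i.e. a valid-tagged short message for mailbox id 0x205 carrying the
+ * 20-bit payload 0xABCDE; HWMBOX_SMSG_MID() and HWMBOX_SMSG_MSG()
+ * recover 0x205 and 0xABCDE from that word on the receiving side.
+ */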
+
+/*
+ * Queue size must be power of 2 as '&' op
+ * is being used to manage circular queues
+ */
+#define HWMBOX_QUEUE_SIZE 1024
+#define HWMBOX_QUEUE_SIZE_MASK (HWMBOX_QUEUE_SIZE - 1)
+struct hwmbox_queue {
+ uint32_t array[HWMBOX_QUEUE_SIZE];
+ uint16_t head;
+ uint16_t tail;
+ uint16_t count;
+ struct completion comp;
+ spinlock_t lock;
+};
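+
+/*
+ * Index arithmetic sketch: because HWMBOX_QUEUE_SIZE is a power of two,
+ * (tail + 1) & HWMBOX_QUEUE_SIZE_MASK is equivalent to
+ * (tail + 1) % HWMBOX_QUEUE_SIZE, e.g. (1023 + 1) & 0x3FF == 0,
+ * so the queue indices wrap around without a division.
+ */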
+
+u32 hwmb_reg_idx(void);
+u32 hwmbox_readl(u32 reg);
+void hwmbox_writel(u32 val, u32 reg);
+int nvadsp_hwmbox_init(struct platform_device *);
+status_t nvadsp_hwmbox_send_data(uint16_t, uint32_t, uint32_t);
+void dump_mailbox_regs(void);
+
+int nvadsp_setup_hwmbox_interrupts(struct platform_device *pdev);
+void nvadsp_free_hwmbox_interrupts(struct platform_device *pdev);
+
+#endif /* __HWMAILBOX_H */
diff --git a/drivers/platform/tegra/nvadsp/log_state.h b/drivers/platform/tegra/nvadsp/log_state.h
new file mode 100644
index 00000000..581c1d92
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/log_state.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2017, NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LOG_STATE_H
+#define __LOG_STATE_H
+
+#ifdef __ASSEMBLER__
+#define ENUM_START
+#define ENUM_VALUE(key, value) .equ key, value
+#define ENUM_END(typename)
+#else
+#define ENUM_START typedef enum {
+#define ENUM_VALUE(key, value) key = value,
+#define ENUM_END(typename) } typename;
+#endif
+
+#define STATE_LOG_MASK 0x7FFFFFFF
+
+ENUM_START
+
+ENUM_VALUE(ADSP_LOADER_MAIN_ENTRY, 0x1)
+ENUM_VALUE(ADSP_LOADER_MAIN_CACHE_DISABLE_COMPLETE, 0x2)
+ENUM_VALUE(ADSP_LOADER_MAIN_CONFIGURE_MMU_COMPLETE, 0x3)
+ENUM_VALUE(ADSP_LOADER_MAIN_CACHE_ENABLE_COMPLETE, 0x4)
+ENUM_VALUE(ADSP_LOADER_MAIN_FPU_ENABLE_COMPLETE, 0x5)
+ENUM_VALUE(ADSP_LOADER_MAIN_DECOMPRESSION_COMPLETE, 0x6)
+ENUM_VALUE(ADSP_LOADER_MAIN_EXIT, 0x7)
+
+ENUM_VALUE(ADSP_START_ENTRY_AT_RESET, 0x101)
+ENUM_VALUE(ADSP_START_CPU_EARLY_INIT, 0x102)
+ENUM_VALUE(ADSP_START_FIRST_BOOT, 0x103)
+ENUM_VALUE(ADSP_START_LK_MAIN_ENTRY, 0x104)
+
+ENUM_VALUE(ADSP_LK_MAIN_ENTRY, 0x201)
+ENUM_VALUE(ADSP_LK_MAIN_EARLY_THREAD_INIT_COMPLETE, 0x202)
+ENUM_VALUE(ADSP_LK_MAIN_EARLY_ARCH_INIT_COMPLETE, 0x203)
+ENUM_VALUE(ADSP_LK_MAIN_EARLY_PLATFORM_INIT_COMPLETE, 0x204)
+ENUM_VALUE(ADSP_LK_MAIN_EARLY_TARGET_INIT_COMPLETE, 0x205)
+ENUM_VALUE(ADSP_LK_MAIN_CONSTRUCTOR_INIT_COMPLETE, 0x206)
+ENUM_VALUE(ADSP_LK_MAIN_HEAP_INIT_COMPLETE, 0x207)
+ENUM_VALUE(ADSP_LK_MAIN_KERNEL_INIT_COMPLETE, 0x208)
+ENUM_VALUE(ADSP_LK_MAIN_CPU_RESUME_ENTRY, 0x209)
+
+ENUM_VALUE(ADSP_BOOTSTRAP2_ARCH_INIT_COMPLETE, 0x301)
+ENUM_VALUE(ADSP_BOOTSTRAP2_PLATFORM_INIT_COMPLETE, 0x302)
+ENUM_VALUE(ADSP_BOOTSTRAP2_TARGET_INIT_COMPLETE, 0x303)
+ENUM_VALUE(ADSP_BOOTSTRAP2_APP_MODULE_INIT_COMPLETE, 0x304)
+ENUM_VALUE(ADSP_BOOTSTRAP2_APP_INIT_COMPLETE, 0x305)
+ENUM_VALUE(ADSP_BOOTSTRAP2_STATIC_APP_INIT_COMPLETE, 0x306)
+ENUM_VALUE(ADSP_BOOTSTRAP2_OS_LOAD_COMPLETE, 0x307)
+
+ENUM_VALUE(ADSP_SUSPEND_BEGINS, 0x320)
+ENUM_VALUE(ADSP_SUSPEND_MBX_SEND_COMPLETE, 0x321)
+ENUM_VALUE(ADSP_SUSPEND_DISABLED_TIMERS, 0x322)
+ENUM_VALUE(ADSP_SUSPEND_DISABLED_INTS, 0x323)
+ENUM_VALUE(ADSP_SUSPEND_ARAM_SAVED, 0x324)
+ENUM_VALUE(ADSP_SUSPEND_AMC_SAVED, 0x325)
+ENUM_VALUE(ADSP_SUSPEND_AMISC_SAVED, 0x326)
+ENUM_VALUE(ADSP_SUSPEND_L1_CACHE_DISABLED, 0x327)
+ENUM_VALUE(ADSP_SUSPEND_L2_CACHE_DISABLED, 0x328)
+ENUM_VALUE(ADSP_RESUME_ADSP, 0x330)
+ENUM_VALUE(ADSP_RESUME_AMISC_RESTORED, 0x331)
+ENUM_VALUE(ADSP_RESUME_AMC_RESTORED, 0x332)
+ENUM_VALUE(ADSP_RESUME_ARAM_RESTORED, 0x333)
+ENUM_VALUE(ADSP_RESUME_COMPLETE, 0x334)
+ENUM_VALUE(ADSP_WFI_ENTER, 0x335)
+ENUM_VALUE(ADSP_WFI_EXIT, 0x336)
+ENUM_VALUE(ADSP_DFS_MBOX_RECV, 0x337)
+ENUM_VALUE(ADSP_DFS_MBOX_SENT, 0x338)
+
+ENUM_END(adsp_state)
+
+#endif
diff --git a/drivers/platform/tegra/nvadsp/mailbox.c b/drivers/platform/tegra/nvadsp/mailbox.c
new file mode 100644
index 00000000..94afbd43
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/mailbox.c
@@ -0,0 +1,353 @@
+/*
+ * ADSP mailbox manager
+ *
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "dev.h"
+#include
+#include
+
+#define NVADSP_MAILBOX_START 512
+#define NVADSP_MAILBOX_MAX 1024
+#define NVADSP_MAILBOX_OS_MAX 16
+
+static struct nvadsp_mbox *nvadsp_mboxes[NVADSP_MAILBOX_MAX];
+static DECLARE_BITMAP(nvadsp_mbox_ids, NVADSP_MAILBOX_MAX);
+static struct nvadsp_drv_data *nvadsp_drv_data;
+
+static inline bool is_mboxq_empty(struct nvadsp_mbox_queue *queue)
+{
+ return (queue->count == 0);
+}
+
+static inline bool is_mboxq_full(struct nvadsp_mbox_queue *queue)
+{
+ return (queue->count == NVADSP_MBOX_QUEUE_SIZE);
+}
+
+static void mboxq_init(struct nvadsp_mbox_queue *queue)
+{
+ queue->head = 0;
+ queue->tail = 0;
+ queue->count = 0;
+ init_completion(&queue->comp);
+ spin_lock_init(&queue->lock);
+}
+
+static void mboxq_destroy(struct nvadsp_mbox_queue *queue)
+{
+ if (!is_mboxq_empty(queue))
+ pr_info("Mbox queue %p is not empty.\n", queue);
+
+ queue->head = 0;
+ queue->tail = 0;
+ queue->count = 0;
+}
+
+static status_t mboxq_enqueue(struct nvadsp_mbox_queue *queue,
+ uint32_t data)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ if (is_mboxq_full(queue)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ spin_lock_irqsave(&queue->lock, flags);
+ if (is_mboxq_empty(queue))
+ complete_all(&queue->comp);
+
+ queue->array[queue->tail] = data;
+ queue->tail = (queue->tail + 1) & NVADSP_MBOX_QUEUE_SIZE_MASK;
+ queue->count++;
+ spin_unlock_irqrestore(&queue->lock, flags);
+ out:
+ return ret;
+}
+
+status_t nvadsp_mboxq_enqueue(struct nvadsp_mbox_queue *queue,
+ uint32_t data)
+{
+ return mboxq_enqueue(queue, data);
+}
+
+static status_t mboxq_dequeue(struct nvadsp_mbox_queue *queue,
+ uint32_t *data)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&queue->lock, flags);
+ if (is_mboxq_empty(queue)) {
+ ret = -EBUSY;
+ goto comp;
+ }
+
+ *data = queue->array[queue->head];
+ queue->head = (queue->head + 1) & NVADSP_MBOX_QUEUE_SIZE_MASK;
+ queue->count--;
+
+ if (is_mboxq_empty(queue))
+ goto comp;
+ else
+ goto out;
+ comp:
+ reinit_completion(&queue->comp);
+ out:
+ spin_unlock_irqrestore(&queue->lock, flags);
+ return ret;
+}
+
+static void mboxq_dump(struct nvadsp_mbox_queue *queue)
+{
+ unsigned long flags;
+ uint16_t head, count;
+ uint32_t data;
+
+ spin_lock_irqsave(&queue->lock, flags);
+
+ count = queue->count;
+ pr_info("nvadsp: queue %p count:%d\n", queue, count);
+
+ pr_info("nvadsp: queue data: ");
+ head = queue->head;
+ while (count) {
+ data = queue->array[head];
+ head = (head + 1) & NVADSP_MBOX_QUEUE_SIZE_MASK;
+ count--;
+ pr_info("0x%x ", data);
+ }
+ pr_info(" dumped\n");
+
+ spin_unlock_irqrestore(&queue->lock, flags);
+}
+
+static uint16_t nvadsp_mbox_alloc_mboxid(void)
+{
+ unsigned long start = NVADSP_MAILBOX_START;
+ unsigned int nr = 1;
+ unsigned long align = 0;
+ uint16_t mid;
+
+ mid = bitmap_find_next_zero_area(nvadsp_drv_data->mbox_ids,
+ NVADSP_MAILBOX_MAX - 1,
+ start, nr, align);
+
+ bitmap_set(nvadsp_drv_data->mbox_ids, mid, 1);
+ return mid;
+}
+
+static status_t nvadsp_mbox_free_mboxid(uint16_t mid)
+{
+ bitmap_clear(nvadsp_drv_data->mbox_ids, mid, 1);
+ return 0;
+}
+
+status_t nvadsp_mbox_open(struct nvadsp_mbox *mbox, uint16_t *mid,
+ const char *name, nvadsp_mbox_handler_t handler,
+ void *hdata)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ if (!nvadsp_drv_data) {
+ ret = -ENOSYS;
+ goto err;
+ }
+
+ spin_lock_irqsave(&nvadsp_drv_data->mbox_lock, flags);
+
+ if (!mbox) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (*mid == 0) {
+ mbox->id = nvadsp_mbox_alloc_mboxid();
+ if (mbox->id >= NVADSP_MAILBOX_MAX) {
+ ret = -ENOMEM;
+ mbox->id = 0;
+ goto out;
+ }
+ *mid = mbox->id;
+ } else {
+ if (*mid >= NVADSP_MAILBOX_MAX) {
+ pr_debug("%s: Invalid mailbox %d.\n",
+ __func__, *mid);
+ ret = -ERANGE;
+ goto out;
+ }
+
+ *mid = array_index_nospec(*mid, NVADSP_MAILBOX_MAX);
+
+ if (nvadsp_drv_data->mboxes[*mid]) {
+ pr_debug("%s: mailbox %d already opened.\n",
+ __func__, *mid);
+ ret = -EADDRINUSE;
+ goto out;
+ }
+ mbox->id = *mid;
+ }
+
+ strncpy(mbox->name, name, NVADSP_MBOX_NAME_MAX);
+ mboxq_init(&mbox->recv_queue);
+ mbox->handler = handler;
+ mbox->hdata = hdata;
+
+ nvadsp_drv_data->mboxes[mbox->id] = mbox;
+ out:
+ spin_unlock_irqrestore(&nvadsp_drv_data->mbox_lock, flags);
+ err:
+ return ret;
+}
+EXPORT_SYMBOL(nvadsp_mbox_open);
+
+status_t nvadsp_mbox_send(struct nvadsp_mbox *mbox, uint32_t data,
+ uint32_t flags, bool block, unsigned int timeout)
+{
+ int ret = 0;
+
+ if (!nvadsp_drv_data) {
+ pr_err("ADSP drv_data is NULL\n");
+ ret = -ENOSYS;
+ goto out;
+ }
+
+ if (!mbox) {
+ pr_err("ADSP MBOX is NULL\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ retry:
+ ret = nvadsp_hwmbox_send_data(mbox->id, data, flags);
+ if (!ret)
+ goto out;
+
+ if (ret == -EBUSY) {
+ if (block) {
+ ret = wait_for_completion_timeout(
+ &nvadsp_drv_data->hwmbox_send_queue.comp,
+ msecs_to_jiffies(timeout));
+ if (ret) {
+ pr_warn("ADSP HWMBOX send retry\n");
+ block = false;
+ goto retry;
+ } else {
+ pr_err("ADSP wait for completion timed out\n");
+ ret = -ETIME;
+ goto out;
+ }
+ } else {
+ pr_debug("Failed to enqueue data 0x%x. ret: %d\n",
+ data, ret);
+ }
+ } else if (ret) {
+ pr_warn("Failed to enqueue data 0x%x. ret: %d\n", data, ret);
+ goto out;
+ }
+ out:
+ return ret;
+}
+EXPORT_SYMBOL(nvadsp_mbox_send);
+
+status_t nvadsp_mbox_recv(struct nvadsp_mbox *mbox, uint32_t *data, bool block,
+ unsigned int timeout)
+{
+ int ret = 0;
+
+ if (!nvadsp_drv_data) {
+ ret = -ENOSYS;
+ goto out;
+ }
+
+ if (!mbox) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ retry:
+ ret = mboxq_dequeue(&mbox->recv_queue, data);
+ if (!ret)
+ goto out;
+
+ if (ret == -EBUSY) {
+ if (block) {
+ ret = wait_for_completion_timeout(
+ &mbox->recv_queue.comp,
+ msecs_to_jiffies(timeout));
+ if (ret) {
+ block = false;
+ goto retry;
+ } else {
+ ret = -ETIME;
+ goto out;
+ }
+ } else {
+ pr_debug("Failed to receive data. ret: %d\n", ret);
+ }
+ } else if (ret) {
+ pr_debug("Failed to receive data. ret: %d\n", ret);
+ goto out;
+ }
+ out:
+ return ret;
+}
+EXPORT_SYMBOL(nvadsp_mbox_recv);
+
+status_t nvadsp_mbox_close(struct nvadsp_mbox *mbox)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ if (!nvadsp_drv_data) {
+ ret = -ENOSYS;
+ goto err;
+ }
+
+ spin_lock_irqsave(&nvadsp_drv_data->mbox_lock, flags);
+ if (!mbox) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!is_mboxq_empty(&mbox->recv_queue)) {
+ ret = -ENOTEMPTY;
+ mboxq_dump(&mbox->recv_queue);
+ goto out;
+ }
+
+ nvadsp_mbox_free_mboxid(mbox->id);
+ mboxq_destroy(&mbox->recv_queue);
+ nvadsp_drv_data->mboxes[mbox->id] = NULL;
+ out:
+ spin_unlock_irqrestore(&nvadsp_drv_data->mbox_lock, flags);
+ err:
+ return ret;
+}
+EXPORT_SYMBOL(nvadsp_mbox_close);
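+
+/*
+ * Hedged usage sketch of the API exported above; the mailbox name,
+ * payload and timeouts are made-up values:
+ *
+ *   struct nvadsp_mbox mbox;
+ *   uint16_t mid = 0;              // 0 asks for a dynamically allocated id
+ *   uint32_t msg;
+ *
+ *   if (!nvadsp_mbox_open(&mbox, &mid, "demo", NULL, NULL)) {
+ *       nvadsp_mbox_send(&mbox, 0xBEEF, NVADSP_MBOX_SMSG, true, 500);
+ *       nvadsp_mbox_recv(&mbox, &msg, true, 500);
+ *       nvadsp_mbox_close(&mbox);
+ *   }
+ */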
+
+status_t __init nvadsp_mbox_init(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
+
+ drv->mboxes = nvadsp_mboxes;
+ drv->mbox_ids = nvadsp_mbox_ids;
+
+ spin_lock_init(&drv->mbox_lock);
+
+ nvadsp_drv_data = drv;
+
+ return 0;
+}
diff --git a/drivers/platform/tegra/nvadsp/mem_manager.c b/drivers/platform/tegra/nvadsp/mem_manager.c
new file mode 100644
index 00000000..f0dc582e
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/mem_manager.c
@@ -0,0 +1,316 @@
+/*
+ * mem_manager.c
+ *
+ * memory manager
+ *
+ * Copyright (C) 2014-2018 NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s : %d, " fmt, __func__, __LINE__
+
+#include
+#include
+#include
+#include
+#include
+
+#include "mem_manager.h"
+
+static void clear_alloc_list(struct mem_manager_info *mm_info);
+
+void *mem_request(void *mem_handle, const char *name, size_t size)
+{
+ unsigned long flags;
+ struct mem_manager_info *mm_info =
+ (struct mem_manager_info *)mem_handle;
+ struct mem_chunk *mc_iterator = NULL, *best_match_chunk = NULL;
+ struct mem_chunk *new_mc = NULL;
+
+ spin_lock_irqsave(&mm_info->lock, flags);
+
+ /* Is mem full? */
+ if (list_empty(mm_info->free_list)) {
+ pr_err("%s : memory full\n", mm_info->name);
+ spin_unlock_irqrestore(&mm_info->lock, flags);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Find the best size match */
+ list_for_each_entry(mc_iterator, mm_info->free_list, node) {
+ if (mc_iterator->size >= size) {
+ if (best_match_chunk == NULL)
+ best_match_chunk = mc_iterator;
+ else if (mc_iterator->size < best_match_chunk->size)
+ best_match_chunk = mc_iterator;
+ }
+ }
+
+ /* Was a free chunk found? */
+ if (best_match_chunk == NULL) {
+ pr_err("%s : no enough memory available\n", mm_info->name);
+ spin_unlock_irqrestore(&mm_info->lock, flags);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Is it an exact match? */
+ if (best_match_chunk->size == size) {
+ list_del(&best_match_chunk->node);
+ list_for_each_entry(mc_iterator, mm_info->alloc_list, node) {
+ if (best_match_chunk->address < mc_iterator->address) {
+ list_add_tail(&best_match_chunk->node,
+ &mc_iterator->node);
+ strlcpy(best_match_chunk->name, name,
+ NAME_SIZE);
+ spin_unlock_irqrestore(&mm_info->lock, flags);
+ return best_match_chunk;
+ }
+ }
+ list_add(&best_match_chunk->node, mm_info->alloc_list);
+ strlcpy(best_match_chunk->name, name, NAME_SIZE);
+ spin_unlock_irqrestore(&mm_info->lock, flags);
+ return best_match_chunk;
+ } else {
+ new_mc = kzalloc(sizeof(struct mem_chunk), GFP_ATOMIC);
+ if (unlikely(!new_mc)) {
+ pr_err("failed to allocate memory for mem_chunk\n");
+
+ spin_unlock_irqrestore(&mm_info->lock, flags);
+ return ERR_PTR(-ENOMEM);
+ }
+ new_mc->address = best_match_chunk->address;
+ new_mc->size = size;
+ strlcpy(new_mc->name, name, NAME_SIZE);
+ best_match_chunk->address += size;
+ best_match_chunk->size -= size;
+ list_for_each_entry(mc_iterator, mm_info->alloc_list, node) {
+ if (new_mc->address < mc_iterator->address) {
+ list_add_tail(&new_mc->node,
+ &mc_iterator->node);
+ spin_unlock_irqrestore(&mm_info->lock, flags);
+ return new_mc;
+ }
+ }
+ list_add_tail(&new_mc->node, mm_info->alloc_list);
+ spin_unlock_irqrestore(&mm_info->lock, flags);
+ return new_mc;
+ }
+}
+
+/*
+ * Find the node with the specified address and remove it from the list
+ */
+bool mem_release(void *mem_handle, void *handle)
+{
+ unsigned long flags;
+ struct mem_manager_info *mm_info =
+ (struct mem_manager_info *)mem_handle;
+ struct mem_chunk *mc_curr = NULL, *mc_prev = NULL;
+ struct mem_chunk *mc_free = (struct mem_chunk *)handle;
+
+ pr_debug(" addr = %lu, size = %lu, name = %s\n",
+ mc_free->address, mc_free->size, mc_free->name);
+
+ spin_lock_irqsave(&mm_info->lock, flags);
+
+ list_for_each_entry(mc_curr, mm_info->free_list, node) {
+ if (mc_free->address < mc_curr->address) {
+
+ strlcpy(mc_free->name, "FREE", NAME_SIZE);
+
+ /* adjacent next free node */
+ if (mc_curr->address ==
+ (mc_free->address + mc_free->size)) {
+
+ mc_curr->address = mc_free->address;
+ mc_curr->size += mc_free->size;
+ list_del(&mc_free->node);
+ kfree(mc_free);
+
+ /* and adjacent prev free node */
+ if ((mc_prev != NULL) &&
+ ((mc_prev->address + mc_prev->size) ==
+ mc_curr->address)) {
+
+ mc_prev->size += mc_curr->size;
+ list_del(&mc_curr->node);
+ kfree(mc_curr);
+ }
+ }
+ /* adjacent prev free node */
+ else if ((mc_prev != NULL) &&
+ ((mc_prev->address + mc_prev->size) ==
+ mc_free->address)) {
+
+ mc_prev->size += mc_free->size;
+ list_del(&mc_free->node);
+ kfree(mc_free);
+ } else {
+ list_del(&mc_free->node);
+ list_add_tail(&mc_free->node,
+ &mc_curr->node);
+ }
+ spin_unlock_irqrestore(&mm_info->lock, flags);
+ return true;
+ }
+ mc_prev = mc_curr;
+ }
+ spin_unlock_irqrestore(&mm_info->lock, flags);
+ return false;
+}
+
+inline unsigned long mem_get_address(void *handle)
+{
+ struct mem_chunk *mc = (struct mem_chunk *)handle;
+ return mc->address;
+}
+
+void mem_print(void *mem_handle)
+{
+ struct mem_manager_info *mm_info =
+ (struct mem_manager_info *)mem_handle;
+ struct mem_chunk *mc_iterator = NULL;
+
+ pr_info("------------------------------------\n");
+ pr_info("%s ALLOCATED\n", mm_info->name);
+ list_for_each_entry(mc_iterator, mm_info->alloc_list, node) {
+ pr_info(" addr = %lu, size = %lu, name = %s\n",
+ mc_iterator->address, mc_iterator->size,
+ mc_iterator->name);
+ }
+
+ pr_info("%s FREE\n", mm_info->name);
+ list_for_each_entry(mc_iterator, mm_info->free_list, node) {
+ pr_info(" addr = %lu, size = %lu, name = %s\n",
+ mc_iterator->address, mc_iterator->size,
+ mc_iterator->name);
+ }
+
+ pr_info("------------------------------------\n");
+}
+
+void mem_dump(void *mem_handle, struct seq_file *s)
+{
+ struct mem_manager_info *mm_info =
+ (struct mem_manager_info *)mem_handle;
+ struct mem_chunk *mc_iterator = NULL;
+
+ seq_puts(s, "---------------------------------------\n");
+ seq_printf(s, "%s ALLOCATED\n", mm_info->name);
+ list_for_each_entry(mc_iterator, mm_info->alloc_list, node) {
+ seq_printf(s, " addr = %lu, size = %lu, name = %s\n",
+ mc_iterator->address, mc_iterator->size,
+ mc_iterator->name);
+ }
+
+ seq_printf(s, "%s FREE\n", mm_info->name);
+ list_for_each_entry(mc_iterator, mm_info->free_list, node) {
+ seq_printf(s, " addr = %lu, size = %lu, name = %s\n",
+ mc_iterator->address, mc_iterator->size,
+ mc_iterator->name);
+ }
+
+ seq_puts(s, "---------------------------------------\n");
+}
+
+static void clear_alloc_list(struct mem_manager_info *mm_info)
+{
+ struct list_head *curr, *next;
+ struct mem_chunk *mc = NULL;
+
+ list_for_each_safe(curr, next, mm_info->alloc_list) {
+ mc = list_entry(curr, struct mem_chunk, node);
+ pr_debug(" addr = %lu, size = %lu, name = %s\n",
+ mc->address, mc->size,
+ mc->name);
+ mem_release(mm_info, mc);
+ }
+}
+
+void *create_mem_manager(const char *name, unsigned long start_address,
+ unsigned long size)
+{
+ void *ret = NULL;
+ struct mem_chunk *mc;
+ struct mem_manager_info *mm_info =
+ kzalloc(sizeof(struct mem_manager_info), GFP_KERNEL);
+ if (unlikely(!mm_info)) {
+ pr_err("failed to allocate memory for mem_manager_info\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ strlcpy(mm_info->name, name, NAME_SIZE);
+
+ mm_info->alloc_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (unlikely(!mm_info->alloc_list)) {
+ pr_err("failed to allocate memory for alloc_list\n");
+ ret = ERR_PTR(-ENOMEM);
+ goto free_mm_info;
+ }
+
+ mm_info->free_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (unlikely(!mm_info->free_list)) {
+ pr_err("failed to allocate memory for free_list\n");
+ ret = ERR_PTR(-ENOMEM);
+ goto free_alloc_list;
+ }
+
+ INIT_LIST_HEAD(mm_info->alloc_list);
+ INIT_LIST_HEAD(mm_info->free_list);
+
+ mm_info->start_address = start_address;
+ mm_info->size = size;
+
+ /* Add whole memory to free list */
+ mc = kzalloc(sizeof(struct mem_chunk), GFP_KERNEL);
+ if (unlikely(!mc)) {
+ pr_err("failed to allocate memory for mem_chunk\n");
+ ret = ERR_PTR(-ENOMEM);
+ goto free_free_list;
+ }
+
+ mc->address = mm_info->start_address;
+ mc->size = mm_info->size;
+ strlcpy(mc->name, "FREE", NAME_SIZE);
+ list_add(&mc->node, mm_info->free_list);
+ spin_lock_init(&mm_info->lock);
+
+ return (void *)mm_info;
+
+free_free_list:
+ kfree(mm_info->free_list);
+free_alloc_list:
+ kfree(mm_info->alloc_list);
+free_mm_info:
+ kfree(mm_info);
+
+ return ret;
+}
+
+void destroy_mem_manager(void *mem_handle)
+{
+ struct mem_manager_info *mm_info =
+ (struct mem_manager_info *)mem_handle;
+ struct mem_chunk *mc_last = NULL;
+
+ /* Clear all allocated memory */
+ clear_alloc_list(mm_info);
+
+ mc_last = list_entry((mm_info->free_list)->next,
+ struct mem_chunk, node);
+ list_del(&mc_last->node);
+
+ kfree(mc_last);
+ kfree(mm_info->alloc_list);
+ kfree(mm_info->free_list);
+ kfree(mm_info);
+}
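+
+/*
+ * Hedged usage sketch of this allocator; the region name, base address
+ * and sizes are arbitrary examples:
+ *
+ *   void *mgr = create_mem_manager("ARAM", 0x00400000, SZ_64K);
+ *   void *chunk = mem_request(mgr, "ring-buffer", SZ_4K);
+ *
+ *   if (!IS_ERR(chunk))
+ *       pr_info("chunk at %lu\n", mem_get_address(chunk));
+ *
+ *   mem_release(mgr, chunk);
+ *   destroy_mem_manager(mgr);
+ */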
diff --git a/drivers/platform/tegra/nvadsp/mem_manager.h b/drivers/platform/tegra/nvadsp/mem_manager.h
new file mode 100644
index 00000000..6ad04b72
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/mem_manager.h
@@ -0,0 +1,51 @@
+/*
+ * Header file for memory manager
+ *
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TEGRA_NVADSP_MEM_MANAGER_H
+#define __TEGRA_NVADSP_MEM_MANAGER_H
+
+#include
+
+#define NAME_SIZE SZ_16
+
+struct mem_chunk {
+ struct list_head node;
+ char name[NAME_SIZE];
+ unsigned long address;
+ unsigned long size;
+};
+
+struct mem_manager_info {
+ struct list_head *alloc_list;
+ struct list_head *free_list;
+ char name[NAME_SIZE];
+ unsigned long start_address;
+ unsigned long size;
+ spinlock_t lock;
+};
+
+void *create_mem_manager(const char *name, unsigned long start_address,
+ unsigned long size);
+void destroy_mem_manager(void *mem_handle);
+
+void *mem_request(void *mem_handle, const char *name, size_t size);
+bool mem_release(void *mem_handle, void *handle);
+
+unsigned long mem_get_address(void *handle);
+
+void mem_print(void *mem_handle);
+void mem_dump(void *mem_handle, struct seq_file *s);
+
+#endif /* __TEGRA_NVADSP_MEM_MANAGER_H */
diff --git a/drivers/platform/tegra/nvadsp/msgq.c b/drivers/platform/tegra/nvadsp/msgq.c
new file mode 100644
index 00000000..1060eec2
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/msgq.c
@@ -0,0 +1,178 @@
+/*
+ * ADSP circular message queue
+ *
+ * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include
+
+#define msgq_wmemcpy(dest, src, words) \
+ memcpy(dest, src, (words) * sizeof(int32_t))
+
+
+/**
+ * msgq_init - Initialize message queue
+ * @msgq: pointer to the client message queue
+ * @size: size of the message queue in words; values larger than
+ *        MSGQ_MAX_QUEUE_WSIZE are capped to MSGQ_MAX_QUEUE_WSIZE
+ *
+ * The message queue requires space for the queue to be
+ * preallocated and should only be initialized once. The queue
+ * space immediately follows the queue header and begins at
+ * msgq_t::queue. All messages are queued directly with
+ * no pointer address space translation.
+ */
+void msgq_init(msgq_t *msgq, int32_t size)
+{
+ if (MSGQ_MAX_QUEUE_WSIZE < size) {
+ /* cap the maximum size */
+ pr_info("msgq_init: %d size capped to MSGQ_MAX_QUEUE_WSIZE\n",
+ size);
+ size = MSGQ_MAX_QUEUE_WSIZE;
+ }
+
+ msgq->size = size;
+ msgq->read_index = 0;
+ msgq->write_index = 0;
+}
+EXPORT_SYMBOL(msgq_init);
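+
+/*
+ * Minimal usage sketch (hypothetical caller; "shared_region" is a
+ * made-up pointer to memory visible to both CPU and ADSP):
+ *
+ *   msgq_t *q = (msgq_t *)shared_region;
+ *   msgq_init(q, MSGQ_MAX_QUEUE_WSIZE);   // done once, by one side only
+ */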
+/**
+ * msgq_queue_message - Queues a message in the queue
+ * @msgq: pointer to the client message queue
+ * @message: Message buffer to copy from
+ *
+ * This function returns 0 if no error has occurred. -ENOSPC is
+ * returned if no space is available in the queue for the entire
+ * message. On -ENOSPC, it is possible that the queue size was capped
+ * at init time to MSGQ_MAX_QUEUE_WSIZE because an unreasonable size
+ * was specified.
+ */
+int32_t msgq_queue_message(msgq_t *msgq, const msgq_message_t *message)
+{
+ int32_t ret = 0;
+
+ if (msgq && message) {
+ int32_t ri = msgq->read_index;
+ int32_t wi = msgq->write_index;
+ bool wrap = ri <= wi;
+ int32_t *start = msgq->queue;
+ int32_t *end = &msgq->queue[msgq->size];
+ int32_t *first = &msgq->queue[wi];
+ int32_t *last = &msgq->queue[ri];
+ int32_t qremainder = wrap ? end - first : last - first;
+ int32_t qsize = wrap ? qremainder + (last - start) : qremainder;
+ int32_t msize = &message->payload[message->size] -
+ (int32_t *)message;
+
+ if (qsize <= msize) {
+ /* don't allow read == write */
+ pr_err("%s failed: msgq ri: %d, wi %d, msg size %d\n",
+ __func__, msgq->read_index,
+ msgq->write_index, message->size);
+ ret = -ENOSPC;
+ } else if (msize < qremainder) {
+ msgq_wmemcpy(first, message, msize);
+ msgq->write_index = wi + MSGQ_MESSAGE_HEADER_WSIZE +
+ message->size;
+ } else {
+ /* message wrapped */
+ msgq_wmemcpy(first, message, qremainder);
+ msgq_wmemcpy(msgq->queue, (int32_t *)message +
+ qremainder, msize - qremainder);
+ msgq->write_index = wi + MSGQ_MESSAGE_HEADER_WSIZE +
+ message->size - msgq->size;
+ }
+ } else {
+ pr_err("NULL: msgq %p message %p\n", msgq, message);
+ ret = -EFAULT; /* Bad Address */
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(msgq_queue_message);
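+
+/*
+ * Hedged usage sketch; the exact layout of msgq_message_t beyond its
+ * "size" and "payload" members is assumed, not defined in this file:
+ *
+ *   int32_t buf[MSGQ_MESSAGE_HEADER_WSIZE + 1];
+ *   msgq_message_t *m = (msgq_message_t *)buf;
+ *
+ *   m->size = 1;                          // one payload word
+ *   m->payload[0] = 0x1234;
+ *   if (msgq_queue_message(q, m) == -ENOSPC)
+ *       ;                                 // queue full, retry later
+ */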
+/**
+ * msgq_dequeue_message - Dequeues a message from the queue
+ * @msgq: pointer to the client message queue
+ * @message: Message buffer to copy to or
+ * NULL to discard the current message
+ *
+ * This function returns 0 if no error has occurred.
+ * msgq_message_t::size will be set to the size of the message
+ * in words. -ENOSPC will be returned if the buffer is too small
+ * for the queued message. -ENOMSG will be returned if there is no
+ * message in the queue.
+ */
+int32_t msgq_dequeue_message(msgq_t *msgq, msgq_message_t *message)
+{
+ int32_t ret = 0;
+ int32_t ri;
+ int32_t wi;
+ msgq_message_t *msg;
+
+ if (!msgq) {
+ pr_err("NULL: msgq %p\n", msgq);
+ return -EFAULT; /* Bad Address */
+ }
+
+ ri = msgq->read_index;
+ wi = msgq->write_index;
+ msg = (msgq_message_t *)&msgq->queue[msgq->read_index];
+
+ if (ri == wi) {
+ /* empty queue */
+ if (message)
+ message->size = 0;
+ pr_err("%s failed: msgq ri: %d, wi %d; NO MSG\n",
+ __func__, msgq->read_index, msgq->write_index);
+ ret = -ENOMSG;
+ } else if (!message) {
+ /* no input buffer, discard top message */
+ ri += MSGQ_MESSAGE_HEADER_WSIZE + msg->size;
+ msgq->read_index = ri < msgq->size ? ri : ri - msgq->size;
+ } else if (message->size < msg->size) {
+ /* return buffer too small */
+ pr_err("%s failed: msgq ri: %d, wi %d, NO SPACE\n",
+ __func__, msgq->read_index, msgq->write_index);
+ message->size = msg->size;
+ ret = -ENOSPC;
+ } else {
+ /* copy message to the output buffer */
+ int32_t msize = MSGQ_MESSAGE_HEADER_WSIZE + msg->size;
+ int32_t *first = &msgq->queue[msgq->read_index];
+ int32_t *end = &msgq->queue[msgq->size];
+ int32_t qremainder = end - first;
+
+ if (msize < qremainder) {
+ msgq_wmemcpy(message, first, msize);
+ msgq->read_index = ri + MSGQ_MESSAGE_HEADER_WSIZE +
+ msg->size;
+ } else {
+ /* message wrapped */
+ msgq_wmemcpy(message, first, qremainder);
+ msgq_wmemcpy((int32_t *)message + qremainder,
+ msgq->queue, msize - qremainder);
+ msgq->read_index = ri + MSGQ_MESSAGE_HEADER_WSIZE +
+ msg->size - msgq->size;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(msgq_dequeue_message);
diff --git a/drivers/platform/tegra/nvadsp/nvadsp_shared_sema.c b/drivers/platform/tegra/nvadsp/nvadsp_shared_sema.c
new file mode 100644
index 00000000..6c591466
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/nvadsp_shared_sema.c
@@ -0,0 +1,41 @@
+/*
+ * nvadsp_shared_sema.c
+ *
+ * ADSP Shared Semaphores
+ *
+ * Copyright (C) 2014 NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include
+
+nvadsp_shared_sema_t *
+nvadsp_shared_sema_init(uint8_t nvadsp_shared_sema_id)
+{
+ return NULL;
+}
+
+status_t nvadsp_shared_sema_destroy(nvadsp_shared_sema_t *sema)
+{
+ return -ENOENT;
+}
+
+status_t nvadsp_shared_sema_acquire(nvadsp_shared_sema_t *sema)
+{
+ return -ENOENT;
+}
+
+status_t nvadsp_shared_sema_release(nvadsp_shared_sema_t *sema)
+{
+ return -ENOENT;
+}
+
diff --git a/drivers/platform/tegra/nvadsp/os-t18x.c b/drivers/platform/tegra/nvadsp/os-t18x.c
new file mode 100644
index 00000000..c850892f
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/os-t18x.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2015-2022, NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include
+#if KERNEL_VERSION(4, 15, 0) > LINUX_VERSION_CODE
+#include
+#else
+#include
+#endif
+#include
+#include
+#include
+#include
+
+#include "dev.h"
+#include "os.h"
+#include "dev-t18x.h"
+
+#if IS_ENABLED(CONFIG_TEGRA_HSP)
+static void nvadsp_dbell_handler(void *data)
+{
+ struct platform_device *pdev = data;
+ struct device *dev = &pdev->dev;
+
+ dev_info(dev, "APE DBELL handler\n");
+}
+#endif
+
+
+/* Function to return the ADMA page number (0 indexed) used by guest */
+static int tegra_adma_query_dma_page(void)
+{
+ struct device_node *np = NULL;
+ int adma_page = 0, ret = 0, i = 0;
+
+ static const char *compatible[] = {
+ "nvidia,tegra210-adma",
+ "nvidia,tegra210-adma-hv",
+ "nvidia,tegra186-adma",
+ "nvidia,tegra194-adma-hv",
+ };
+
+ for (i = 0; i < ARRAY_SIZE(compatible); i++) {
+ np = of_find_compatible_node(NULL, NULL, compatible[i]);
+ if (np == NULL)
+ continue;
+
+ /*
+ * In DT, "adma-page" property is 1 indexed
+ * If property is present, update return value to be 0 indexed
+ * If property is absent, return default value as page 0
+ */
+ ret = of_property_read_u32(np, "adma-page", &adma_page);
+ if (ret == 0)
+ adma_page = adma_page - 1;
+
+ break;
+ }
+
+ pr_info("%s: adma-page %d\n", __func__, adma_page);
+ return adma_page;
+}
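+
+/*
+ * Illustrative device-tree fragment (the value is hypothetical): with
+ * "adma-page = <2>" under a node compatible with "nvidia,tegra186-adma",
+ * the helper above returns 1 (0-indexed); if the property is absent it
+ * returns the default page 0.
+ */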
+
+int nvadsp_os_t18x_init(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ int ret = 0, adma_ch_page, val = 0;
+
+ if (is_tegra_hypervisor_mode()) {
+
+ adma_ch_page = tegra_adma_query_dma_page();
+
+ /* Set ADSP to do decompression again */
+ val = ADSP_CONFIG_DECOMPRESS_EN << ADSP_CONFIG_DECOMPRESS_SHIFT;
+
+ /* Set ADSP to know its virtualized configuration */
+ val = val | (ADSP_CONFIG_VIRT_EN << ADSP_CONFIG_VIRT_SHIFT);
+
+ /* Encode DMA Page Bits with DMA page information */
+ val = val | (adma_ch_page << ADSP_CONFIG_DMA_PAGE_SHIFT);
+
+ /* Write to HWMBOX5 */
+ hwmbox_writel(val, drv_data->chip_data->adsp_os_config_hwmbox);
+
+ /* Clear HWMBOX0 for ADSP Guest reset handling */
+ hwmbox_writel(0, drv_data->chip_data->hwmb.hwmbox0_reg);
+
+ return 0;
+ }
+
+#if IS_ENABLED(CONFIG_TEGRA_HSP)
+ ret = tegra_hsp_db_add_handler(HSP_MASTER_APE,
+ nvadsp_dbell_handler, pdev);
+ if (ret)
+ dev_err(&pdev->dev,
+ "failed to add HSP_MASTER_APE DB handler\n");
+#endif
+
+ return ret;
+}
diff --git a/drivers/platform/tegra/nvadsp/os-t21x.c b/drivers/platform/tegra/nvadsp/os-t21x.c
new file mode 100644
index 00000000..a445d5a0
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/os-t21x.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2016-2017, NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "dev.h"
+#include "dev-t21x.h"
+
+int nvadsp_os_t21x_init(struct platform_device *pdev)
+{
+ return 0;
+}
diff --git a/drivers/platform/tegra/nvadsp/os-t21x.h b/drivers/platform/tegra/nvadsp/os-t21x.h
new file mode 100644
index 00000000..e69de29b
diff --git a/drivers/platform/tegra/nvadsp/os.c b/drivers/platform/tegra/nvadsp/os.c
new file mode 100644
index 00000000..df6c2beb
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/os.c
@@ -0,0 +1,2542 @@
+/*
+ * os.c
+ *
+ * ADSP OS management
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Copyright (C) 2014-2022, NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#if KERNEL_VERSION(4, 15, 0) > LINUX_VERSION_CODE
+#include
+#else
+#include
+#endif
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include "amisc.h"
+#include "ape_actmon.h"
+#include "os.h"
+#include "dev.h"
+#include "dram_app_mem_manager.h"
+#include "adsp_console_dbfs.h"
+#include "hwmailbox.h"
+#include "log_state.h"
+
+#define MAILBOX_REGION ".mbox_shared_data"
+#define DEBUG_RAM_REGION ".debug_mem_logs"
+
+/* Maximum number of LOAD MAPPINGS supported */
+#define NM_LOAD_MAPPINGS 20
+
+#define EOT 0x04 /* End of Transmission */
+#define SOH 0x01 /* Start of Header */
+#define BELL 0x07 /* Bell character */
+
+#define ADSP_TAG "\n[ADSP OS]"
+
+#define UART_BAUD_RATE 9600
+
+/* Initialize with a FIXED rate; once the OS boots up, DFS will set the required freq */
+#define ADSP_TO_APE_CLK_RATIO 2
+/* 13.5 MHz, should be changed at bringup time */
+#define APE_CLK_FIX_RATE 13500
+/*
+ * ADSP CLK = APE_CLK * ADSP_TO_APE_CLK_RATIO
+ * or
+ * ADSP CLK = APE_CLK >> ADSP_TO_APE_CLK_RATIO
+ */
+#define ADSP_CLK_FIX_RATE (APE_CLK_FIX_RATE * ADSP_TO_APE_CLK_RATIO)
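+/* With the fixed values above: ADSP_CLK_FIX_RATE = 13500 * 2 = 27000 (kHz) */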
+
+/* total number of crashes allowed on adsp */
+#define ALLOWED_CRASHES 1
+
+#define LOGGER_TIMEOUT 1 /* in ms */
+#define ADSP_WFI_TIMEOUT 800 /* in ms */
+#define LOGGER_COMPLETE_TIMEOUT 500 /* in ms */
+
+#define SEARCH_SOH_RETRY 2
+
+#define DUMP_BUFF 128
+
+struct nvadsp_debug_log {
+ struct device *dev;
+ char *debug_ram_rdr;
+ int debug_ram_sz;
+ int ram_iter;
+ atomic_t is_opened;
+ wait_queue_head_t wait_queue;
+ struct completion complete;
+};
+
+struct nvadsp_os_data {
+ void __iomem *unit_fpga_reset_reg;
+ const struct firmware *os_firmware;
+ struct platform_device *pdev;
+ struct global_sym_info *adsp_glo_sym_tbl;
+ void __iomem *hwmailbox_base;
+ struct resource **dram_region;
+ struct nvadsp_debug_log logger;
+ struct nvadsp_cnsl console;
+ struct work_struct restart_os_work;
+ int adsp_num_crashes;
+ bool adsp_os_fw_loaded;
+ struct mutex fw_load_lock;
+ bool os_running;
+ struct mutex os_run_lock;
+ dma_addr_t adsp_os_addr;
+ size_t adsp_os_size;
+ dma_addr_t app_alloc_addr;
+ size_t app_size;
+ int num_start; /* registers number of time start called */
+};
+
+static struct nvadsp_os_data priv;
+
+struct nvadsp_mappings {
+ phys_addr_t da;
+ void *va;
+ int len;
+};
+
+#if KERNEL_VERSION(4, 15, 0) > LINUX_VERSION_CODE
+static inline u8 tegra_get_major_rev(void) { return 0; }
+#endif
+
+static struct nvadsp_mappings adsp_map[NM_LOAD_MAPPINGS];
+static int map_idx;
+static struct nvadsp_mbox adsp_com_mbox;
+
+static DECLARE_COMPLETION(entered_wfi);
+
+static void __nvadsp_os_stop(bool);
+static irqreturn_t adsp_wdt_handler(int irq, void *arg);
+static irqreturn_t adsp_wfi_handler(int irq, void *arg);
+
+/*
+ * Set by the adsp audio driver through the exported api
+ * nvadsp_set_adma_dump_reg; used to dump adma registers in case of
+ * failures, for debugging.
+ */
+static void (*nvadsp_tegra_adma_dump_ch_reg)(void);
+
+#ifdef CONFIG_DEBUG_FS
+static int adsp_logger_open(struct inode *inode, struct file *file)
+{
+ struct nvadsp_debug_log *logger = inode->i_private;
+ int ret = -EBUSY;
+ char *start;
+ int i;
+
+ mutex_lock(&priv.os_run_lock);
+ if (!priv.num_start) {
+ mutex_unlock(&priv.os_run_lock);
+ goto err_ret;
+ }
+ mutex_unlock(&priv.os_run_lock);
+
+ /*
+ * Check whether is_opened is 0; if so, set it to 1 and proceed,
+ * otherwise return -EBUSY.
+ */
+ if (atomic_cmpxchg(&logger->is_opened, 0, 1))
+ goto err_ret;
+
+ /* loop until the writer has initialized the buffer with SOH */
+ for (i = 0; i < SEARCH_SOH_RETRY; i++) {
+
+ ret = wait_event_interruptible_timeout(logger->wait_queue,
+ memchr(logger->debug_ram_rdr, SOH,
+ logger->debug_ram_sz),
+ msecs_to_jiffies(LOGGER_TIMEOUT));
+ if (ret == -ERESTARTSYS) /* check if interrupted */
+ goto err;
+
+ start = memchr(logger->debug_ram_rdr, SOH,
+ logger->debug_ram_sz);
+ if (start)
+ break;
+ }
+
+ if (i == SEARCH_SOH_RETRY) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* maxdiff can be 0, therefore valid */
+ logger->ram_iter = start - logger->debug_ram_rdr;
+
+ file->private_data = logger;
+ return 0;
+err:
+ /* reset to 0 so as to mention the node is free */
+ atomic_set(&logger->is_opened, 0);
+err_ret:
+ return ret;
+}
+
+
+static int adsp_logger_flush(struct file *file, fl_owner_t id)
+{
+ struct nvadsp_debug_log *logger = file->private_data;
+ struct device *dev = logger->dev;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ /* reset to 0 so as to mention the node is free */
+ atomic_set(&logger->is_opened, 0);
+ return 0;
+}
+
+static int adsp_logger_release(struct inode *inode, struct file *file)
+{
+ struct nvadsp_debug_log *logger = inode->i_private;
+
+ atomic_set(&logger->is_opened, 0);
+ return 0;
+}
+
+static ssize_t adsp_logger_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct nvadsp_debug_log *logger = file->private_data;
+ struct device *dev = logger->dev;
+ ssize_t ret_num_char = 1;
+ char last_char;
+
+ last_char = logger->debug_ram_rdr[logger->ram_iter];
+
+ if ((last_char != EOT) && (last_char != 0)) {
+#if CONFIG_ADSP_DRAM_LOG_WITH_TAG
+ if ((last_char == '\n') || (last_char == '\r')) {
+ size_t num_char = min(count, sizeof(ADSP_TAG) - 1);
+
+ if (copy_to_user(buf, ADSP_TAG, num_char)) {
+ dev_err(dev, "%s failed in copying tag\n", __func__);
+ ret_num_char = -EFAULT;
+ goto exit;
+ }
+ ret_num_char = num_char;
+
+ } else
+#endif
+ if (copy_to_user(buf, &last_char, 1)) {
+ dev_err(dev, "%s failed in copying character\n", __func__);
+ ret_num_char = -EFAULT;
+ goto exit;
+ }
+
+ logger->ram_iter =
+ (logger->ram_iter + 1) % logger->debug_ram_sz;
+ goto exit;
+ }
+
+ complete(&logger->complete);
+ ret_num_char = wait_event_interruptible_timeout(logger->wait_queue,
+ logger->debug_ram_rdr[logger->ram_iter] != EOT,
+ msecs_to_jiffies(LOGGER_TIMEOUT));
+ if (ret_num_char == -ERESTARTSYS) {
+ goto exit;
+ }
+
+ last_char = BELL;
+ if (copy_to_user(buf, &last_char, 1)) {
+ dev_err(dev, "%s failed in copying bell character\n", __func__);
+ ret_num_char = -EFAULT;
+ goto exit;
+ }
+ ret_num_char = 1;
+exit:
+ return ret_num_char;
+}
+
+static const struct file_operations adsp_logger_operations = {
+ .read = adsp_logger_read,
+ .open = adsp_logger_open,
+ .release = adsp_logger_release,
+ .llseek = generic_file_llseek,
+ .flush = adsp_logger_flush,
+};
+
+static int adsp_create_debug_logger(struct dentry *adsp_debugfs_root)
+{
+ struct nvadsp_debug_log *logger = &priv.logger;
+ struct device *dev = &priv.pdev->dev;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(adsp_debugfs_root)) {
+ ret = -ENOENT;
+ goto err_out;
+ }
+
+ atomic_set(&logger->is_opened, 0);
+ init_waitqueue_head(&logger->wait_queue);
+ init_completion(&logger->complete);
+ if (!debugfs_create_file("adsp_logger", S_IRUGO,
+ adsp_debugfs_root, logger,
+ &adsp_logger_operations)) {
+ dev_err(dev, "unable to create adsp logger debug fs file\n");
+ ret = -ENOENT;
+ }
+
+err_out:
+ return ret;
+}
+#endif
+
+bool is_adsp_dram_addr(u64 addr)
+{
+ int i;
+ struct resource **dram = priv.dram_region;
+
+ for (i = 0; i < ADSP_MAX_DRAM_MAP; i++) {
+ if ((dram[i]->start) && (addr >= dram[i]->start) &&
+ (addr <= dram[i]->end))
+ return true;
+ }
+ return false;
+}
+
+int nvadsp_add_load_mappings(phys_addr_t pa, void *mapping, int len)
+{
+ if (map_idx < 0 || map_idx >= NM_LOAD_MAPPINGS)
+ return -EINVAL;
+
+ adsp_map[map_idx].da = pa;
+ adsp_map[map_idx].va = mapping;
+ adsp_map[map_idx].len = len;
+ map_idx++;
+ return 0;
+}
+
+void *nvadsp_da_to_va_mappings(u64 da, int len)
+{
+ void *ptr = NULL;
+ int i;
+
+ for (i = 0; i < map_idx; i++) {
+ int offset = da - adsp_map[i].da;
+
+ /* try next carveout if da is too small */
+ if (offset < 0)
+ continue;
+
+ /* try next carveout if da is too large */
+ if (offset + len > adsp_map[i].len)
+ continue;
+
+ ptr = adsp_map[i].va + offset;
+ break;
+ }
+ return ptr;
+}
+
+void *nvadsp_alloc_coherent(size_t size, dma_addr_t *da, gfp_t flags)
+{
+ struct device *dev;
+ void *va = NULL;
+
+ if (!priv.pdev) {
+ pr_err("ADSP Driver is not initialized\n");
+ goto end;
+ }
+
+ dev = &priv.pdev->dev;
+ va = dma_alloc_coherent(dev, size, da, flags);
+ if (!va) {
+ dev_err(dev, "unable to allocate the memory for size %lu\n",
+ size);
+ goto end;
+ }
+ WARN(!is_adsp_dram_addr(*da), "bus addr %llx is outside ADSP DRAM (limit %x)\n",
+ *da, UINT_MAX);
+end:
+ return va;
+}
+EXPORT_SYMBOL(nvadsp_alloc_coherent);
+
+void nvadsp_free_coherent(size_t size, void *va, dma_addr_t da)
+{
+ struct device *dev;
+
+ if (!priv.pdev) {
+ pr_err("ADSP Driver is not initialized\n");
+ return;
+ }
+ dev = &priv.pdev->dev;
+ dma_free_coherent(dev, size, va, da);
+}
+EXPORT_SYMBOL(nvadsp_free_coherent);
+
+struct elf32_shdr *
+nvadsp_get_section(const struct firmware *fw, char *sec_name)
+{
+ int i;
+ struct device *dev = &priv.pdev->dev;
+ const u8 *elf_data = fw->data;
+ struct elf32_hdr *ehdr = (struct elf32_hdr *)elf_data;
+ struct elf32_shdr *shdr;
+ const char *name_table;
+
+ /* look for the resource table and handle it */
+ shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
+ name_table = elf_data + shdr[ehdr->e_shstrndx].sh_offset;
+
+ for (i = 0; i < ehdr->e_shnum; i++, shdr++)
+ if (!strcmp(name_table + shdr->sh_name, sec_name)) {
+ dev_dbg(dev, "found the section %s\n",
+ name_table + shdr->sh_name);
+ return shdr;
+ }
+ return NULL;
+}
+
+static inline void __maybe_unused dump_global_symbol_table(void)
+{
+ struct device *dev = &priv.pdev->dev;
+ struct global_sym_info *table = priv.adsp_glo_sym_tbl;
+ int num_ent;
+ int i;
+
+ if (!table) {
+ dev_err(dev, "no table not created\n");
+ return;
+ }
+ num_ent = table[0].addr;
+ dev_info(dev, "total number of entries in global symbol table %d\n",
+ num_ent);
+
+ pr_info("NAME ADDRESS TYPE\n");
+ for (i = 1; i < num_ent; i++)
+ pr_info("%s %x %s\n", table[i].name, table[i].addr,
+ ELF32_ST_TYPE(table[i].info) == STT_FUNC ?
+ "STT_FUNC" : "STT_OBJECT");
+}
+
+#ifdef CONFIG_ANDROID
+static int
+__maybe_unused create_global_symbol_table(const struct firmware *fw)
+{
+ int i;
+ struct device *dev = &priv.pdev->dev;
+ struct elf32_shdr *sym_shdr = nvadsp_get_section(fw, ".symtab");
+ struct elf32_shdr *str_shdr = nvadsp_get_section(fw, ".strtab");
+ const u8 *elf_data = fw->data;
+ const char *name_table;
+ /* The first entry stores the number of entries in the array */
+ int num_ent = 1;
+ struct elf32_sym *sym;
+ struct elf32_sym *last_sym;
+
+ if (!sym_shdr || !str_shdr) {
+ dev_dbg(dev, "section symtab/strtab not found!\n");
+ return -EINVAL;
+ }
+
+ sym = (struct elf32_sym *)(elf_data + sym_shdr->sh_offset);
+ name_table = elf_data + str_shdr->sh_offset;
+
+ num_ent += sym_shdr->sh_size / sizeof(struct elf32_sym);
+ priv.adsp_glo_sym_tbl = devm_kzalloc(dev,
+ sizeof(struct global_sym_info) * num_ent, GFP_KERNEL);
+ if (!priv.adsp_glo_sym_tbl)
+ return -ENOMEM;
+
+ last_sym = sym + (num_ent - 1); /* num_ent includes the reserved count entry */
+
+ for (i = 1; sym < last_sym; sym++) {
+ unsigned char info = sym->st_info;
+ unsigned char type = ELF32_ST_TYPE(info);
+ if ((ELF32_ST_BIND(sym->st_info) == STB_GLOBAL) &&
+ ((type == STT_OBJECT) || (type == STT_FUNC))) {
+ char *name = priv.adsp_glo_sym_tbl[i].name;
+
+ strlcpy(name, name_table + sym->st_name, SYM_NAME_SZ);
+ priv.adsp_glo_sym_tbl[i].addr = sym->st_value;
+ priv.adsp_glo_sym_tbl[i].info = info;
+ i++;
+ }
+ }
+ priv.adsp_glo_sym_tbl[0].addr = i;
+ return 0;
+}
+#endif /* CONFIG_ANDROID */
+
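+/*
+ * Layout convention for adsp_glo_sym_tbl (see create_global_symbol_table
+ * above): entry 0 stores, in its addr field, the number of used entries
+ * including itself; the actual symbols occupy indices 1..n-1.
+ */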
+struct global_sym_info * __maybe_unused find_global_symbol(const char *sym_name)
+{
+ struct device *dev = &priv.pdev->dev;
+ struct global_sym_info *table = priv.adsp_glo_sym_tbl;
+ int num_ent;
+ int i;
+
+ if (unlikely(!table)) {
+ dev_err(dev, "symbol table not present\n");
+ return NULL;
+ }
+ num_ent = table[0].addr;
+
+ for (i = 1; i < num_ent; i++) {
+ if (!strncmp(table[i].name, sym_name, SYM_NAME_SZ))
+ return &table[i];
+ }
+ return NULL;
+}
+
+static void *get_mailbox_shared_region(const struct firmware *fw)
+{
+ struct nvadsp_drv_data *drv_data;
+ struct device *dev;
+ struct elf32_shdr *shdr;
+ int addr;
+ int size;
+
+ if (!priv.pdev) {
+ pr_err("ADSP Driver is not initialized\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* fetch drvdata only after the pdev check to avoid a NULL dereference */
+ drv_data = platform_get_drvdata(priv.pdev);
+ dev = &priv.pdev->dev;
+
+ shdr = nvadsp_get_section(fw, MAILBOX_REGION);
+ if (!shdr) {
+ dev_dbg(dev, "section %s not found\n", MAILBOX_REGION);
+ return ERR_PTR(-EINVAL);
+ }
+
+ dev_dbg(dev, "the shared section is present at 0x%x\n", shdr->sh_addr);
+ addr = shdr->sh_addr;
+ size = shdr->sh_size;
+ drv_data->shared_adsp_os_data_iova = addr;
+ return nvadsp_da_to_va_mappings(addr, size);
+}
+
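+/*
+ * Copy a buffer to MMIO space one 32-bit word at a time. This assumes
+ * (not verified here) that both buffers are 4-byte aligned and that sz
+ * is a multiple of 4, which the EVP copy in __nvadsp_os_start() appears
+ * to satisfy.
+ */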
+static void copy_io_in_l(void *to, const void *from, int sz)
+{
+ int i;
+ for (i = 0; i < sz; i += 4) {
+ u32 val = *(u32 *)(from + i);
+ writel(val, (void __iomem *)(to + i));
+ }
+}
+
+static int nvadsp_os_elf_load(const struct firmware *fw)
+{
+ struct device *dev = &priv.pdev->dev;
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(priv.pdev);
+ struct elf32_hdr *ehdr;
+ struct elf32_phdr *phdr;
+ int i, ret = 0;
+ const u8 *elf_data = fw->data;
+
+ ehdr = (struct elf32_hdr *)elf_data;
+ phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
+
+ /* go through the available ELF segments */
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+ void *va;
+ u32 da = phdr->p_paddr;
+ u32 memsz = phdr->p_memsz;
+ u32 filesz = phdr->p_filesz;
+ u32 offset = phdr->p_offset;
+
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
+ phdr->p_type, da, memsz, filesz);
+
+ va = nvadsp_da_to_va_mappings(da, filesz);
+ if (!va) {
+ dev_err(dev, "no va for da 0x%x filesz 0x%x\n",
+ da, filesz);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (filesz > memsz) {
+ dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
+ filesz, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (offset + filesz > fw->size) {
+ dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
+ offset + filesz, fw->size);
+ ret = -EINVAL;
+ break;
+ }
+
+ /* put the segment where the remote processor expects it */
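+ /*
+ * Note: the EVP (exception vector) segment is not copied
+ * directly; it is staged in drv_data->state.evp and written
+ * to the EVP area by copy_io_in_l() when the OS is started.
+ */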
+ if (filesz) {
+ if (is_adsp_dram_addr(da))
+ memcpy(va, elf_data + offset, filesz);
+ else if ((da == drv_data->evp_base[ADSP_EVP_BASE]) &&
+ (filesz == drv_data->evp_base[ADSP_EVP_SIZE])) {
+
+ drv_data->state.evp_ptr = va;
+ memcpy(drv_data->state.evp,
+ elf_data + offset, filesz);
+ } else {
+ dev_err(dev, "can't load mem pa:0x%x va:%p\n",
+ da, va);
+ ret = -EINVAL;
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Allocate a DMA buffer and map it at the specified iova.
+ * Returns a valid cpu virtual address on success or NULL on failure.
+ */
+static void *nvadsp_dma_alloc_and_map_at(struct platform_device *pdev,
+ size_t size, dma_addr_t iova,
+ gfp_t flags)
+{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
+ unsigned long align_mask = ~0UL << fls_long(size - 1);
+ struct device *dev = &pdev->dev;
+ dma_addr_t aligned_iova = iova & align_mask;
+ dma_addr_t end = iova + size;
+ dma_addr_t tmp_iova, offset;
+ phys_addr_t pa, pa_new;
+ void *cpu_va;
+ int ret;
+ unsigned long shift = 0, pg_size = 0, mp_size = 0;
+
+ if (!domain) {
+ dev_err(dev, "Unable to get iommu_domain\n");
+ return NULL;
+ }
+
+ shift = __ffs(domain->pgsize_bitmap);
+ pg_size = 1UL << shift;
+ mp_size = pg_size;
+
+ /*
+ * Reserve iova range using aligned size: adsp memory might not start
+ * from an aligned address by power of 2, while iommu_dma_alloc_iova()
+ * would shift the allocation off the target iova so as to align start
+ * address by power of 2. To prevent this shifting, use aligned size.
+ * It might allocate an excessive iova region but it would be handled
+ * by IOMMU core during iommu_dma_free_iova().
+ */
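+ /*
+ * Illustrative example (hypothetical numbers, not taken from the
+ * code): for iova = 0x80300000 and size = 0xA00000 (10 MB),
+ * fls_long(size - 1) = 24, so align_mask clears the low 24 bits,
+ * aligned_iova = 0x80000000 and the reserved range is
+ * [0x80000000, iova + size).
+ */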
+ tmp_iova = iommu_dma_alloc_iova(dev, end - aligned_iova, end - pg_size);
+ if (tmp_iova != aligned_iova) {
+ dev_err(dev, "failed to reserve iova range [%llx, %llx]\n",
+ aligned_iova, end);
+ return NULL;
+ }
+
+ dev_dbg(dev, "Reserved iova region [%llx, %llx]\n", aligned_iova, end);
+
+ /* Allocate a memory first and get a tmp_iova */
+ cpu_va = dma_alloc_coherent(dev, size, &tmp_iova, flags);
+ if (!cpu_va)
+ goto fail_dma_alloc;
+
+ /* Use tmp_iova to remap non-contiguous pages to the desired iova */
+ for (offset = 0; offset < size; offset += mp_size) {
+ dma_addr_t cur_iova = tmp_iova + offset;
+
+ mp_size = pg_size;
+ pa = iommu_iova_to_phys(domain, cur_iova);
+ /* Checking if next physical addresses are contiguous */
+ for ( ; offset + mp_size < size; mp_size += pg_size) {
+ pa_new = iommu_iova_to_phys(domain, cur_iova + mp_size);
+ if (pa + mp_size != pa_new)
+ break;
+ }
+
+ /* Remap the contiguous physical addresses together */
+ ret = iommu_map(domain, iova + offset, pa, mp_size,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret) {
+ dev_err(dev, "failed to map pa %llx va %llx size %lx\n",
+ pa, iova + offset, mp_size);
+ goto fail_map;
+ }
+
+ /* Verify if the new iova is correctly mapped */
+ if (pa != iommu_iova_to_phys(domain, iova + offset)) {
+ dev_err(dev, "mismatched pa 0x%llx <-> 0x%llx\n",
+ pa, iommu_iova_to_phys(domain, iova + offset));
+ goto fail_map;
+ }
+ }
+
+ /* Unmap and free the tmp_iova since target iova is linked */
+ iommu_unmap(domain, tmp_iova, size);
+ iommu_dma_free_iova(dev, tmp_iova, size);
+
+ return cpu_va;
+
+fail_map:
+ iommu_unmap(domain, iova, offset);
+ dma_free_coherent(dev, size, cpu_va, tmp_iova);
+fail_dma_alloc:
+ iommu_dma_free_iova(dev, end - aligned_iova, end - pg_size);
+
+ return NULL;
+}
+
+static int allocate_memory_for_adsp_os(void)
+{
+ struct platform_device *pdev = priv.pdev;
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct resource *co_mem = &drv_data->co_mem;
+#if defined(CONFIG_TEGRA_NVADSP_ON_SMMU)
+ dma_addr_t addr;
+#else
+ phys_addr_t addr;
+#endif
+ void *dram_va;
+ size_t size;
+ int ret = 0;
+
+ addr = priv.adsp_os_addr;
+ size = priv.adsp_os_size;
+
+ if (co_mem->start) {
+ dram_va = devm_memremap(dev, co_mem->start,
+ size, MEMREMAP_WT);
+ if (IS_ERR(dram_va)) {
+ dev_err(dev, "unable to map CO mem: %pR\n", co_mem);
+ ret = -ENOMEM;
+ goto end;
+ }
+ dev_info(dev, "Mapped CO mem: %pR\n", co_mem);
+ goto map_and_end;
+ }
+
+#if defined(CONFIG_TEGRA_NVADSP_ON_SMMU)
+ dram_va = nvadsp_dma_alloc_and_map_at(pdev, size, addr, GFP_KERNEL);
+ if (!dram_va) {
+ dev_err(dev, "unable to allocate SMMU pages\n");
+ ret = -ENOMEM;
+ goto end;
+ }
+#else
+ dram_va = ioremap(addr, size);
+ if (!dram_va) {
+ dev_err(dev, "remap failed for addr 0x%llx\n", addr);
+ ret = -ENOMEM;
+ goto end;
+ }
+#endif
+
+map_and_end:
+ nvadsp_add_load_mappings(addr, dram_va, size);
+end:
+ return ret;
+}
+
+static void deallocate_memory_for_adsp_os(void)
+{
+ struct platform_device *pdev = priv.pdev;
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ void *va = nvadsp_da_to_va_mappings(priv.adsp_os_addr,
+ priv.adsp_os_size);
+
+ if (drv_data->co_mem.start) {
+ devm_memunmap(dev, va);
+ return;
+ }
+
+#if defined(CONFIG_TEGRA_NVADSP_ON_SMMU)
+ dma_free_coherent(dev, priv.adsp_os_size, va, priv.adsp_os_addr);
+#endif
+}
+
+static void nvadsp_set_shared_mem(struct platform_device *pdev,
+ struct nvadsp_shared_mem *shared_mem,
+ uint32_t dynamic_app_support)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct nvadsp_os_args *os_args;
+ u32 chip_id;
+
+ shared_mem->os_args.dynamic_app_support = dynamic_app_support;
+ /* set logger structure with required properties */
+ priv.logger.debug_ram_rdr = shared_mem->os_args.logger;
+ priv.logger.debug_ram_sz = sizeof(shared_mem->os_args.logger);
+ priv.logger.dev = dev;
+ priv.adsp_os_fw_loaded = true;
+
+ chip_id = (u32)tegra_get_chip_id();
+ if (drv_data->chip_data->chipid_ext)
+ chip_id = (chip_id << 4) | tegra_get_major_rev();
+
+ os_args = &shared_mem->os_args;
+ /*
+ * Chip id info is communicated twice to the ADSP.
+ * TODO: clean up the redundant communication.
+ */
+ os_args->chip_id = chip_id;
+
+ /*
+ * Tegra platform is encoded in the upper 16 bits
+ * of chip_id; can be improved to make this a
+ * separate member in nvadsp_os_args
+ */
+ os_args->chip_id |= (drv_data->tegra_platform << 16);
+
+ drv_data->shared_adsp_os_data = shared_mem;
+}
+
+static int __nvadsp_os_secload(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ dma_addr_t addr = drv_data->adsp_mem[ACSR_ADDR];
+ size_t size = drv_data->adsp_mem[ACSR_SIZE];
+ struct device *dev = &pdev->dev;
+ void *dram_va;
+
+ if (drv_data->chip_data->adsp_shared_mem_hwmbox != 0) {
+ dram_va = nvadsp_alloc_coherent(size, &addr, GFP_KERNEL);
+ if (dram_va == NULL) {
+ dev_err(dev, "unable to allocate shared region\n");
+ return -ENOMEM;
+ }
+ } else {
+ dram_va = nvadsp_dma_alloc_and_map_at(pdev, size, addr,
+ GFP_KERNEL);
+ if (dram_va == NULL) {
+ dev_err(dev, "unable to allocate shared region\n");
+ return -ENOMEM;
+ }
+ }
+
+ drv_data->shared_adsp_os_data_iova = addr;
+ nvadsp_set_shared_mem(pdev, dram_va, 0);
+
+ return 0;
+}
+
+static int nvadsp_firmware_load(struct platform_device *pdev)
+{
+ struct nvadsp_shared_mem *shared_mem;
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ const struct firmware *fw;
+ int ret = 0;
+
+ ret = request_firmware(&fw, drv_data->adsp_elf, dev);
+ if (ret < 0) {
+ dev_err(dev, "reqest firmware for %s failed with %d\n",
+ drv_data->adsp_elf, ret);
+ goto end;
+ }
+#ifdef CONFIG_ANDROID
+ ret = create_global_symbol_table(fw);
+ if (ret) {
+ dev_err(dev, "unable to create global symbol table\n");
+ goto release_firmware;
+ }
+#endif
+ ret = allocate_memory_for_adsp_os();
+ if (ret) {
+ dev_err(dev, "unable to allocate memory for adsp os\n");
+ goto release_firmware;
+ }
+
+ dev_info(dev, "Loading ADSP OS firmware %s\n", drv_data->adsp_elf);
+
+ ret = nvadsp_os_elf_load(fw);
+ if (ret) {
+ dev_err(dev, "failed to load %s\n", drv_data->adsp_elf);
+ goto deallocate_os_memory;
+ }
+
+ shared_mem = get_mailbox_shared_region(fw);
+ if (IS_ERR_OR_NULL(shared_mem)) {
+ if (drv_data->chip_data->adsp_shared_mem_hwmbox != 0) {
+ /*
+ * If FW is not explicitly defining a shared memory
+ * region then assume it to be placed at the start
+ * of OS memory and communicate the same via MBX
+ */
+ drv_data->shared_adsp_os_data_iova = priv.adsp_os_addr;
+ shared_mem = nvadsp_da_to_va_mappings(
+ priv.adsp_os_addr, priv.adsp_os_size);
+ if (!shared_mem) {
+ dev_err(dev, "Failed to get VA for ADSP OS\n");
+ ret = -EINVAL;
+ goto deallocate_os_memory;
+ }
+ } else {
+ dev_err(dev, "failed to locate shared memory\n");
+ ret = -EINVAL;
+ goto deallocate_os_memory;
+ }
+ }
+ nvadsp_set_shared_mem(pdev, shared_mem, 1);
+
+ ret = dram_app_mem_init(priv.app_alloc_addr, priv.app_size);
+ if (ret) {
+ dev_err(dev, "Memory allocation dynamic apps failed\n");
+ goto deallocate_os_memory;
+ }
+ priv.os_firmware = fw;
+
+ return 0;
+
+deallocate_os_memory:
+ deallocate_memory_for_adsp_os();
+release_firmware:
+ release_firmware(fw);
+end:
+ return ret;
+
+}
+
+#ifdef CONFIG_TEGRA_ADSP_MULTIPLE_FW
+
+#define MFW_MAX_OTHER_CORES 3
+static dma_addr_t mfw_smem_iova[MFW_MAX_OTHER_CORES];
+static void *mfw_hsp_va[MFW_MAX_OTHER_CORES];
+
+static int nvadsp_load_multi_fw(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int i, j, ret;
+ void *dram_va, *hsp_va;
+ dma_addr_t shrd_mem_iova;
+ struct device_node *hsp_node;
+ struct resource hsp_inst;
+ u32 hsp_int, hsp_int_targ;
+
+ hsp_node = of_get_child_by_name(dev->of_node, "ahsp_sm_multi");
+ if (!hsp_node) {
+ dev_err(dev, "missing ahsp_sm addr\n");
+ return -ENOENT;
+ }
+
+ for (i = 0; !of_address_to_resource(hsp_node, i, &hsp_inst); i++) {
+ if (i >= MFW_MAX_OTHER_CORES) {
+ dev_err(dev, "core out of bound\n");
+ break;
+ }
+
+ if (drv_data->adsp_os_secload) {
+ /*
+ * For front-door boot, MB2 would have loaded
+ * the FW for all the cores; only the shared
+ * memory needs to be allocated and set.
+ */
+ size_t size = drv_data->adsp_mem[ACSR_SIZE];
+
+ dram_va = nvadsp_alloc_coherent(
+ size, &shrd_mem_iova, GFP_KERNEL);
+ if (!dram_va) {
+ dev_err(dev,
+ "mem alloc failed for adsp %d\n", i);
+ continue;
+ }
+ } else {
+ const struct firmware *fw;
+ const char *adsp_elf;
+ u32 os_mem, os_size;
+
+ ret = of_property_read_string_index(
+ dev->of_node, "nvidia,adsp_elf_multi",
+ i, &adsp_elf);
+ if (ret) {
+ dev_err(dev, "err reading adsp FW %d: %d\n",
+ (i + 1), ret);
+ continue;
+ }
+
+ if (!strcmp(adsp_elf, ""))
+ continue;
+
+ ret = request_firmware(&fw, adsp_elf, dev);
+ if (ret < 0) {
+ dev_err(dev, "request FW failed for %s: %d\n",
+ adsp_elf, ret);
+ continue;
+ }
+
+ os_size = drv_data->adsp_mem[ADSP_OS_SIZE];
+ os_mem = drv_data->adsp_mem[ADSP_OS_ADDR] +
+ ((i + 1) * os_size);
+
+#if defined(CONFIG_TEGRA_NVADSP_ON_SMMU)
+ dram_va = nvadsp_dma_alloc_and_map_at(pdev,
+ (size_t)os_size, (dma_addr_t)os_mem,
+ GFP_KERNEL);
+ if (!dram_va) {
+ dev_err(dev,
+ "dma_alloc failed for 0x%x\n", os_mem);
+ continue;
+ }
+#else
+ dram_va = ioremap((phys_addr_t)os_mem, (size_t)os_size);
+ if (!dram_va) {
+ dev_err(dev,
+ "remap failed for addr 0x%x\n", os_mem);
+ continue;
+ }
+#endif
+
+ nvadsp_add_load_mappings((phys_addr_t)os_mem,
+ dram_va, (size_t)os_size);
+
+ dev_info(dev, "Loading ADSP OS firmware %s\n", adsp_elf);
+ ret = nvadsp_os_elf_load(fw);
+ if (ret) {
+ dev_err(dev, "failed to load %s\n", adsp_elf);
+ continue;
+ }
+
+ /* Shared mem is at the start of OS memory */
+ shrd_mem_iova = (dma_addr_t)os_mem;
+ }
+
+ nvadsp_set_shared_mem(pdev, dram_va, 0);
+
+ /* Store shared mem IOVA for writing into MBOX (for ADSP) */
+ hsp_va = devm_ioremap_resource(dev, &hsp_inst);
+ if (IS_ERR(hsp_va)) {
+ dev_err(dev, "ioremap failed for HSP %d\n", (i + 1));
+ continue;
+ }
+ mfw_smem_iova[i] = shrd_mem_iova;
+ mfw_hsp_va[i] = hsp_va;
+
+ /*
+ * Interrupt routing of AHSP1-3 is done only for
+ * completeness; CCPLEX<->ADSP communication
+ * is limited to AHSP0, i.e. ADSP core-0
+ */
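+ /*
+ * Each core appears to consume 8 u32 cells of
+ * "nvidia,ahsp_sm_interrupts", parsed below as four
+ * (interrupt, target) pairs.
+ */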
+ for (j = 0; j < 8; j += 2) {
+ if (of_property_read_u32_index(hsp_node,
+ "nvidia,ahsp_sm_interrupts",
+ (i * 8) + j, &hsp_int)) {
+ dev_err(dev,
+ "no HSP int config for core %d\n",
+ (i + 1));
+ break;
+ }
+ if (of_property_read_u32_index(hsp_node,
+ "nvidia,ahsp_sm_interrupts",
+ (i * 8) + j + 1, &hsp_int_targ)) {
+ dev_err(dev,
+ "no HSP int_targ config for core %d\n",
+ (i + 1));
+ break;
+ }
+
+ /*
+ * DT definition decrements SPI IRQs
+ * by 32, so restore the same here
+ */
+ ret = tegra_agic_route_interrupt(hsp_int + 32,
+ hsp_int_targ);
+ if (ret) {
+ dev_err(dev,
+ "HSP routing for core %d failed: %d\n",
+ (i + 1), ret);
+ break;
+ }
+ }
+
+ if (j == 8)
+ dev_info(dev, "Setup done for core %d FW\n", (i + 1));
+ }
+
+ of_node_put(hsp_node);
+
+ return 0;
+}
+#endif // CONFIG_TEGRA_ADSP_MULTIPLE_FW
+
+int nvadsp_os_load(void)
+{
+ struct nvadsp_drv_data *drv_data;
+ struct device *dev;
+ int ret = 0;
+
+ if (!priv.pdev) {
+ pr_err("ADSP Driver is not initialized\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&priv.fw_load_lock);
+ if (priv.adsp_os_fw_loaded)
+ goto end;
+
+ drv_data = platform_get_drvdata(priv.pdev);
+ dev = &priv.pdev->dev;
+
+#ifdef CONFIG_TEGRA_ADSP_MULTIPLE_FW
+ dev_info(dev, "Loading multiple ADSP FW....\n");
+ nvadsp_load_multi_fw(priv.pdev);
+#endif // CONFIG_TEGRA_ADSP_MULTIPLE_FW
+
+ if (drv_data->adsp_os_secload) {
+ dev_info(dev, "ADSP OS firmware already loaded\n");
+ ret = __nvadsp_os_secload(priv.pdev);
+ } else {
+ ret = nvadsp_firmware_load(priv.pdev);
+ }
+
+ if (ret == 0) {
+ priv.adsp_os_fw_loaded = true;
+#ifdef CONFIG_DEBUG_FS
+ wake_up(&priv.logger.wait_queue);
+#endif
+ }
+end:
+ mutex_unlock(&priv.fw_load_lock);
+ return ret;
+}
+EXPORT_SYMBOL(nvadsp_os_load);
+
+/*
+ * Static adsp freq to emc freq lookup table
+ *
+ * arg:
+ * adspfreq - adsp freq in KHz
+ * return:
+ * 0 - min emc freq
+ * > 0 - expected emc freq (in kHz) at this adsp freq
+ */
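+/*
+ * For example, adsp_to_emc_freq(300000) returns 102000 (102 MHz expressed
+ * in kHz), while adsp_to_emc_freq(100000) returns 0 (the emc minimum).
+ */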
+#ifdef CONFIG_TEGRA_ADSP_DFS
+u32 adsp_to_emc_freq(u32 adspfreq)
+{
+ /*
+ * Vote on memory bus frequency based on adsp frequency
+ * adsp rate is in kHz, returned emc rate is in kHz
+ */
+ if (adspfreq >= 204800)
+ return 102000; /* adsp >= 204.8 MHz, emc 102 MHz */
+ else
+ return 0; /* emc min */
+}
+#endif
+
+static int nvadsp_set_ape_emc_freq(struct nvadsp_drv_data *drv_data)
+{
+ unsigned long ape_emc_freq;
+ struct device *dev = &priv.pdev->dev;
+ int ret;
+
+#ifdef CONFIG_TEGRA_ADSP_DFS
+ /* pass adsp freq in KHz. adsp_emc_freq in Hz */
+ ape_emc_freq = adsp_to_emc_freq(drv_data->adsp_freq / 1000) * 1000;
+#else
+ ape_emc_freq = drv_data->ape_emc_freq * 1000; /* in Hz */
+#endif
+ dev_dbg(dev, "requested adsp cpu freq %luKHz",
+ drv_data->adsp_freq / 1000);
+ dev_dbg(dev, "emc freq %luHz\n", ape_emc_freq / 1000);
+
+ /*
+ * ape_emc_freq does not need to be set if adsp_freq
+ * is less than 204.8 MHz
+ */
+
+ if (!ape_emc_freq)
+ return 0;
+
+ ret = nvadsp_set_bw(drv_data, ape_emc_freq);
+ dev_dbg(dev, "ape.emc freq %luKHz\n",
+ tegra_bwmgr_get_emc_rate() / 1000);
+
+ return ret;
+}
+
+static int nvadsp_set_ape_freq(struct nvadsp_drv_data *drv_data)
+{
+ unsigned long ape_freq = drv_data->ape_freq * 1000; /* in Hz*/
+ struct device *dev = &priv.pdev->dev;
+ int ret;
+
+#ifdef CONFIG_TEGRA_ADSP_ACTMON
+ ape_freq = drv_data->adsp_freq / ADSP_TO_APE_CLK_RATIO;
+#endif
+ dev_dbg(dev, "ape freq %luKHz", ape_freq / 1000);
+
+ if (!ape_freq)
+ return 0;
+
+ ret = clk_set_rate(drv_data->ape_clk, ape_freq);
+
+ dev_dbg(dev, "ape freq %luKHz\n",
+ clk_get_rate(drv_data->ape_clk) / 1000);
+ return ret;
+}
+
+static int nvadsp_t210_set_clks_and_prescalar(struct nvadsp_drv_data *drv_data)
+{
+ struct nvadsp_shared_mem *shared_mem = drv_data->shared_adsp_os_data;
+ struct nvadsp_os_args *os_args = &shared_mem->os_args;
+ struct device *dev = &priv.pdev->dev;
+ unsigned long max_adsp_freq;
+ unsigned long adsp_freq;
+ u32 max_index;
+ u32 cur_index;
+ int ret = 0;
+
+ adsp_freq = drv_data->adsp_freq * 1000; /* in Hz*/
+
+ max_adsp_freq = clk_round_rate(drv_data->adsp_cpu_abus_clk,
+ ULONG_MAX);
+ max_index = max_adsp_freq / MIN_ADSP_FREQ;
+ cur_index = adsp_freq / MIN_ADSP_FREQ;
+
+ if (!adsp_freq)
+ /* Set max adsp boot freq */
+ cur_index = max_index;
+
+ if (adsp_freq % MIN_ADSP_FREQ) {
+ if (cur_index >= max_index)
+ cur_index = max_index;
+ else
+ cur_index++;
+ } else if (cur_index >= max_index)
+ cur_index = max_index;
+
+ /*
+ * timer interval = (prescalar + 1) * (count + 1) / periph_freq
+ * therefore for 0 count,
+ * 1 / TIMER_CLK_HZ = (prescalar + 1) / periph_freq
+ * Hence, prescalar = periph_freq / TIMER_CLK_HZ - 1
+ */
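+ /*
+ * The assignment below sets the prescalar to (cur_index - 1),
+ * which matches the formula above under the implicit (not
+ * verified here) assumption that periph_freq is the ADSP clock,
+ * i.e. cur_index * MIN_ADSP_FREQ, and that TIMER_CLK_HZ equals
+ * MIN_ADSP_FREQ.
+ */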
+ os_args->timer_prescalar = cur_index - 1;
+
+ adsp_freq = cur_index * MIN_ADSP_FREQ;
+
+ ret = clk_set_rate(drv_data->adsp_cpu_abus_clk, adsp_freq);
+ if (ret)
+ goto end;
+
+ drv_data->adsp_freq = adsp_freq / 1000; /* adsp_freq in KHz*/
+ drv_data->adsp_freq_hz = adsp_freq;
+
+ /* adspos uses os_args->adsp_freq_hz for EDF */
+ os_args->adsp_freq_hz = adsp_freq;
+
+end:
+ dev_dbg(dev, "adsp cpu freq %luKHz\n",
+ clk_get_rate(drv_data->adsp_cpu_abus_clk) / 1000);
+ dev_dbg(dev, "timer prescalar %x\n", os_args->timer_prescalar);
+
+ return ret;
+}
+
+static int nvadsp_set_adsp_clks(struct nvadsp_drv_data *drv_data)
+{
+ struct nvadsp_shared_mem *shared_mem = drv_data->shared_adsp_os_data;
+ struct nvadsp_os_args *os_args = &shared_mem->os_args;
+ struct platform_device *pdev = drv_data->pdev;
+ struct device *dev = &pdev->dev;
+ unsigned long max_adsp_freq;
+ unsigned long adsp_freq;
+ int ret = 0;
+
+ adsp_freq = drv_data->adsp_freq_hz; /* in Hz*/
+
+ /* round rate shall be used with adsp parent clk i.e. aclk */
+ max_adsp_freq = clk_round_rate(drv_data->aclk_clk, ULONG_MAX);
+
+ /* Set max adsp boot freq */
+ if (!adsp_freq)
+ adsp_freq = max_adsp_freq;
+
+ /* set rate shall be used with adsp parent clk i.e. aclk */
+ ret = clk_set_rate(drv_data->aclk_clk, adsp_freq);
+ if (ret) {
+ dev_err(dev, "setting adsp_freq:%luHz failed.\n", adsp_freq);
+ dev_err(dev, "max_adsp_freq:%luHz\n", max_adsp_freq);
+ goto end;
+ }
+
+ drv_data->adsp_freq = adsp_freq / 1000; /* adsp_freq in KHz*/
+ drv_data->adsp_freq_hz = adsp_freq;
+
+ /* adspos uses os_args->adsp_freq_hz for EDF */
+ os_args->adsp_freq_hz = adsp_freq;
+end:
+ dev_dbg(dev, "adsp cpu freq %luKHz\n",
+ clk_get_rate(drv_data->adsp_clk) / 1000);
+ return ret;
+}
+
+static int __deassert_adsp(struct nvadsp_drv_data *d)
+{
+ struct platform_device *pdev = d->pdev;
+ struct device *dev = &pdev->dev;
+ int ret = 0;
+
+ /*
+ * The ADSP_ALL reset in BPMP-FW is overloaded to de-assert
+ * all 7 resets i.e. ADSP, ADSPINTF, ADSPDBG, ADSPNEON, ADSPPERIPH,
+ * ADSPSCU and ADSPWDT resets. The BPMP-FW also takes care
+ * of specific de-assert sequence and delays between them.
+ * So de-resetting only ADSP reset is sufficient to de-reset
+ * all ADSP sub-modules.
+ */
+ ret = reset_control_deassert(d->adspall_rst);
+ if (ret)
+ dev_err(dev, "failed to deassert adsp\n");
+
+ return ret;
+}
+
+static int nvadsp_deassert_adsp(struct nvadsp_drv_data *drv_data)
+{
+ int ret = -EINVAL;
+
+ if (drv_data->deassert_adsp)
+ ret = drv_data->deassert_adsp(drv_data);
+
+ return ret;
+}
+
+static int __assert_adsp(struct nvadsp_drv_data *d)
+{
+ struct platform_device *pdev = d->pdev;
+ struct device *dev = &pdev->dev;
+ int ret = 0;
+
+ /*
+ * The ADSP_ALL reset in BPMP-FW is overloaded to assert
+ * all 7 resets i.e. ADSP, ADSPINTF, ADSPDBG, ADSPNEON,
+ * ADSPPERIPH, ADSPSCU and ADSPWDT resets. So resetting
+ * only ADSP reset is sufficient to reset all ADSP sub-modules.
+ */
+ ret = reset_control_assert(d->adspall_rst);
+ if (ret)
+ dev_err(dev, "failed to assert adsp\n");
+
+ return ret;
+}
+
+static int nvadsp_assert_adsp(struct nvadsp_drv_data *drv_data)
+{
+ int ret = -EINVAL;
+
+ if (drv_data->assert_adsp)
+ ret = drv_data->assert_adsp(drv_data);
+
+ return ret;
+}
+
+static int nvadsp_set_boot_freqs(struct nvadsp_drv_data *drv_data)
+{
+ struct device *dev = &priv.pdev->dev;
+ struct device_node *node = dev->of_node;
+ int ret = 0;
+
+ /* on Unit-FPGA do not set clocks, return success */
+ if (drv_data->adsp_unit_fpga)
+ return 0;
+
+ if (of_device_is_compatible(node, "nvidia,tegra210-adsp")) {
+ if (drv_data->adsp_cpu_abus_clk) {
+ ret = nvadsp_t210_set_clks_and_prescalar(drv_data);
+ if (ret)
+ goto end;
+ } else {
+ ret = -EINVAL;
+ goto end;
+ }
+ } else {
+ if (drv_data->adsp_clk) {
+ ret = nvadsp_set_adsp_clks(drv_data);
+ if (ret)
+ goto end;
+ } else {
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+
+ if (drv_data->ape_clk) {
+ ret = nvadsp_set_ape_freq(drv_data);
+ if (ret)
+ goto end;
+ }
+ if (drv_data->bwmgr) {
+ ret = nvadsp_set_ape_emc_freq(drv_data);
+ if (ret)
+ goto end;
+ }
+end:
+ return ret;
+}
+
+static int wait_for_adsp_os_load_complete(void)
+{
+ struct device *dev = &priv.pdev->dev;
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(priv.pdev);
+ uint32_t timeout, data;
+ status_t ret;
+
+ timeout = drv_data->adsp_load_timeout;
+ if (!timeout)
+ timeout = ADSP_OS_LOAD_TIMEOUT;
+
+ ret = nvadsp_mbox_recv(&adsp_com_mbox, &data,
+ true, timeout);
+ if (ret) {
+ dev_err(dev, "ADSP OS loading timed out\n");
+ goto end;
+ }
+ dev_dbg(dev, "ADSP has been %s\n",
+ data == ADSP_OS_BOOT_COMPLETE ? "BOOTED" : "RESUMED");
+
+ switch (data) {
+ case ADSP_OS_BOOT_COMPLETE:
+ ret = load_adsp_static_apps();
+ break;
+ case ADSP_OS_RESUME:
+ default:
+ break;
+ }
+end:
+ return ret;
+}
+
+static int __nvadsp_os_start(void)
+{
+ struct nvadsp_drv_data *drv_data;
+ struct device *dev;
+ int ret = 0;
+
+ dev = &priv.pdev->dev;
+ drv_data = platform_get_drvdata(priv.pdev);
+
+
+ dev_dbg(dev, "ADSP is booting on %s\n",
+ drv_data->adsp_unit_fpga ? "UNIT-FPGA" : "SILICON");
+
+ nvadsp_assert_adsp(drv_data);
+
+ if (!drv_data->adsp_os_secload) {
+ dev_dbg(dev, "Copying EVP...\n");
+ copy_io_in_l(drv_data->state.evp_ptr,
+ drv_data->state.evp,
+ AMC_EVP_SIZE);
+ }
+
+ dev_dbg(dev, "Setting freqs\n");
+ ret = nvadsp_set_boot_freqs(drv_data);
+ if (ret) {
+ dev_err(dev, "failed to set boot freqs\n");
+ goto end;
+ }
+
+ dev_dbg(dev, "De-asserting adsp\n");
+ ret = nvadsp_deassert_adsp(drv_data);
+ if (ret) {
+ dev_err(dev, "failed to deassert ADSP\n");
+ goto end;
+ }
+
+ dev_dbg(dev, "Waiting for ADSP OS to boot up...\n");
+
+ ret = wait_for_adsp_os_load_complete();
+ if (ret) {
+ dev_err(dev, "Unable to start ADSP OS\n");
+ goto end;
+ }
+ dev_dbg(dev, "ADSP OS boot up... Done!\n");
+
+#ifdef CONFIG_TEGRA_ADSP_DFS
+ ret = adsp_dfs_core_init(priv.pdev);
+ if (ret) {
+ dev_err(dev, "adsp dfs initialization failed\n");
+ goto err;
+ }
+#endif
+
+#ifdef CONFIG_TEGRA_ADSP_ACTMON
+ ret = ape_actmon_init(priv.pdev);
+ if (ret) {
+ dev_err(dev, "ape actmon initialization failed\n");
+ goto err;
+ }
+#endif
+
+#ifdef CONFIG_TEGRA_ADSP_CPUSTAT
+ ret = adsp_cpustat_init(priv.pdev);
+ if (ret) {
+ dev_err(dev, "adsp cpustat initialisation failed\n");
+ goto err;
+ }
+#endif
+end:
+ return ret;
+
+#if defined(CONFIG_TEGRA_ADSP_DFS) || defined(CONFIG_TEGRA_ADSP_CPUSTAT)
+err:
+ __nvadsp_os_stop(true);
+ return ret;
+#endif
+}
+
+static void dump_adsp_logs(void)
+{
+ int i = 0;
+ char buff[DUMP_BUFF] = { };
+ int buff_iter = 0;
+ char last_char;
+ struct nvadsp_debug_log *logger = &priv.logger;
+ struct device *dev = &priv.pdev->dev;
+ char *ptr = logger->debug_ram_rdr;
+
+ dev_err(dev, "Dumping ADSP logs ........\n");
+
+ for (i = 0; i < logger->debug_ram_sz; i++) {
+ last_char = *(ptr + i);
+ if ((last_char != EOT) && (last_char != 0)) {
+ /* flush at DUMP_BUFF - 1 to leave room for the NUL terminator */
+ if ((last_char == '\n') || (last_char == '\r') ||
+ (buff_iter == (DUMP_BUFF - 1))) {
+ dev_err(dev, "[ADSP OS] %s\n", buff);
+ memset(buff, 0, sizeof(buff));
+ buff_iter = 0;
+ } else {
+ buff[buff_iter++] = last_char;
+ }
+ }
+ }
+ dev_err(dev, "End of ADSP log dump .....\n");
+}
+
+static void print_agic_irq_states(void)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(priv.pdev);
+ int start_irq = drv_data->chip_data->start_irq;
+ int end_irq = drv_data->chip_data->end_irq;
+ struct device *dev = &priv.pdev->dev;
+ int i;
+
+ for (i = start_irq; i < end_irq; i++) {
+ dev_info(dev, "irq %d is %s and %s\n", i,
+ tegra_agic_irq_is_pending(i) ?
+ "pending" : "not pending",
+ tegra_agic_irq_is_active(i) ?
+ "active" : "not active");
+ }
+}
+
+static void print_arm_mode_regs(void)
+{
+ struct nvadsp_exception_context *excep_context;
+ struct arm_fault_frame_shared *shared_frame;
+ struct arm_mode_regs_shared *shared_regs;
+ struct nvadsp_shared_mem *shared_mem;
+ struct device *dev = &priv.pdev->dev;
+ struct nvadsp_drv_data *drv_data;
+
+ drv_data = platform_get_drvdata(priv.pdev);
+ shared_mem = drv_data->shared_adsp_os_data;
+ excep_context = &shared_mem->exception_context;
+ shared_frame = &excep_context->frame;
+ shared_regs = &excep_context->regs;
+
+ dev_err(dev, "dumping arm mode register data...\n");
+ dev_err(dev, "%c fiq r13 0x%08x r14 0x%08x\n",
+ ((shared_frame->spsr & MODE_MASK) == MODE_FIQ) ? '*' : ' ',
+ shared_regs->fiq_r13, shared_regs->fiq_r14);
+ dev_err(dev, "%c irq r13 0x%08x r14 0x%08x\n",
+ ((shared_frame->spsr & MODE_MASK) == MODE_IRQ) ? '*' : ' ',
+ shared_regs->irq_r13, shared_regs->irq_r14);
+ dev_err(dev, "%c svc r13 0x%08x r14 0x%08x\n",
+ ((shared_frame->spsr & MODE_MASK) == MODE_SVC) ? '*' : ' ',
+ shared_regs->svc_r13, shared_regs->svc_r14);
+ dev_err(dev, "%c und r13 0x%08x r14 0x%08x\n",
+ ((shared_frame->spsr & MODE_MASK) == MODE_UND) ? '*' : ' ',
+ shared_regs->und_r13, shared_regs->und_r14);
+ dev_err(dev, "%c sys r13 0x%08x r14 0x%08x\n",
+ ((shared_frame->spsr & MODE_MASK) == MODE_SYS) ? '*' : ' ',
+ shared_regs->sys_r13, shared_regs->sys_r14);
+ dev_err(dev, "%c abt r13 0x%08x r14 0x%08x\n",
+ ((shared_frame->spsr & MODE_MASK) == MODE_ABT) ? '*' : ' ',
+ shared_regs->abt_r13, shared_regs->abt_r14);
+}
+
+static void print_arm_fault_frame(void)
+{
+ struct nvadsp_exception_context *excep_context;
+ struct arm_fault_frame_shared *shared_frame;
+ struct nvadsp_shared_mem *shared_mem;
+ struct device *dev = &priv.pdev->dev;
+ struct nvadsp_drv_data *drv_data;
+
+ drv_data = platform_get_drvdata(priv.pdev);
+ shared_mem = drv_data->shared_adsp_os_data;
+ excep_context = &shared_mem->exception_context;
+ shared_frame = &excep_context->frame;
+
+ dev_err(dev, "dumping fault frame...\n");
+ dev_err(dev, "r0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x\n",
+ shared_frame->r[0], shared_frame->r[1], shared_frame->r[2],
+ shared_frame->r[3]);
+ dev_err(dev, "r4 0x%08x r5 0x%08x r6 0x%08x r7 0x%08x\n",
+ shared_frame->r[4], shared_frame->r[5], shared_frame->r[6],
+ shared_frame->r[7]);
+ dev_err(dev, "r8 0x%08x r9 0x%08x r10 0x%08x r11 0x%08x\n",
+ shared_frame->r[8], shared_frame->r[9], shared_frame->r[10],
+ shared_frame->r[11]);
+ dev_err(dev, "r12 0x%08x usp 0x%08x ulr 0x%08x pc 0x%08x\n",
+ shared_frame->r[12], shared_frame->usp, shared_frame->ulr,
+ shared_frame->pc);
+ dev_err(dev, "spsr 0x%08x\n", shared_frame->spsr);
+
+}
+
+static void dump_thread_name(struct platform_device *pdev, u32 val)
+{
+ dev_info(&pdev->dev, "%s: adsp current thread: %c%c%c%c\n",
+ __func__,
+ (val >> 24) & 0xFF, (val >> 16) & 0xFF,
+ (val >> 8) & 0xFF, (val >> 0) & 0xFF);
+}
+
+static void dump_irq_num(struct platform_device *pdev, u32 val)
+{
+ dev_info(&pdev->dev, "%s: adsp current/last irq : %d\n",
+ __func__, val);
+}
+
+static void get_adsp_state(void)
+{
+ struct nvadsp_drv_data *drv_data;
+ struct device *dev;
+ uint32_t val;
+ const char *msg;
+
+ if (!priv.pdev) {
+ pr_err("ADSP Driver is not initialized\n");
+ return;
+ }
+
+ drv_data = platform_get_drvdata(priv.pdev);
+ dev = &priv.pdev->dev;
+
+ if (drv_data->chip_data->adsp_state_hwmbox == 0) {
+ dev_info(dev, "%s: No state hwmbox available\n", __func__);
+ return;
+ }
+
+ val = hwmbox_readl(drv_data->chip_data->adsp_state_hwmbox);
+ dev_info(dev, "%s: adsp state hwmbox value: 0x%X\n", __func__, val);
+
+ switch (val) {
+
+ case ADSP_LOADER_MAIN_ENTRY:
+ msg = "loader_main: entry to loader_main";
+ break;
+ case ADSP_LOADER_MAIN_CACHE_DISABLE_COMPLETE:
+ msg = "loader_main: Cache has been disabled";
+ break;
+ case ADSP_LOADER_MAIN_CONFIGURE_MMU_COMPLETE:
+ msg = "loader_main: MMU configuration is complete";
+ break;
+ case ADSP_LOADER_MAIN_CACHE_ENABLE_COMPLETE:
+ msg = "loader_main: Cache has been enabled";
+ break;
+ case ADSP_LOADER_MAIN_FPU_ENABLE_COMPLETE:
+ msg = "loader_main: FPU has been enabled";
+ break;
+ case ADSP_LOADER_MAIN_DECOMPRESSION_COMPLETE:
+ msg = "loader_main: ADSP FW decompression is complete";
+ break;
+ case ADSP_LOADER_MAIN_EXIT:
+ msg = "loader_main: exiting loader_main function";
+ break;
+
+ case ADSP_START_ENTRY_AT_RESET:
+ msg = "start: ADSP is at reset";
+ break;
+ case ADSP_START_CPU_EARLY_INIT:
+ msg = "start: ADSP to do cpu_early_init";
+ break;
+ case ADSP_START_FIRST_BOOT:
+ msg = "start: ADSP is booting for first time,"
+ "initializing DATA and clearing BSS";
+ break;
+ case ADSP_START_LK_MAIN_ENTRY:
+ msg = "start: ADSP about to enter lk_main";
+ break;
+
+ case ADSP_LK_MAIN_ENTRY:
+ msg = "lk_main: entry to lk_main";
+ break;
+ case ADSP_LK_MAIN_EARLY_THREAD_INIT_COMPLETE:
+ msg = "lk_main: early_thread_init has been completed";
+ break;
+ case ADSP_LK_MAIN_EARLY_ARCH_INIT_COMPLETE:
+ msg = "lk_main: early_arch_init has been completed";
+ break;
+ case ADSP_LK_MAIN_EARLY_PLATFORM_INIT_COMPLETE:
+ msg = "lk_main: early_platform_init has been completed";
+ break;
+ case ADSP_LK_MAIN_EARLY_TARGET_INIT_COMPLETE:
+ msg = "lk_main: early_target_init has been completed";
+ break;
+ case ADSP_LK_MAIN_CONSTRUCTOR_INIT_COMPLETE:
+ msg = "lk_main: constructors has been called";
+ break;
+ case ADSP_LK_MAIN_HEAP_INIT_COMPLETE:
+ msg = "lk_main: heap has been initialized";
+ break;
+ case ADSP_LK_MAIN_KERNEL_INIT_COMPLETE:
+ msg = "lk_main: ADSP kernel has been initialized";
+ break;
+ case ADSP_LK_MAIN_CPU_RESUME_ENTRY:
+ msg = "lk_main: ADSP is about to resume from suspend";
+ break;
+
+ case ADSP_BOOTSTRAP2_ARCH_INIT_COMPLETE:
+ msg = "bootstrap2: ADSP arch_init is complete";
+ break;
+ case ADSP_BOOTSTRAP2_PLATFORM_INIT_COMPLETE:
+ msg = "bootstrap2: platform has been initialized";
+ break;
+ case ADSP_BOOTSTRAP2_TARGET_INIT_COMPLETE:
+ msg = "bootstrap2: target has been initialized";
+ break;
+ case ADSP_BOOTSTRAP2_APP_MODULE_INIT_COMPLETE:
+ msg = "bootstrap2: APP modules initialized";
+ break;
+ case ADSP_BOOTSTRAP2_APP_INIT_COMPLETE:
+ msg = "bootstrap2: APP init is complete";
+ break;
+ case ADSP_BOOTSTRAP2_STATIC_APP_INIT_COMPLETE:
+ msg = "bootstrap2: Static apps has been initialized";
+ break;
+ case ADSP_BOOTSTRAP2_OS_LOAD_COMPLETE:
+ msg = "bootstrap2: ADSP OS successfully loaded";
+ break;
+ case ADSP_SUSPEND_BEGINS:
+ msg = "suspend: begins";
+ break;
+ case ADSP_SUSPEND_MBX_SEND_COMPLETE:
+ msg = "suspend: mbox send complete";
+ break;
+ case ADSP_SUSPEND_DISABLED_TIMERS:
+ msg = "suspend: timers disabled";
+ break;
+ case ADSP_SUSPEND_DISABLED_INTS:
+ msg = "suspend: interrupts disabled";
+ break;
+ case ADSP_SUSPEND_ARAM_SAVED:
+ msg = "suspend: aram saved";
+ break;
+ case ADSP_SUSPEND_AMC_SAVED:
+ msg = "suspend: amc saved";
+ break;
+ case ADSP_SUSPEND_AMISC_SAVED:
+ msg = "suspend: amisc saved";
+ break;
+ case ADSP_SUSPEND_L1_CACHE_DISABLED:
+ msg = "suspend: l1 cache disabled";
+ break;
+ case ADSP_SUSPEND_L2_CACHE_DISABLED:
+ msg = "suspend: l2 cache disabled";
+ break;
+ case ADSP_RESUME_ADSP:
+ msg = "resume: beings";
+ break;
+ case ADSP_RESUME_AMISC_RESTORED:
+ msg = "resume: amisc restored";
+ break;
+ case ADSP_RESUME_AMC_RESTORED:
+ msg = "resume: amc restored";
+ break;
+ case ADSP_RESUME_ARAM_RESTORED:
+ msg = "resume: aram restored";
+ break;
+ case ADSP_RESUME_COMPLETE:
+ msg = "resume: complete";
+ break;
+ case ADSP_WFI_ENTER:
+ msg = "WFI: Entering WFI";
+ break;
+ case ADSP_WFI_EXIT:
+ msg = "WFI: Exiting WFI, Failed to Enter";
+ break;
+ case ADSP_DFS_MBOX_RECV:
+ msg = "DFS: mbox received";
+ break;
+ case ADSP_DFS_MBOX_SENT:
+ msg = "DFS: mbox sent";
+ break;
+ default:
+ msg = "Unrecognized ADSP state!!";
+ break;
+ }
+
+ dev_info(dev, "%s: %s\n", __func__, msg);
+
+ val = hwmbox_readl(drv_data->chip_data->adsp_thread_hwmbox);
+ dump_thread_name(priv.pdev, val);
+
+ val = hwmbox_readl(drv_data->chip_data->adsp_irq_hwmbox);
+ dump_irq_num(priv.pdev, val);
+}
+
+
+void dump_adsp_sys(void)
+{
+ if (!priv.pdev) {
+ pr_err("ADSP Driver is not initialized\n");
+ return;
+ }
+
+ dump_adsp_logs();
+ dump_mailbox_regs();
+ print_arm_fault_frame();
+ print_arm_mode_regs();
+ get_adsp_state();
+ if (nvadsp_tegra_adma_dump_ch_reg)
+ (*nvadsp_tegra_adma_dump_ch_reg)();
+ print_agic_irq_states();
+}
+EXPORT_SYMBOL(dump_adsp_sys);
+
+static void nvadsp_free_os_interrupts(struct nvadsp_os_data *priv)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(priv->pdev);
+ unsigned int wdt_virq = drv_data->agic_irqs[WDT_VIRQ];
+ unsigned int wfi_virq = drv_data->agic_irqs[WFI_VIRQ];
+ struct device *dev = &priv->pdev->dev;
+
+ devm_free_irq(dev, wdt_virq, priv);
+ devm_free_irq(dev, wfi_virq, priv);
+}
+
+static int nvadsp_setup_os_interrupts(struct nvadsp_os_data *priv)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(priv->pdev);
+ unsigned int wdt_virq = drv_data->agic_irqs[WDT_VIRQ];
+ unsigned int wfi_virq = drv_data->agic_irqs[WFI_VIRQ];
+ struct device *dev = &priv->pdev->dev;
+ int ret;
+
+ ret = devm_request_irq(dev, wdt_virq, adsp_wdt_handler,
+ IRQF_TRIGGER_RISING, "adsp watchdog", priv);
+ if (ret) {
+ dev_err(dev, "failed to get adsp watchdog interrupt\n");
+ goto end;
+ }
+
+ ret = devm_request_irq(dev, wfi_virq, adsp_wfi_handler,
+ IRQF_TRIGGER_RISING, "adsp wfi", priv);
+ if (ret) {
+ dev_err(dev, "cannot request for wfi interrupt\n");
+ goto free_interrupts;
+ }
+
+ end:
+
+ return ret;
+
+ free_interrupts:
+ nvadsp_free_os_interrupts(priv);
+ return ret;
+}
+
+static void free_interrupts(struct nvadsp_os_data *priv)
+{
+ nvadsp_free_os_interrupts(priv);
+ nvadsp_free_hwmbox_interrupts(priv->pdev);
+ nvadsp_free_amc_interrupts(priv->pdev);
+}
+
+static int setup_interrupts(struct nvadsp_os_data *priv)
+{
+ int ret;
+
+ ret = nvadsp_setup_os_interrupts(priv);
+ if (ret)
+ goto err;
+
+ ret = nvadsp_setup_hwmbox_interrupts(priv->pdev);
+ if (ret)
+ goto free_os_interrupts;
+ ret = nvadsp_setup_amc_interrupts(priv->pdev);
+ if (ret)
+ goto free_hwmbox_interrupts;
+
+ return ret;
+
+ free_hwmbox_interrupts:
+ nvadsp_free_hwmbox_interrupts(priv->pdev);
+ free_os_interrupts:
+ nvadsp_free_os_interrupts(priv);
+ err:
+ return ret;
+}
+
+void nvadsp_set_adma_dump_reg(void (*cb_adma_regdump)(void))
+{
+ nvadsp_tegra_adma_dump_ch_reg = cb_adma_regdump;
+ pr_info("%s: callback for adma reg dump is sent to %p\n",
+ __func__, nvadsp_tegra_adma_dump_ch_reg);
+}
+EXPORT_SYMBOL(nvadsp_set_adma_dump_reg);
+
+int nvadsp_os_start(void)
+{
+ struct nvadsp_drv_data *drv_data;
+ struct device *dev;
+ int ret = 0;
+ static int cold_start = 1;
+ u8 chip_id;
+
+ if (!priv.pdev) {
+ pr_err("ADSP Driver is not initialized\n");
+ ret = -EINVAL;
+ goto end;
+ }
+
+ drv_data = platform_get_drvdata(priv.pdev);
+ dev = &priv.pdev->dev;
+
+ /* check if fw is loaded then start the adsp os */
+ if (!priv.adsp_os_fw_loaded) {
+ dev_err(dev, "Call to nvadsp_os_load not made\n");
+ ret = -EINVAL;
+ goto end;
+ }
+
+ mutex_lock(&priv.os_run_lock);
+ /* if adsp is started/running exit gracefully */
+ if (priv.os_running)
+ goto unlock;
+
+#ifdef CONFIG_PM
+ ret = pm_runtime_get_sync(&priv.pdev->dev);
+ if (ret < 0)
+ goto unlock;
+#endif
+ ret = setup_interrupts(&priv);
+ if (ret < 0)
+ goto unlock;
+
+ if (cold_start) {
+ if (drv_data->chip_data->adsp_shared_mem_hwmbox != 0) {
+#ifdef CONFIG_TEGRA_ADSP_MULTIPLE_FW
+ int i;
+ for (i = 0; i < MFW_MAX_OTHER_CORES; i++) {
+ if (mfw_hsp_va[i]) {
+ writel((uint32_t)mfw_smem_iova[i],
+ mfw_hsp_va[i] +
+ drv_data->chip_data->
+ adsp_shared_mem_hwmbox
+ );
+ }
+ }
+#endif // CONFIG_TEGRA_ADSP_MULTIPLE_FW
+
+ hwmbox_writel(
+ (uint32_t)drv_data->shared_adsp_os_data_iova,
+ drv_data->chip_data->adsp_shared_mem_hwmbox);
+ }
+
+ if (!is_tegra_hypervisor_mode() &&
+ drv_data->chip_data->adsp_os_config_hwmbox != 0) {
+ /* Set ADSP to do decompression */
+ uint32_t val = (ADSP_CONFIG_DECOMPRESS_EN <<
+ ADSP_CONFIG_DECOMPRESS_SHIFT);
+
+ /* Write to HWMBOX5 */
+ hwmbox_writel(val,
+ drv_data->chip_data->adsp_os_config_hwmbox);
+ }
+
+ /* Write ACSR base address and decompr enable flag only once */
+ cold_start = 0;
+ }
+
+ if (drv_data->chip_data->hwmb.hwmbox1_reg != 0) {
+ chip_id = tegra_get_chip_id();
+ /* Write chip id info to HWMBOX1 to enable ast config
+ * later for t186/t196
+ */
+ if (chip_id != 0) {
+ hwmbox_writel((uint32_t)chip_id,
+ drv_data->chip_data->hwmb.hwmbox1_reg);
+ } else {
+ dev_err(dev, "chip id is NULL\n");
+ ret = -EINVAL;
+ free_interrupts(&priv);
+#ifdef CONFIG_PM
+ pm_runtime_put_sync(&priv.pdev->dev);
+#endif
+ goto unlock;
+ }
+ }
+
+ ret = __nvadsp_os_start();
+ if (ret) {
+ priv.os_running = drv_data->adsp_os_running = false;
+ /* if start fails call pm suspend of adsp driver */
+ dev_err(dev, "adsp failed to boot with ret = %d\n", ret);
+ dump_adsp_sys();
+ free_interrupts(&priv);
+#ifdef CONFIG_PM
+ pm_runtime_put_sync(&priv.pdev->dev);
+#endif
+ goto unlock;
+
+ }
+ priv.os_running = drv_data->adsp_os_running = true;
+ priv.num_start++;
+#if defined(CONFIG_TEGRA_ADSP_FILEIO)
+ if ((drv_data->adsp_os_secload) && (!drv_data->adspff_init)) {
+ int adspff_status = adspff_init(priv.pdev);
+
+ if (adspff_status) {
+ if (adspff_status != -ENOENT) {
+ priv.os_running = drv_data->adsp_os_running = false;
+ dev_err(dev,
+ "adsp boot failed at adspff init with ret = %d",
+ adspff_status);
+ dump_adsp_sys();
+ free_interrupts(&priv);
+#ifdef CONFIG_PM
+ pm_runtime_put_sync(&priv.pdev->dev);
+#endif
+ ret = adspff_status;
+ goto unlock;
+ }
+ } else
+ drv_data->adspff_init = true;
+ }
+#endif
+
+#ifdef CONFIG_TEGRA_ADSP_LPTHREAD
+ if (!drv_data->lpthread_initialized) {
+ ret = adsp_lpthread_entry(priv.pdev);
+ if (ret)
+ dev_err(dev, "adsp_lpthread_entry failed ret = %d\n",
+ ret);
+ }
+#endif
+
+ drv_data->adsp_os_suspended = false;
+#ifdef CONFIG_DEBUG_FS
+ wake_up(&priv.logger.wait_queue);
+#endif
+
+#ifdef CONFIG_TEGRA_ADSP_LPTHREAD
+ adsp_lpthread_set_suspend(drv_data->adsp_os_suspended);
+#endif
+
+unlock:
+ mutex_unlock(&priv.os_run_lock);
+end:
+ return ret;
+}
+EXPORT_SYMBOL(nvadsp_os_start);
+
+static int __nvadsp_os_suspend(void)
+{
+ struct device *dev = &priv.pdev->dev;
+ struct nvadsp_drv_data *drv_data;
+ int ret, cnt = 0;
+ u32 adsp_status;
+
+ drv_data = platform_get_drvdata(priv.pdev);
+
+#ifdef CONFIG_TEGRA_ADSP_ACTMON
+ ape_actmon_exit(priv.pdev);
+#endif
+
+#ifdef CONFIG_TEGRA_ADSP_DFS
+ adsp_dfs_core_exit(priv.pdev);
+#endif
+
+#ifdef CONFIG_TEGRA_ADSP_CPUSTAT
+ adsp_cpustat_exit(priv.pdev);
+#endif
+
+ ret = nvadsp_mbox_send(&adsp_com_mbox, ADSP_OS_SUSPEND,
+ NVADSP_MBOX_SMSG, true, UINT_MAX);
+ if (ret) {
+ dev_err(dev, "failed to send with adsp com mbox\n");
+ goto out;
+ }
+
+ dev_dbg(dev, "Waiting for ADSP OS suspend...\n");
+ ret = wait_for_completion_timeout(&entered_wfi,
+ msecs_to_jiffies(ADSP_WFI_TIMEOUT));
+ if (WARN_ON(ret <= 0)) {
+ dev_err(dev, "Unable to suspend ADSP OS err = %d\n", ret);
+ ret = (ret < 0) ? ret : -ETIMEDOUT;
+ goto out;
+ }
+
+ /*
+ * Check L2_IDLE and L2_CLKSTOPPED in ADSP_STATUS
+ * NOTE: Standby mode in ADSP L2CC Power Control
+ * register should be enabled for this
+ */
+ do {
+ adsp_status = amisc_readl(drv_data, AMISC_ADSP_STATUS);
+ if ((adsp_status & AMISC_ADSP_L2_IDLE) &&
+ (adsp_status & AMISC_ADSP_L2_CLKSTOPPED))
+ break;
+ cnt++;
+ mdelay(1);
+ } while (cnt < 5);
+ if (cnt >= 5) {
+ dev_err(dev, "ADSP L2C clock not halted: 0x%x\n", adsp_status);
+ ret = -EDEADLK;
+ goto out;
+ }
+
+ ret = 0;
+ dev_dbg(dev, "ADSP OS suspended!\n");
+
+ drv_data->adsp_os_suspended = true;
+
+#ifdef CONFIG_TEGRA_ADSP_LPTHREAD
+ adsp_lpthread_set_suspend(drv_data->adsp_os_suspended);
+#endif
+
+ nvadsp_assert_adsp(drv_data);
+
+ out:
+ return ret;
+}
+
+static void __nvadsp_os_stop(bool reload)
+{
+ const struct firmware *fw = priv.os_firmware;
+ struct nvadsp_drv_data *drv_data;
+ struct device *dev;
+ int err = 0;
+
+ dev = &priv.pdev->dev;
+ drv_data = platform_get_drvdata(priv.pdev);
+
+#ifdef CONFIG_TEGRA_ADSP_ACTMON
+ ape_actmon_exit(priv.pdev);
+#endif
+
+#ifdef CONFIG_TEGRA_ADSP_DFS
+ adsp_dfs_core_exit(priv.pdev);
+#endif
+
+#ifdef CONFIG_TEGRA_ADSP_CPUSTAT
+ adsp_cpustat_exit(priv.pdev);
+#endif
+#if defined(CONFIG_TEGRA_ADSP_FILEIO)
+ if (drv_data->adspff_init) {
+ adspff_exit();
+ drv_data->adspff_init = false;
+ }
+#endif
+
+ err = nvadsp_mbox_send(&adsp_com_mbox,
+ ADSP_OS_STOP,
+ NVADSP_MBOX_SMSG, true, UINT_MAX);
+ if (err)
+ dev_err(dev, "failed to send stop msg to adsp\n");
+ err = wait_for_completion_timeout(&entered_wfi,
+ msecs_to_jiffies(ADSP_WFI_TIMEOUT));
+
+ /*
+ * ADSP needs to be in WFI/WFE state to properly reset it.
+ * However, when ADSPOS is getting stopped on error path,
+ * it cannot guarantee that ADSP is in WFI/WFE state.
+ * Reset it in either case. On failure, whole APE reset is
+ * required (happens on next APE power domain cycle).
+ */
+ nvadsp_assert_adsp(drv_data);
+
+ /* Don't reload ADSPOS if ADSP state is not WFI/WFE */
+ if (WARN_ON(err <= 0)) {
+ dev_err(dev, "%s: unable to enter wfi state err = %d\n",
+ __func__, err);
+ goto end;
+ }
+
+ if (reload && !drv_data->adsp_os_secload) {
+ struct nvadsp_debug_log *logger = &priv.logger;
+
+#ifdef CONFIG_DEBUG_FS
+ wake_up(&logger->wait_queue);
+ /* wait for LOGGER_TIMEOUT to complete filling the buffer */
+ wait_for_completion_timeout(&logger->complete,
+ msecs_to_jiffies(LOGGER_COMPLETE_TIMEOUT));
+#endif
+ /*
+ * reset the ram iterator to 0: after restart the writer
+ * starts again from the beginning of the buffer.
+ */
+ logger->debug_ram_rdr[0] = EOT;
+ logger->ram_iter = 0;
+ /* load a fresh copy of adsp.elf */
+ if (nvadsp_os_elf_load(fw))
+ dev_err(dev, "failed to reload %s\n",
+ drv_data->adsp_elf);
+ }
+
+ end:
+ return;
+}
+
+
+void nvadsp_os_stop(void)
+{
+ struct nvadsp_drv_data *drv_data;
+ struct device *dev;
+
+ if (!priv.pdev) {
+ pr_err("ADSP Driver is not initialized\n");
+ return;
+ }
+
+ dev = &priv.pdev->dev;
+ drv_data = platform_get_drvdata(priv.pdev);
+
+ mutex_lock(&priv.os_run_lock);
+ /* check if os is running else exit */
+ if (!priv.os_running)
+ goto end;
+
+ __nvadsp_os_stop(true);
+
+ priv.os_running = drv_data->adsp_os_running = false;
+
+ free_interrupts(&priv);
+#ifdef CONFIG_PM
+ if (pm_runtime_put_sync(dev) < 0)
+ dev_err(dev, "failed in pm_runtime_put_sync\n");
+#endif
+end:
+ mutex_unlock(&priv.os_run_lock);
+}
+EXPORT_SYMBOL(nvadsp_os_stop);
+
+int nvadsp_os_suspend(void)
+{
+ struct nvadsp_drv_data *drv_data;
+ int ret = -EINVAL;
+
+ if (!priv.pdev) {
+ pr_err("ADSP Driver is not initialized\n");
+ goto end;
+ }
+
+ drv_data = platform_get_drvdata(priv.pdev);
+
+ mutex_lock(&priv.os_run_lock);
+ /* check if os is running else exit */
+ if (!priv.os_running) {
+ ret = 0;
+ goto unlock;
+ }
+ ret = __nvadsp_os_suspend();
+ if (!ret) {
+#ifdef CONFIG_PM
+ struct device *dev = &priv.pdev->dev;
+
+ free_interrupts(&priv);
+ ret = pm_runtime_put_sync(&priv.pdev->dev);
+ if (ret < 0)
+ dev_err(dev, "failed in pm_runtime_put_sync\n");
+#endif
+ priv.os_running = drv_data->adsp_os_running = false;
+ } else {
+ dev_err(&priv.pdev->dev, "suspend failed with %d\n", ret);
+ dump_adsp_sys();
+ }
+unlock:
+ mutex_unlock(&priv.os_run_lock);
+end:
+ return ret;
+}
+EXPORT_SYMBOL(nvadsp_os_suspend);
+
+static void nvadsp_os_restart(struct work_struct *work)
+{
+ struct nvadsp_os_data *data =
+ container_of(work, struct nvadsp_os_data, restart_os_work);
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(data->pdev);
+ unsigned int wdt_virq = drv_data->agic_irqs[WDT_VIRQ];
+ int wdt_irq = drv_data->chip_data->wdt_irq;
+ struct device *dev = &data->pdev->dev;
+
+ disable_irq(wdt_virq);
+ dump_adsp_sys();
+ nvadsp_os_stop();
+
+ if (tegra_agic_irq_is_active(wdt_irq)) {
+ dev_info(dev, "wdt interrupt is active hence clearing\n");
+ tegra_agic_clear_active(wdt_irq);
+ }
+
+ if (tegra_agic_irq_is_pending(wdt_irq)) {
+ dev_info(dev, "wdt interrupt is pending hence clearing\n");
+ tegra_agic_clear_pending(wdt_irq);
+ }
+
+ dev_info(dev, "wdt interrupt is not pending or active...enabling\n");
+ enable_irq(wdt_virq);
+
+ data->adsp_num_crashes++;
+ if (data->adsp_num_crashes >= ALLOWED_CRASHES) {
+ /* making pdev NULL so that externally start is not called */
+ priv.pdev = NULL;
+ dev_crit(dev, "ADSP has crashed too many times(%d)\n",
+ data->adsp_num_crashes);
+ return;
+ }
+
+ if (nvadsp_os_start())
+ dev_crit(dev, "Unable to restart ADSP OS\n");
+}
+
+static irqreturn_t adsp_wfi_handler(int irq, void *arg)
+{
+ struct nvadsp_os_data *data = arg;
+ struct device *dev = &data->pdev->dev;
+
+ dev_dbg(dev, "%s\n", __func__);
+ complete(&entered_wfi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t adsp_wdt_handler(int irq, void *arg)
+{
+ struct nvadsp_os_data *data = arg;
+ struct nvadsp_drv_data *drv_data;
+ struct device *dev = &data->pdev->dev;
+
+ drv_data = platform_get_drvdata(data->pdev);
+
+ drv_data->adsp_crashed = true;
+ wake_up_interruptible(&drv_data->adsp_health_waitq);
+
+ if (!drv_data->adsp_unit_fpga) {
+ dev_crit(dev, "ADSP OS Hanged or Crashed! Restarting...\n");
+ schedule_work(&data->restart_os_work);
+ } else {
+ dev_crit(dev, "ADSP OS Hanged or Crashed!\n");
+ }
+ return IRQ_HANDLED;
+}
+
+void nvadsp_get_os_version(char *buf, int buf_size)
+{
+ struct nvadsp_drv_data *drv_data;
+ struct nvadsp_shared_mem *shared_mem;
+ struct nvadsp_os_info *os_info;
+
+ memset(buf, 0, buf_size);
+
+ if (!priv.pdev)
+ return;
+
+ drv_data = platform_get_drvdata(priv.pdev);
+ shared_mem = drv_data->shared_adsp_os_data;
+ if (shared_mem) {
+ os_info = &shared_mem->os_info;
+ strlcpy(buf, os_info->version, buf_size);
+ } else {
+ strlcpy(buf, "unavailable", buf_size);
+ }
+}
+EXPORT_SYMBOL(nvadsp_get_os_version);
+
+#ifdef CONFIG_DEBUG_FS
+static int show_os_version(struct seq_file *s, void *data)
+{
+ char ver_buf[MAX_OS_VERSION_BUF] = "";
+
+ nvadsp_get_os_version(ver_buf, MAX_OS_VERSION_BUF);
+ seq_printf(s, "version=\"%s\"\n", ver_buf);
+
+ return 0;
+}
+
+static int os_version_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_os_version, inode->i_private);
+}
+
+static const struct file_operations version_fops = {
+ .open = os_version_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#define RO_MODE S_IRUSR
+
+static int adsp_create_os_version(struct dentry *adsp_debugfs_root)
+{
+ struct device *dev = &priv.pdev->dev;
+ struct dentry *d;
+
+ d = debugfs_create_file("adspos_version", RO_MODE, adsp_debugfs_root,
+ NULL, &version_fops);
+ if (!d) {
+ dev_err(dev, "failed to create adsp_version\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+#if KERNEL_VERSION(5, 10, 0) > LINUX_VERSION_CODE
+static unsigned int adsp_health_poll(struct file *file,
+ poll_table *wait)
+#else
+static __poll_t adsp_health_poll(struct file *file,
+ poll_table *wait)
+#endif
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(priv.pdev);
+
+ poll_wait(file, &drv_data->adsp_health_waitq, wait);
+
+ if (drv_data->adsp_crashed)
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static const struct file_operations adsp_health_fops = {
+ .poll = adsp_health_poll,
+};
+
+static int adsp_create_adsp_health(struct dentry *adsp_debugfs_root)
+{
+ struct device *dev = &priv.pdev->dev;
+ struct dentry *d;
+
+ d = debugfs_create_file("adsp_health", RO_MODE, adsp_debugfs_root,
+ NULL, &adsp_health_fops);
+ if (!d) {
+ dev_err(dev, "failed to create adsp_health\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+#endif
+
+static ssize_t tegrafw_read_adsp(struct device *dev,
+ char *data, size_t size)
+{
+ nvadsp_get_os_version(data, size);
+ return strlen(data);
+}
+
+int __init nvadsp_os_probe(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+ uint16_t com_mid = ADSP_COM_MBOX_ID;
+ struct device *dev = &pdev->dev;
+ int ret = 0;
+
+ priv.unit_fpga_reset_reg = drv_data->base_regs[UNIT_FPGA_RST];
+ priv.hwmailbox_base = drv_data->base_regs[hwmb_reg_idx()];
+ priv.dram_region = drv_data->dram_region;
+
+ priv.adsp_os_addr = drv_data->adsp_mem[ADSP_OS_ADDR];
+ priv.adsp_os_size = drv_data->adsp_mem[ADSP_OS_SIZE];
+ priv.app_alloc_addr = drv_data->adsp_mem[ADSP_APP_ADDR];
+ priv.app_size = drv_data->adsp_mem[ADSP_APP_SIZE];
+
+ if (of_device_is_compatible(dev->of_node, "nvidia,tegra210-adsp")) {
+ drv_data->assert_adsp = __assert_adsp;
+ drv_data->deassert_adsp = __deassert_adsp;
+ }
+
+ ret = nvadsp_os_init(pdev);
+ if (ret) {
+ dev_err(dev, "failed to init os\n");
+ goto end;
+ }
+
+ ret = nvadsp_mbox_open(&adsp_com_mbox, &com_mid, "adsp_com_mbox",
+ NULL, NULL);
+ if (ret) {
+ dev_err(dev, "failed to open adsp com mbox\n");
+ goto end;
+ }
+
+ INIT_WORK(&priv.restart_os_work, nvadsp_os_restart);
+ mutex_init(&priv.fw_load_lock);
+ mutex_init(&priv.os_run_lock);
+
+ priv.pdev = pdev;
+#ifdef CONFIG_DEBUG_FS
+ priv.logger.dev = &pdev->dev;
+ if (adsp_create_debug_logger(drv_data->adsp_debugfs_root))
+ dev_err(dev, "unable to create adsp debug logger file\n");
+
+#ifdef CONFIG_TEGRA_ADSP_CONSOLE
+ priv.console.dev = &pdev->dev;
+ if (adsp_create_cnsl(drv_data->adsp_debugfs_root, &priv.console))
+ dev_err(dev, "unable to create adsp console file\n");
+#endif /* CONFIG_TEGRA_ADSP_CONSOLE */
+
+ if (adsp_create_os_version(drv_data->adsp_debugfs_root))
+ dev_err(dev, "unable to create adsp_version file\n");
+
+ if (adsp_create_adsp_health(drv_data->adsp_debugfs_root))
+ dev_err(dev, "unable to create adsp_health file\n");
+
+ drv_data->adsp_crashed = false;
+ init_waitqueue_head(&drv_data->adsp_health_waitq);
+
+#endif /* CONFIG_DEBUG_FS */
+
+ devm_tegrafw_register(dev, "APE", TFW_DONT_CACHE,
+ tegrafw_read_adsp, NULL);
+end:
+ return ret;
+}
diff --git a/drivers/platform/tegra/nvadsp/os.h b/drivers/platform/tegra/nvadsp/os.h
new file mode 100644
index 00000000..2f8e4b89
--- /dev/null
+++ b/drivers/platform/tegra/nvadsp/os.h
@@ -0,0 +1,198 @@
+/*
+ * os.h
+ *
+ * A header file containing data structures shared with ADSP OS
+ *
+ * Copyright (C) 2014-2022 NVIDIA Corporation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __TEGRA_NVADSP_OS_H
+#define __TEGRA_NVADSP_OS_H
+
+#include <linux/firmware.h>
+#include "adsp_shared_struct.h"
+
+#include "dev.h"
+
+#define CONFIG_ADSP_DRAM_LOG_WITH_TAG 1
+/* enable profiling of app load/init/start */
+#define RECORD_STATS 0
+
+#define SYM_NAME_SZ 128
+
+#define AMC_EVP_RESET_VEC_0 0x700
+#define AMC_EVP_UNDEF_VEC_0 0x704
+#define AMC_EVP_SWI_VEC_0 0x708
+#define AMC_EVP_PREFETCH_ABORT_VEC_0 0x70c
+#define AMC_EVP_DATA_ABORT_VEC_0 0x710
+#define AMC_EVP_RSVD_VEC_0 0x714
+#define AMC_EVP_IRQ_VEC_0 0x718
+#define AMC_EVP_FIQ_VEC_0 0x71c
+#define AMC_EVP_RESET_ADDR_0 0x720
+#define AMC_EVP_UNDEF_ADDR_0 0x724
+#define AMC_EVP_SWI_ADDR_0 0x728
+#define AMC_EVP_PREFETCH_ABORT_ADDR_0 0x72c
+#define AMC_EVP_DATA_ABORT_ADDR_0 0x730
+#define AMC_EVP_RSVD_ADDR_0 0x734
+#define AMC_EVP_IRQ_ADDR_0 0x738
+#define AMC_EVP_FIQ_ADDR_0 0x73c
+
+#define AMC_EVP_SIZE (AMC_EVP_FIQ_ADDR_0 - AMC_EVP_RESET_VEC_0 + 4)
+#define AMC_EVP_WSIZE (AMC_EVP_SIZE >> 2)
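+
+/*
+ * Editorial sketch (assumes an ioremapped AMC base and <linux/io.h>):
+ * the EVP block is a contiguous array of AMC_EVP_WSIZE 32-bit registers
+ * starting at AMC_EVP_RESET_VEC_0, so it can be saved word by word:
+ *
+ *	u32 evp[AMC_EVP_WSIZE];
+ *	int i;
+ *
+ *	for (i = 0; i < AMC_EVP_WSIZE; i++)
+ *		evp[i] = readl(amc_base + AMC_EVP_RESET_VEC_0 + (i * 4));
+ */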
+
+#define OS_LOAD_TIMEOUT 5000 /* ms */
+#define ADSP_COM_MBOX_ID 2
+
+#define MIN_ADSP_FREQ 38400000lu /* in Hz */
+
+/* macros used to find the current mode of ADSP */
+#define MODE_MASK 0x1f
+#define MODE_USR 0x10
+#define MODE_FIQ 0x11
+#define MODE_IRQ 0x12
+#define MODE_SVC 0x13
+#define MODE_MON 0x16
+#define MODE_ABT 0x17
+#define MODE_UND 0x1b
+#define MODE_SYS 0x1f
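+
+/*
+ * Editorial sketch: decode a mode field (e.g. the low bits of a saved
+ * CPSR) into a printable name using the definitions above.  The helper
+ * name is an assumption, not part of the original driver.
+ */
+static inline const char *adsp_mode_name(u32 mode)
+{
+	switch (mode & MODE_MASK) {
+	case MODE_USR: return "USR";
+	case MODE_FIQ: return "FIQ";
+	case MODE_IRQ: return "IRQ";
+	case MODE_SVC: return "SVC";
+	case MODE_MON: return "MON";
+	case MODE_ABT: return "ABT";
+	case MODE_UND: return "UND";
+	case MODE_SYS: return "SYS";
+	default:       return "UNKNOWN";
+	}
+}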
+
+/*
+ * ADSP OS Config
+ *
+ * DECOMPRESS (Bit 0) : Set if ADSP FW needs to be decompressed
+ * VIRT CONFIG (Bit 1) : Set if virtualized configuration
+ * DMA PAGE (Bits 7:4) : Contains DMA page information
+ */
+
+#define ADSP_CONFIG_DECOMPRESS_SHIFT 0
+#define ADSP_CONFIG_DECOMPRESS_EN 1
+#define ADSP_CONFIG_DECOMPRESS_MASK (1 << ADSP_CONFIG_DECOMPRESS_SHIFT)
+
+#define ADSP_CONFIG_VIRT_SHIFT 1
+#define ADSP_CONFIG_VIRT_EN 1
+#define ADSP_CONFIG_VIRT_MASK (1 << ADSP_CONFIG_VIRT_SHIFT)
+
+#define ADSP_CONFIG_DMA_PAGE_SHIFT 4
+#define ADSP_CONFIG_DMA_PAGE_MASK (0xF << ADSP_CONFIG_DMA_PAGE_SHIFT)
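+
+/*
+ * Editorial sketch: helpers showing how the config word described above
+ * is parsed with the masks and shifts.  The helper names are
+ * assumptions, not part of the original driver.
+ */
+static inline bool adsp_config_fw_compressed(u32 config)
+{
+	return (config & ADSP_CONFIG_DECOMPRESS_MASK) != 0;
+}
+
+static inline u32 adsp_config_dma_page(u32 config)
+{
+	return (config & ADSP_CONFIG_DMA_PAGE_MASK) >> ADSP_CONFIG_DMA_PAGE_SHIFT;
+}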
+
+enum adsp_os_cmd {
+ ADSP_OS_BOOT_COMPLETE,
+ ADSP_OS_SUSPEND,
+ ADSP_OS_RESUME,
+ ADSP_OS_STOP,
+};
+
+#if RECORD_STATS
+#define RECORD_STAT(x) \
+ (x = ktime_to_ns(ktime_get()) - x)
+#define EQUATE_STAT(x, y) \
+ (x = y)
+#define RECORD_TIMESTAMP(x) \
+ (x = nvadsp_get_timestamp_counter())
+#else
+#define RECORD_STAT(x)
+#define EQUATE_STAT(x, y)
+#define RECORD_TIMESTAMP(x)
+#endif
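+
+/*
+ * Usage sketch (editorial): RECORD_STAT(x) computes "now - x", so an
+ * elapsed time is obtained by seeding the field with the current
+ * timestamp before the step and recording it afterwards ("stats" below
+ * is an assumed pointer to one of the *_stats structures):
+ *
+ *	EQUATE_STAT(stats->ns_time_module_load, ktime_to_ns(ktime_get()));
+ *	(run the step being measured)
+ *	RECORD_STAT(stats->ns_time_module_load);
+ *
+ * Afterwards ns_time_module_load holds the elapsed time in nanoseconds,
+ * provided RECORD_STATS is enabled.
+ */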
+
+/**
+ * struct global_sym_info - Global Symbol information required by app loader.
+ * @name: Name of the symbol
+ * @addr: Address of the symbol
+ * @info: Type and binding attributes
+ */
+struct global_sym_info {
+ char name[SYM_NAME_SZ];
+ uint32_t addr;
+ unsigned char info;
+};
+
+struct adsp_module {
+ const char *name;
+ void *handle;
+ void *module_ptr;
+ uint32_t adsp_module_ptr;
+ size_t size;
+ const struct app_mem_size mem_size;
+ bool dynamic;
+ char version[16];
+};
+
+struct app_load_stats {
+ s64 ns_time_load;
+ s64 ns_time_service_parse;
+ s64 ns_time_module_load;
+ s64 ns_time_req_firmware;
+ s64 ns_time_layout;
+ s64 ns_time_native_load;
+ s64 ns_time_load_mbox_send_time;
+ s64 ns_time_load_wait_time;
+ s64 ns_time_native_load_complete;
+ u64 ns_time_adsp_map;
+ u64 ns_time_adsp_app_load;
+ u64 ns_time_adsp_send_status;
+ u64 adsp_receive_timestamp;
+ u64 host_send_timestamp;
+ u64 host_receive_timestamp;
+};
+
+struct app_init_stats {
+ s64 ns_time_app_init;
+ s64 ns_time_app_alloc;
+ s64 ns_time_instance_memory;
+ s64 ns_time_native_call;
+ u64 ns_time_adsp_app_init;
+ u64 ns_time_adsp_mem_instance_map;
+ u64 ns_time_adsp_init_call;
+ u64 ns_time_adsp_send_status;
+ u64 adsp_receive_timestamp;
+};
+
+struct app_start_stats {
+ s64 ns_time_app_start;
+ s64 ns_time_native_call;
+ s64 ns_time_adsp_app_start;
+ u64 ns_time_app_thread_creation;
+ u64 ns_time_app_thread_detach;
+ u64 ns_time_app_thread_resume;
+ u64 ns_time_adsp_send_status;
+ u64 adsp_receive_timestamp;
+};
+
+static inline int nvadsp_os_init(struct platform_device *pdev)
+{
+ struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
+
+ if (drv_data->chip_data->os_init)
+ return drv_data->chip_data->os_init(pdev);
+
+ return -EINVAL;
+}
+
+int nvadsp_os_probe(struct platform_device *);
+int nvadsp_app_module_probe(struct platform_device *);
+void *nvadsp_da_to_va_mappings(u64 da, int len);
+int nvadsp_add_load_mappings(phys_addr_t pa, void *mapping, int len);
+struct elf32_shdr *nvadsp_get_section(const struct firmware *, char *);
+struct global_sym_info *find_global_symbol(const char *);
+void update_nvadsp_app_shared_ptr(void *);
+
+struct adsp_module *load_adsp_dynamic_module(const char *, const char *,
+ struct device *);
+struct adsp_module *load_adsp_static_module(const char *,
+ struct adsp_shared_app *, struct device *);
+void unload_adsp_module(struct adsp_module *);
+
+int allocate_memory_from_adsp(void **, unsigned int);
+bool is_adsp_dram_addr(u64);
+int load_adsp_static_apps(void);
+#endif /* __TEGRA_NVADSP_OS_H */