Merge "nvadsp: Merge nvadsp from kernel/nvidia to kernel/nvidia-oot" into dev-main

This commit is contained in:
Gerrit Code Review
2023-06-21 19:40:20 -07:00
42 changed files with 13452 additions and 0 deletions

View File

@@ -0,0 +1,93 @@
config TEGRA_NVADSP
	tristate "Enable Host ADSP driver"
	default n
	select ARM_GIC_PM
	select FIQ
	help
	  Enables support for Host ADSP driver.
	  If unsure, say N

config TEGRA_NVADSP_ON_SMMU
	bool "Use SMMU to relocate ADSP"
	depends on (TEGRA_IOMMU_SMMU || OF_TEGRA_IOMMU_SMMU) && TEGRA_NVADSP
	default n
	help
	  Use SMMU to relocate ADSP OS.

config TEGRA_ADSP_DFS
	bool "Enable ADSP DFS"
	depends on TEGRA_NVADSP
	default n
	help
	  Enable ADSP dynamic frequency scaling. Use this config
	  to scale adsp frequency via actmon or set fixed value.
	  If unsure, say N

config TEGRA_ADSP_ACTMON
	bool "Enable ADSP ACTMON"
	depends on TEGRA_ADSP_DFS
	default n
	help
	  Enable ADSP actmon. It converts adsp activity to frequency and
	  asks adsp dfs to set the adsp frequency. Use it if adsp frequency
	  is to be scaled dynamically by actmon.
	  If unsure, say N

config TEGRA_ADSP_CPUSTAT
	bool "Enable ADSP CPUSTAT"
	depends on DEBUG_FS && TEGRA_NVADSP && !TEGRA_ADSP_ACTMON
	default n
	help
	  Enable ADSP cpu usage measurement using actmon.
	  If unsure, say N

config TEGRA_ADSP_FILEIO
	bool "Enable ADSP file io"
	depends on TEGRA_NVADSP
	default n
	help
	  Enable dumping to and reading from file on host from ADSP.
	  If unsure, say N

config TEGRA_ADSP_LPTHREAD
	bool "Enable ADSP usage calc by lpthread"
	depends on DEBUG_FS && TEGRA_NVADSP
	default n
	help
	  Enable calculation of ADSP usage by running a low priority
	  thread in background whenever OS is not suspended. Can be
	  enabled or disabled by echo to adsp_usage file.
	  If unsure, say N

config TEGRA_EMC_APE_DFS
	bool "Enable emc dfs due to APE"
	depends on TEGRA_NVADSP
	default n
	help
	  Enable emc dfs due to APE DRAM access.
	  If unsure, say N

config TEGRA_ADSP_CONSOLE
	bool "Enable ADSP console"
	depends on TEGRA_NVADSP
	default y
	help
	  Enable ADSP console access.
	  If unsure, say N

config MBOX_ACK_HANDLER
	bool "Enable mailbox acknowledge handler"
	depends on TEGRA_NVADSP
	default n
	help
	  Enable mailbox acknowledge handler.
	  If unsure, say N

View File

@@ -0,0 +1,42 @@
GCOV_PROFILE := y

ccflags-y += -Werror

obj-$(CONFIG_TEGRA_NVADSP) := nvadsp.o

# Core objects always linked into nvadsp.ko.
nvadsp-objs += dev.o os.o app.o app_loader_linker.o \
	amc.o nvadsp_shared_sema.o \
	hwmailbox.o mailbox.o msgq.o \
	mem_manager.o aram_manager.o dram_app_mem_manager.o \
	dev-t21x.o os-t21x.o dev-t18x.o os-t18x.o acast.o

# Optional objects: the standard kbuild "nvadsp-$(CONFIG_FOO) += foo.o" idiom
# replaces the verbose ifeq blocks.  All of these configs are bool, so the
# only values are y (object added) or unset (added to nvadsp-, i.e. ignored) —
# behavior is identical to the previous ifeq form.
nvadsp-$(CONFIG_TEGRA_ADSP_DFS)      += adsp_dfs.o
nvadsp-$(CONFIG_TEGRA_ADSP_ACTMON)   += ape_actmon.o
nvadsp-$(CONFIG_TEGRA_EMC_APE_DFS)   += emc_dfs.o
nvadsp-$(CONFIG_TEGRA_ADSP_CONSOLE)  += adsp_console_dbfs.o
nvadsp-$(CONFIG_TEGRA_ADSP_CPUSTAT)  += adsp_cpustat.o
nvadsp-$(CONFIG_TEGRA_ADSP_FILEIO)   += adspff.o
nvadsp-$(CONFIG_TEGRA_ADSP_LPTHREAD) += adsp_lpthread.o

# Extra include path for the virtualized audio IVC transport.
ifeq ($(CONFIG_TEGRA_VIRT_AUDIO_IVC),y)
ccflags-y += -I$(srctree.nvidia)/drivers/platform/tegra/nvaudio_ivc/
endif

View File

@@ -0,0 +1,240 @@
/*
* Copyright (C) 2016-2022 NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/io.h>
#include "dev.h"
#include "dev-t18x.h"
#define AST_CONTROL 0x000
#define AST_STREAMID_CTL_0 0x020
#define AST_STREAMID_CTL_1 0x024
#define AST_RGN_SLAVE_BASE_LO 0x100
#define AST_RGN_SLAVE_BASE_HI 0x104
#define AST_RGN_MASK_BASE_LO 0x108
#define AST_RGN_MASK_BASE_HI 0x10c
#define AST_RGN_MASTER_BASE_LO 0x110
#define AST_RGN_MASTER_BASE_HI 0x114
#define AST_RGN_CONTROL 0x118
#define AST_PAGE_MASK (~0xFFF)
#define AST_LO_SHIFT 32
#define AST_LO_MASK 0xFFFFFFFF
#define AST_PHY_SID_IDX 0
#define AST_APE_SID_IDX 1
#define AST_NS (1 << 3)
#define AST_CARVEOUTID(ID) (ID << 5)
#define AST_VMINDEX(IDX) (IDX << 15)
#define AST_PHSICAL(PHY) (PHY << 19)
#define AST_STREAMID(ID) (ID << 8)
#define AST_VMINDEX_ENABLE (1 << 0)
#define AST_RGN_ENABLE (1 << 0)
#define AST_RGN_OFFSET 0x20
/*
 * One ACAST region mapping: everything needed to program a single AST
 * region (and, optionally, one stream-ID control register) in
 * tegra18x_acast_map().
 */
struct acast_region {
	u32 rgn;	/* AST region index; scales AST_RGN_OFFSET */
	u32 rgn_ctrl;	/* bits OR-ed into AST_RGN_CONTROL */
	u32 strmid_reg;	/* AST_STREAMID_CTL_* offset; 0 = skip stream-ID setup */
	u32 strmid_ctrl;/* value written to strmid_reg when non-zero */
	u64 slave;	/* slave-side (ADSP-visible) base address */
	u64 size;	/* region size in bytes (mask is size - 1) */
	u64 master;	/* master-side (system) base address */
};
#define NUM_MAX_ACAST 2
#define ACAST_RGN_PHY 0x0
#define ACAST_RGN_CTL_PHY (AST_PHSICAL(1) | AST_CARVEOUTID(0x7))
#define ACAST_RGN_VM 0x2
#define ACAST_VMINDEX 1
#define ACAST_RGN_CTL_VM(IDX) AST_VMINDEX(IDX)
#define ACAST_SID_REG_EVAL(IDX) AST_STREAMID_CTL_##IDX
#define ACAST_STRMID_REG(IDX) ACAST_SID_REG_EVAL(IDX)
#if KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE
/* Older kernels do not have this function, so stubbing it */
static inline int of_property_read_u64_index(const struct device_node *np,
		const char *propname, u32 index, u64 *out_value)
{
	/* Always fails; callers treat this as "property not present". */
	return -ENOSYS;
}
#endif
/* Write @val to ACAST register @reg (byte offset from the mapped base). */
static inline void acast_write(void __iomem *acast, u32 reg, u32 val)
{
	writel(val, acast + reg);
}
/* Read the ACAST register at byte offset @reg from the mapped base. */
static inline u32 acast_read(void __iomem *acast, u32 reg)
{
	return readl(acast + reg);
}
/*
 * Translate a (region index, per-region register offset) pair into the
 * flat MMIO offset of that register: each region's window is
 * AST_RGN_OFFSET bytes wide.
 */
static inline u32 acast_rgn_reg(u32 rgn, u32 reg)
{
	return reg + (rgn * AST_RGN_OFFSET);
}
/*
 * Program one AST region mapping into an ACAST instance.
 *
 * The write order is deliberate: master base, size mask, control bits and
 * (optionally) stream ID are programmed first; the slave base LO word is
 * written last because it carries AST_RGN_ENABLE, so the region only goes
 * live once fully configured.
 */
static void tegra18x_acast_map(struct device *dev,
		void __iomem *acast, u32 rgn, u32 rgn_ctrl,
		u32 strmid_reg, u32 strmid_ctrl,
		u64 slave, u64 size, u64 master)
{
	u32 val;

	/* A region whose enable bit is already set is left untouched. */
	val = acast_read(acast, acast_rgn_reg(rgn, AST_RGN_SLAVE_BASE_LO));
	if (val & AST_RGN_ENABLE) {
		dev_warn(dev, "ACAST rgn %u already mapped...skipping\n", rgn);
		return;
	}

	/* Master (system-side) base address: low word, then high word. */
	val = master & AST_LO_MASK;
	acast_write(acast,
		acast_rgn_reg(rgn, AST_RGN_MASTER_BASE_LO), val);
	val = master >> AST_LO_SHIFT;
	acast_write(acast,
		acast_rgn_reg(rgn, AST_RGN_MASTER_BASE_HI), val);

	/* Size mask is (size - 1), page-granular in the LO word. */
	val = ((size - 1) & AST_PAGE_MASK) & AST_LO_MASK;
	acast_write(acast,
		acast_rgn_reg(rgn, AST_RGN_MASK_BASE_LO), val);
	val = (size - 1) >> AST_LO_SHIFT;
	acast_write(acast,
		acast_rgn_reg(rgn, AST_RGN_MASK_BASE_HI), val);

	/* OR the caller's control bits on top of the current settings. */
	val = acast_read(acast, acast_rgn_reg(rgn, AST_RGN_CONTROL));
	val |= rgn_ctrl;
	acast_write(acast,
		acast_rgn_reg(rgn, AST_RGN_CONTROL), val);

	/* Stream-ID programming is optional; strmid_reg == 0 means skip. */
	if (strmid_reg)
		acast_write(acast, strmid_reg, strmid_ctrl);

	/* Slave base last; the LO write below enables the region. */
	val = slave >> AST_LO_SHIFT;
	acast_write(acast,
		acast_rgn_reg(rgn, AST_RGN_SLAVE_BASE_HI), val);
	val = (slave & AST_LO_MASK) | AST_RGN_ENABLE;
	acast_write(acast,
		acast_rgn_reg(rgn, AST_RGN_SLAVE_BASE_LO), val);
}
/*
 * Map one ACAST instance and program @num_regions region mappings into it.
 *
 * Returns 0 on success or a negative errno.
 *
 * Fix: devm_ioremap() returns NULL on failure, not an ERR_PTR.  The previous
 * IS_ERR_OR_NULL() + PTR_ERR() pairing evaluated PTR_ERR(NULL) == 0, so a
 * failed mapping was reported to the caller as success.
 */
static int tegra18x_acast_init(struct device *dev,
	uint32_t acast_addr, uint32_t acast_size,
	struct acast_region *acast_regions, uint32_t num_regions)
{
	void __iomem *acast_base;
	uint32_t i;	/* unsigned to match num_regions */

	acast_base = devm_ioremap(dev, acast_addr, acast_size);
	if (!acast_base) {
		dev_err(dev, "failed to map ACAST 0x%x\n", acast_addr);
		return -ENOMEM;
	}

	for (i = 0; i < num_regions; i++) {
		tegra18x_acast_map(dev, acast_base,
				   acast_regions[i].rgn,
				   acast_regions[i].rgn_ctrl,
				   acast_regions[i].strmid_reg,
				   acast_regions[i].strmid_ctrl,
				   acast_regions[i].slave,
				   acast_regions[i].size,
				   acast_regions[i].master);

		dev_dbg(dev, "i:%d rgn:0x%x rgn_ctrl:0x%x ",
			i, acast_regions[i].rgn, acast_regions[i].rgn_ctrl);
		dev_dbg(dev, "strmid_reg:0x%x strmid_ctrl:0x%x ",
			acast_regions[i].strmid_reg,
			acast_regions[i].strmid_ctrl);
		dev_dbg(dev, "slave:0x%llx size:0x%llx master:0x%llx\n",
			acast_regions[i].slave, acast_regions[i].size,
			acast_regions[i].master);
	}

	return 0;
}
/*
 * Set up ACAST mappings for the ADSP on T18x.
 *
 * Builds one acast_region configuration — either a physical carveout
 * mapping (when a carveout memory region is present) or an SMMU/VM
 * mapping derived from the "iommus" and "iommu-resv-regions" DT
 * properties — then applies it to every ACAST instance listed in the
 * "nvidia,acast_config" DT property (address/size pairs).
 *
 * Returns 0 on success (including the "nothing to do" warning paths),
 * or the first error from tegra18x_acast_init().
 */
int nvadsp_acast_t18x_init(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	struct resource *co_mem = &drv_data->co_mem;
	uint32_t acast_addr, acast_size;
	int iter, num_acast = 0, ret = 0;
	struct acast_region acast_config;

	if (co_mem->start) {
		/* Physical carveout: map ADSP OS memory to the carveout. */
		acast_config.rgn = ACAST_RGN_PHY;
		acast_config.rgn_ctrl = ACAST_RGN_CTL_PHY;
		acast_config.strmid_reg = 0;
		acast_config.strmid_ctrl = 0;
		acast_config.slave = drv_data->adsp_mem[ADSP_OS_ADDR];
		acast_config.size = drv_data->adsp_mem[ADSP_OS_SIZE];
		acast_config.master = co_mem->start;
	} else {
		uint32_t stream_id;
		uint64_t iommu_addr_start, iommu_addr_end;

		/* SMMU path: stream ID is the second cell of "iommus". */
		if (of_property_read_u32_index(dev->of_node,
			"iommus", 1, &stream_id)) {
			dev_warn(dev, "no SMMU stream ID found\n");
			goto exit;
		}

		/* Reserved IOVA window: [start, end) from DT.
		 * NOTE(review): indices 1 and 2 of "iommu-resv-regions" are
		 * assumed to be start and end — confirm against the binding. */
		if (of_property_read_u64_index(dev->of_node,
			"iommu-resv-regions", 1, &iommu_addr_start)) {
			dev_warn(dev, "no IOMMU reserved region\n");
			goto exit;
		}

		if (of_property_read_u64_index(dev->of_node,
			"iommu-resv-regions", 2, &iommu_addr_end)) {
			dev_warn(dev, "no IOMMU reserved region\n");
			goto exit;
		}

		acast_config.rgn = ACAST_RGN_VM;
		acast_config.rgn_ctrl = ACAST_RGN_CTL_VM(ACAST_VMINDEX);
		acast_config.strmid_reg = ACAST_STRMID_REG(ACAST_VMINDEX);
		acast_config.strmid_ctrl = AST_STREAMID(stream_id) |
			AST_VMINDEX_ENABLE;
		acast_config.slave = iommu_addr_start;
		acast_config.size = (iommu_addr_end - acast_config.slave);
		acast_config.master = iommu_addr_start;
	}

	/* "nvidia,acast_config" holds up to NUM_MAX_ACAST (addr, size)
	 * pairs; a missing pair is skipped, not treated as an error. */
	for (iter = 0; iter < (NUM_MAX_ACAST * 2); iter += 2) {
		if (of_property_read_u32_index(dev->of_node,
			"nvidia,acast_config", iter, &acast_addr))
			continue;

		if (of_property_read_u32_index(dev->of_node,
			"nvidia,acast_config", (iter + 1), &acast_size))
			continue;

		ret = tegra18x_acast_init(dev, acast_addr, acast_size,
					  &acast_config, 1);
		if (ret)
			goto exit;

		num_acast++;
	}

	if (num_acast == 0)
		dev_warn(dev, "no ACAST configurations found\n");

exit:
	return ret;
}

View File

@@ -0,0 +1,449 @@
/*
* adsp_console_dbfs.c
*
* adsp mailbox console driver
*
* Copyright (C) 2014-2022, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tegra_nvadsp.h>
#include <linux/platform_device.h>
#include <uapi/misc/adsp_console_ioctl.h>
#include <linux/uaccess.h>
#include "dev.h"
#include "adsp_console_dbfs.h"
#define USE_RUN_APP_API
static int open_cnt;
#define ADSP_APP_CTX_MAX 32
static uint64_t adsp_app_ctx_vals[ADSP_APP_CTX_MAX];
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)
#define ACCESS_OK(addr, size) access_ok(0, addr, size)
#else
#define ACCESS_OK(addr, size) access_ok(addr, size)
#endif
/*
 * Record an app context handle in the first free slot of the global
 * tracking table.  Returns 0 on success, -EINVAL when @ctx is 0 or the
 * table is full.
 */
static int adsp_app_ctx_add(uint64_t ctx)
{
	int slot;

	if (!ctx)
		return -EINVAL;

	for (slot = 0; slot < ADSP_APP_CTX_MAX; slot++) {
		if (!adsp_app_ctx_vals[slot]) {
			adsp_app_ctx_vals[slot] = ctx;
			return 0;
		}
	}

	return -EINVAL;
}
/*
 * Verify that @ctx was previously registered via adsp_app_ctx_add().
 * Returns 0 when found, -EINVAL for 0 or unknown handles — this is the
 * guard that stops the ioctl path dereferencing arbitrary user values.
 */
static int adsp_app_ctx_check(uint64_t ctx)
{
	int slot;

	if (!ctx)
		return -EINVAL;

	for (slot = 0; slot < ADSP_APP_CTX_MAX; slot++) {
		if (adsp_app_ctx_vals[slot] == ctx)
			return 0;
	}

	return -EINVAL;
}
/* Drop @ctx from the tracking table; unknown handles are ignored. */
static void adsp_app_ctx_remove(uint64_t ctx)
{
	int slot;

	for (slot = 0; slot < ADSP_APP_CTX_MAX; slot++) {
		if (adsp_app_ctx_vals[slot] != ctx)
			continue;
		adsp_app_ctx_vals[slot] = 0;
		break;
	}
}
/*
 * Open handler for the adsp_console debugfs node.  Single-open device:
 * a second open fails with -EBUSY.  When the ADSP OS is already running,
 * the shared send mailbox (fixed id 30) is opened here; otherwise mailbox
 * setup is deferred to the ADSP_CNSL_LOAD ioctl.
 *
 * NOTE(review): open_cnt is a plain int with no locking, so two
 * concurrent opens could race past the -EBUSY check — confirm whether
 * debugfs serializes open for this use case.
 */
static int adsp_consol_open(struct inode *i, struct file *f)
{
	int ret;
	uint16_t snd_mbox_id = 30;
	struct nvadsp_cnsl *console = i->i_private;
	struct device *dev = console->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);

	if (open_cnt)
		return -EBUSY;
	open_cnt++;

	ret = 0;
	f->private_data = console;

	/* OS not up yet: succeed now, mailbox is opened by LOAD later. */
	if (!drv_data->adsp_os_running)
		goto exit_open;

	ret = nvadsp_mbox_open(&console->shl_snd_mbox, &snd_mbox_id,
			       "adsp_send_cnsl", NULL, NULL);
	if (!ret)
		goto exit_open;

	/* Mailbox open failed: clear state and allow a later retry. */
	pr_err("adsp_consol: Failed to init adsp_consol send mailbox");
	memset(&console->shl_snd_mbox, 0, sizeof(struct nvadsp_mbox));
	open_cnt--;

exit_open:
	return ret;
}
/*
 * Release handler: closes the shared send mailbox (if it was opened) and
 * drops the single-open count.  A mailbox id of 0 means "never opened",
 * so the close is skipped in that case and when the OS is not running.
 */
static int adsp_consol_close(struct inode *i, struct file *f)
{
	int ret = 0;
	struct nvadsp_cnsl *console = i->i_private;
	struct nvadsp_mbox *mbox = &console->shl_snd_mbox;
	struct device *dev = console->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);

	open_cnt--;

	if (!drv_data->adsp_os_running || (0 == mbox->id))
		goto exit_close;

	ret = nvadsp_mbox_close(mbox);
	if (ret)
		pr_err("adsp_consol: Failed to close adsp_consol send mailbox)");

	/* Zero the mailbox so a future open starts from a clean state. */
	memset(mbox, 0, sizeof(struct nvadsp_mbox));

exit_close:
	return ret;
}
/*
 * Ioctl dispatcher for the ADSP console.
 *
 * Commands cover OS lifecycle (LOAD / SUSPEND / STOP / RESUME), app
 * lifecycle (RUN_APP / STOP_APP, via opaque uint64 context handles that
 * are validated against the adsp_app_ctx table), and mailbox plumbing
 * (OPN_MBX / CLOSE_MBX / PUT_MBX / GET_MBX / PUT_DATA).
 *
 * Precondition checks: except for LOAD and RESUME every command requires
 * a running ADSP OS, and except for LOAD every command requires the
 * shared send mailbox to be open (id != 0).
 */
static long
adsp_consol_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	uint16_t *mid;
	uint16_t mbxid = 0;
	uint32_t data;
	uint64_t ctx2;
	nvadsp_app_info_t *app_info;
	struct adsp_consol_run_app_arg_t app_args;
	struct nvadsp_cnsl *console = f->private_data;
	struct nvadsp_mbox *mbox;
	struct device *dev = console->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
	void __user *uarg = (void __user *)arg;

	if (_IOC_TYPE(cmd) != NV_ADSP_CONSOLE_MAGIC)
		return -EFAULT;

	/* All commands except LOAD/RESUME need the OS up. */
	if ((_IOC_NR(cmd) != _IOC_NR(ADSP_CNSL_LOAD)) &&
	    (_IOC_NR(cmd) != _IOC_NR(ADSP_CNSL_RESUME)) &&
	    (!drv_data->adsp_os_running)) {
		dev_info(dev, "adsp_consol: os not running.");
		return -EPERM;
	}

	/* All commands except LOAD need the send mailbox open. */
	if ((_IOC_NR(cmd) != _IOC_NR(ADSP_CNSL_LOAD)) &&
	    (0 == console->shl_snd_mbox.id)) {
		dev_info(dev, "adsp_consol: Mailboxes not open.");
		return -EPERM;
	}

	switch (_IOC_NR(cmd)) {
	case _IOC_NR(ADSP_CNSL_LOAD):
		/* Load + start the OS, then open the send mailbox (id 30),
		 * mirroring what adsp_consol_open() does when the OS is
		 * already running.  A no-op if the OS is up. */
		ret = 0;
		if (drv_data->adsp_os_running)
			break;
		mbxid = 30;
		mbox = &console->shl_snd_mbox;
		ret = nvadsp_os_load();
		if (ret) {
			dev_info(dev, "adsp_consol: Load OS Failed.");
			break;
		}
		ret = nvadsp_os_start();
		if (ret) {
			dev_info(dev, "adsp_consol: Start OS Failed.");
			break;
		}
		ret = nvadsp_mbox_open(mbox, &mbxid,
				       "adsp_send_cnsl", NULL, NULL);
		if (!ret)
			break;
		pr_err("adsp_consol: Failed to init adsp_consol send mailbox");
		memset(mbox, 0, sizeof(struct nvadsp_mbox));
		break;
	case _IOC_NR(ADSP_CNSL_SUSPEND):
		ret = nvadsp_os_suspend();
		if (ret)
			dev_info(dev, "adsp_consol: OS Suspend Failed.");
		break;
	case _IOC_NR(ADSP_CNSL_STOP):
		nvadsp_os_stop();
		break;
	case _IOC_NR(ADSP_CNSL_RESUME):
		/* Resume is only valid from the suspended state. */
		if (!drv_data->adsp_os_suspended) {
			dev_info(dev, "adsp_consol: OS is not suspended to perform resume.");
			break;
		}
		ret = nvadsp_os_start();
		if (ret)
			dev_info(dev, "adsp_consol: OS Resume Failed.");
		break;
	case _IOC_NR(ADSP_CNSL_RUN_APP):
		if (!ACCESS_OK(uarg, sizeof(struct adsp_consol_run_app_arg_t)))
			return -EACCES;
		ret = copy_from_user(&app_args, uarg,
				     sizeof(app_args));
		if (ret) {
			ret = -EACCES;
			break;
		}
		dev_info(dev, "Core ID: %d\n", app_args.core_id);
		/* Force NUL termination of the user-supplied name.
		 * NOTE(review): this writes index NVADSP_NAME_SZ_MAX —
		 * confirm app_name is declared with size
		 * NVADSP_NAME_SZ_MAX + 1, otherwise this is one past the
		 * end of the array. */
		app_args.app_name[NVADSP_NAME_SZ_MAX] = '\0';
#ifdef USE_RUN_APP_API
		/* Single-call path: load + init + start in one API.  The
		 * returned app info pointer is handed back to userspace as
		 * the opaque ctx2 handle and tracked for later STOP_APP. */
		app_args.ctx2 = (uint64_t)nvadsp_run_app(NULL,
			app_args.app_name,
			(nvadsp_app_args_t *)&app_args.args[0],
			NULL, 0, app_args.core_id, true);
		if (!app_args.ctx2) {
			dev_info(dev, "adsp_consol: unable to run %s\n",
				 app_args.app_name);
			return -EINVAL;
		}
		if (adsp_app_ctx_add(app_args.ctx2)) {
			dev_info(dev, "adsp_consol: unable to add %s ctx\n",
				 app_args.app_name);
			return -EINVAL;
		}
#else
		/* Three-step path: load (ctx1) -> init (ctx2) -> start. */
		app_args.ctx1 = (uint64_t)nvadsp_app_load(app_args.app_path,
			app_args.app_name);
		if (!app_args.ctx1) {
			dev_info(dev,
				 "adsp_consol: dynamic app load failed %s\n",
				 app_args.app_name);
			return -EINVAL;
		}
		if (adsp_app_ctx_add(app_args.ctx1)) {
			dev_info(dev, "adsp_consol: unable to add %s ctx\n",
				 app_args.app_name);
			return -EINVAL;
		}
		dev_info(dev, "adsp_consol: calling nvadsp_app_init\n");
		app_args.ctx2 =
			(uint64_t)nvadsp_app_init((void *)app_args.ctx1, NULL);
		if (!app_args.ctx2) {
			dev_info(dev,
				 "adsp_consol: unable to initilize the app\n");
			return -EINVAL;
		}
		if (adsp_app_ctx_add(app_args.ctx2)) {
			dev_info(dev, "adsp_consol: unable to add %s ctx\n",
				 app_args.app_name);
			return -EINVAL;
		}
		dev_info(dev, "adsp_consol: calling nvadsp_app_start\n");
		ret = nvadsp_app_start((void *)app_args.ctx2);
		if (ret) {
			dev_info(dev, "adsp_consol: unable to start the app\n");
			break;
		}
#endif
		/* Return the context handles to userspace. */
		ret = copy_to_user((void __user *) arg, &app_args,
				   sizeof(struct adsp_consol_run_app_arg_t));
		if (ret)
			ret = -EACCES;
		break;
	case _IOC_NR(ADSP_CNSL_STOP_APP):
		if (!ACCESS_OK(uarg, sizeof(struct adsp_consol_run_app_arg_t)))
			return -EACCES;
		ret = copy_from_user(&app_args, uarg,
				     sizeof(app_args));
		if (ret) {
			ret = -EACCES;
			break;
		}
#ifdef USE_RUN_APP_API
		if (!app_args.ctx2) {
			ret = -EACCES;
			break;
		}
		/* Reject handles that were not issued by RUN_APP. */
		if (adsp_app_ctx_check(app_args.ctx2)) {
			dev_info(dev, "adsp_consol: unable to check %s ctx\n",
				 app_args.app_name);
			return -EINVAL;
		}
		app_args.ctx1 = (uint64_t)
			((nvadsp_app_info_t *)app_args.ctx2)->handle;
		nvadsp_exit_app((nvadsp_app_info_t *)app_args.ctx2, false);
		nvadsp_app_unload((const void *)app_args.ctx1);
		adsp_app_ctx_remove(app_args.ctx2);
#else
		if ((!app_args.ctx2) || (!app_args.ctx1)) {
			ret = -EACCES;
			break;
		}
		if (adsp_app_ctx_check(app_args.ctx2) ||
		    adsp_app_ctx_check(app_args.ctx1)) {
			dev_info(dev, "adsp_consol: unable to check %s ctx\n",
				 app_args.app_name);
			return -EINVAL;
		}
		nvadsp_app_deinit((void *)app_args.ctx2);
		nvadsp_app_unload((void *)app_args.ctx1);
		adsp_app_ctx_remove(app_args.ctx2);
		adsp_app_ctx_remove(app_args.ctx1);
#endif
		break;
	case _IOC_NR(ADSP_CNSL_CLR_BUFFER):
		/* Intentionally a no-op. */
		break;
	case _IOC_NR(ADSP_CNSL_OPN_MBX):
		/* Open the per-app mailbox; the mailbox id is read from the
		 * first 16 bits of the app's shared memory when present. */
		if (!ACCESS_OK(uarg, sizeof(ctx2)))
			return -EACCES;
		ret = copy_from_user(&ctx2, uarg, sizeof(ctx2));
		if (ret) {
			ret = -EACCES;
			break;
		}
		if (adsp_app_ctx_check(ctx2)) {
			dev_info(dev, "adsp_consol: unable to check ctx\n");
			return -EINVAL;
		}
		app_info = (nvadsp_app_info_t *)ctx2;
		if (app_info && app_info->mem.shared) {
			mid = (short *)(app_info->mem.shared);
			dev_info(dev, "adsp_consol: open %x\n", *mid);
			mbxid = *mid;
		}
		ret = nvadsp_mbox_open(&console->app_mbox, &mbxid,
				       "app_mbox", NULL, NULL);
		if (ret) {
			pr_err("adsp_consol: Failed to open app mailbox");
			ret = -EACCES;
		}
		break;
	case _IOC_NR(ADSP_CNSL_CLOSE_MBX):
		/* Drain any queued messages before closing. */
		mbox = &console->app_mbox;
		while (!nvadsp_mbox_recv(mbox, &data, 0, 0))
			;
		ret = nvadsp_mbox_close(mbox);
		if (ret)
			break;
		memset(mbox, 0, sizeof(struct nvadsp_mbox));
		break;
	case _IOC_NR(ADSP_CNSL_PUT_MBX):
		/* Send one 32-bit word to the app mailbox. */
		if (!ACCESS_OK(uarg, sizeof(uint32_t)))
			return -EACCES;
		ret = copy_from_user(&data, uarg,
				     sizeof(uint32_t));
		if (ret) {
			ret = -EACCES;
			break;
		}
		ret = nvadsp_mbox_send(&console->app_mbox, data,
				       NVADSP_MBOX_SMSG, 0, 0);
		break;
	case _IOC_NR(ADSP_CNSL_GET_MBX):
		/* Non-blocking receive of one 32-bit word. */
		if (!ACCESS_OK(uarg, sizeof(uint32_t)))
			return -EACCES;
		ret = nvadsp_mbox_recv(&console->app_mbox, &data, 0, 0);
		if (ret)
			break;
		ret = copy_to_user(uarg, &data,
				   sizeof(uint32_t));
		if (ret)
			ret = -EACCES;
		break;
	case _IOC_NR(ADSP_CNSL_PUT_DATA):
		/* Send one word on the shared console mailbox.
		 * NOTE(review): the ACCESS_OK check uses the run_app struct
		 * size although only a uint32_t is copied; the trailing
		 * break is unreachable after the return. */
		if (!ACCESS_OK(uarg, sizeof(struct adsp_consol_run_app_arg_t)))
			return -EACCES;
		ret = copy_from_user(&data, uarg, sizeof(uint32_t));
		if (ret) {
			ret = -EACCES;
			break;
		}
		return nvadsp_mbox_send(&console->shl_snd_mbox, data,
					NVADSP_MBOX_SMSG, 0, 0);
		break;
	default:
		dev_info(dev, "adsp_consol: invalid command\n");
		return -EINVAL;
	}

	return ret;
}
/* File operations for the adsp_console debugfs node; the same ioctl
 * handler serves both native and compat (32-bit) callers. */
static const struct file_operations adsp_console_operations = {
	.open = adsp_consol_open,
	.release = adsp_consol_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl = adsp_consol_ioctl,
#endif
	.unlocked_ioctl = adsp_consol_ioctl
};
/*
 * Create the "adsp_console" debugfs file under @adsp_debugfs_root and
 * reset the per-app mailbox state in @cnsl.
 *
 * Returns 0 on success, -ENOENT when the debugfs root is missing or the
 * file could not be created.
 */
int
adsp_create_cnsl(struct dentry *adsp_debugfs_root, struct nvadsp_cnsl *cnsl)
{
	int ret = 0;
	struct device *dev = cnsl->dev;

	if (IS_ERR_OR_NULL(adsp_debugfs_root)) {
		ret = -ENOENT;
		goto err_out;
	}

	/* NOTE(review): recent kernels return an ERR_PTR (never NULL) from
	 * debugfs_create_file() on failure, so this NULL test may not catch
	 * errors — confirm against the target kernel version. */
	if (!debugfs_create_file("adsp_console", S_IRUSR,
				 adsp_debugfs_root, cnsl,
				 &adsp_console_operations)) {
		dev_err(dev,
			"unable to create adsp console debug fs file\n");
		ret = -ENOENT;
		goto err_out;
	}

	memset(&cnsl->app_mbox, 0, sizeof(cnsl->app_mbox));

err_out:
	return ret;
}

View File

@@ -0,0 +1,31 @@
/*
* adsp_console_dbfs.h
*
* A header file for adsp console driver
*
* Copyright (C) 2014 NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef ADSP_CNSL_DBFS_H
#define ADSP_CNSL_DBFS_H

/* Per-device state for the ADSP console debugfs interface. */
struct nvadsp_cnsl {
	struct device *dev;		/* owning platform device's dev */
	struct nvadsp_mbox shl_snd_mbox;/* shared console send mailbox */
	struct nvadsp_mbox app_mbox;	/* per-app mailbox (OPN_MBX ioctl) */
};

/* Create the adsp_console debugfs node; returns 0 or -ENOENT. */
int
adsp_create_cnsl(struct dentry *adsp_debugfs_root, struct nvadsp_cnsl *cnsl);
#endif /* ADSP_CNSL_DBFS_H */

View File

@@ -0,0 +1,330 @@
/*
* Copyright (C) 2015-2016, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
#include <linux/platform/tegra/clock.h>
#include <linux/irqchip/tegra-agic.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include "dev.h"
#define ACTMON_DEV_CTRL 0x00
#define ACTMON_DEV_CTRL_ENB (0x1 << 31)
#define ACTMON_DEV_CTRL_AT_END_ENB (0x1 << 15)
#define ACTMON_DEV_CTRL_PERIODIC_ENB (0x1 << 13)
#define ACTMON_DEV_CTRL_SAMPLE_PERIOD_VAL_SHIFT (0)
#define ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK (0xff << 0)
#define ACTMON_DEV_COUNT 0x18
#define ACTMON_DEV_INTR_STATUS 0x20
#define ACTMON_DEV_INTR_AT_END (0x1 << 27)
#define ACTMON_DEV_COUNT_WEGHT 0x24
#define ACTMON_DEV_SAMPLE_CTRL 0x28
#define ACTMON_DEV_SAMPLE_CTRL_TICK_65536 (0x1 << 2)
#define ACTMON_DEV_SAMPLE_CTRL_TICK_256 (0x0 << 1)
#define AMISC_ACTMON_0 0x54
#define AMISC_ACTMON_CNT_TARGET_ENABLE (0x1 << 31)
#define ACTMON_REG_OFFSET 0x800
/* milli second divider as SAMPLE_TICK*/
#define SAMPLE_MS_DIVIDER 65536
/* State for the ACTMON-based ADSP CPU usage monitor (one instance). */
struct adsp_cpustat {
	int irq;		/* ACTMON virtual IRQ from the AGIC */
	struct device *device;	/* used for error reporting only */
	const char *dev_id;
	spinlock_t lock;	/* guards register access + usage counters */
	struct clk *ape_clk;
	struct clk *adsp_clk;
	unsigned long ape_freq;	/* in kHz (rate / 1000), sampled at init */
	unsigned long adsp_freq;/* in kHz (rate / 1000), sampled at init */
	u64 cur_usage;		/* last computed usage (percent) */
	bool enable;		/* monitoring on/off, toggled via debugfs */
	u64 max_usage;		/* high-water mark of cur_usage */
	void __iomem *base;	/* ACTMON register window (AMISC + 0x800) */
};
static struct adsp_cpustat cpustat;
static struct adsp_cpustat *cpumon;
/* Raw (non-ordered) read of an ACTMON register. */
static inline u32 actmon_readl(u32 offset)
{
	return __raw_readl(cpumon->base + offset);
}
/* Raw (non-ordered) write of an ACTMON register; pair with actmon_wmb(). */
static inline void actmon_writel(u32 val, u32 offset)
{
	__raw_writel(val, cpumon->base + offset);
}
/* Ensure previously issued raw writes are ordered before continuing. */
static inline void actmon_wmb(void)
{
	wmb();
}
/*
 * ACTMON sample-period-end interrupt: read the activity count for the
 * elapsed period and convert it to a usage percentage.
 *
 * NOTE(review): the math assumes ape_freq and adsp_freq are non-zero
 * (they are derived from clk_get_rate()/1000 at init) — a zero rate
 * would divide by zero here; confirm the clocks are always running.
 */
static irqreturn_t adsp_cpustat_isr(int irq, void *dev_id)
{
	u32 val;
	unsigned long period, flags;

	spin_lock_irqsave(&cpumon->lock, flags);

	/* Ack all pending ACTMON interrupt status bits. */
	val = actmon_readl(ACTMON_DEV_INTR_STATUS);
	actmon_writel(val, ACTMON_DEV_INTR_STATUS);

	if (val & ACTMON_DEV_INTR_AT_END) {
		/* Sample period in ms: 255 sample ticks of 65536 APE clocks,
		 * with ape_freq in kHz. */
		period = (255 * SAMPLE_MS_DIVIDER) / cpumon->ape_freq;
		cpumon->cur_usage =
			((u64)actmon_readl(ACTMON_DEV_COUNT) * 100) / (period * cpumon->adsp_freq);
		if (cpumon->cur_usage > cpumon->max_usage)
			cpumon->max_usage = cpumon->cur_usage;
	}

	spin_unlock_irqrestore(&cpumon->lock, flags);
	return IRQ_HANDLED;
}
/*
 * One-time ACTMON configuration: count weight 256, periodic sampling with
 * the maximum period (255 sample ticks), 65536-clock sample tick, and the
 * AT_END (period complete) interrupt enabled.  Monitoring itself is only
 * started later by adsp_cpustat_enable().
 */
static void configure_actmon(void)
{
	u32 val;

	/* Set countb weight to 256 */
	actmon_writel(0x100, ACTMON_DEV_COUNT_WEGHT);

	/* Enable periodic sampling */
	val = actmon_readl(ACTMON_DEV_CTRL);
	val |= ACTMON_DEV_CTRL_PERIODIC_ENB;

	/* Set sampling period to max i,e, 255 ape clks */
	val &= ~ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK;
	val |= (0xFF <<
		ACTMON_DEV_CTRL_SAMPLE_PERIOD_VAL_SHIFT)
		& ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK;

	/* Enable the AT_END interrupt */
	val |= ACTMON_DEV_CTRL_AT_END_ENB;
	actmon_writel(val, ACTMON_DEV_CTRL);

	actmon_writel(ACTMON_DEV_SAMPLE_CTRL_TICK_65536,
		      ACTMON_DEV_SAMPLE_CTRL);
	actmon_wmb();
}
/*
 * Start monitoring: set the ACTMON enable bit, flush the write, then
 * unmask the sample-end interrupt.
 */
static void adsp_cpustat_enable(void)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&cpumon->lock, flags);

	val = actmon_readl(ACTMON_DEV_CTRL);
	val |= ACTMON_DEV_CTRL_ENB;
	actmon_writel(val, ACTMON_DEV_CTRL);
	actmon_wmb();

	enable_irq(cpumon->irq);
	spin_unlock_irqrestore(&cpumon->lock, flags);
}
/*
 * Stop monitoring: mask the interrupt, clear the ACTMON enable bit, and
 * ack any stale interrupt status.
 *
 * Fix: disable_irq() can sleep while it waits for a running handler to
 * finish, so it must not be called under spin_lock_irqsave() (atomic
 * context, and the ISR takes the same lock — a potential deadlock).
 * Mask the IRQ first, then program the registers under the lock.
 */
static void adsp_cpustat_disable(void)
{
	u32 val;
	unsigned long flags;

	disable_irq(cpumon->irq);

	spin_lock_irqsave(&cpumon->lock, flags);

	val = actmon_readl(ACTMON_DEV_CTRL);
	val &= ~ACTMON_DEV_CTRL_ENB;
	actmon_writel(val, ACTMON_DEV_CTRL);

	/* Clear any pending interrupt status left behind. */
	actmon_writel(0xffffffff, ACTMON_DEV_INTR_STATUS);
	actmon_wmb();

	spin_unlock_irqrestore(&cpumon->lock, flags);
}
#define RW_MODE (S_IWUSR | S_IRUSR)
#define RO_MODE S_IRUSR

/* debugfs "cur_usage": last computed usage percentage (read-only). */
static int cur_usage_get(void *data, u64 *val)
{
	*val = cpumon->cur_usage;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cur_usage_fops, cur_usage_get, NULL, "%llu\n");

/* debugfs "max_usage": high-water mark of cur_usage (read-only). */
static int max_usage_get(void *data, u64 *val)
{
	*val = cpumon->max_usage;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(max_usage_fops, max_usage_get, NULL, "%llu\n");
/*
 * debugfs "enable" writer: toggle monitoring.  Writing the current state
 * again is a no-op; otherwise the monitor is started or stopped.
 */
static int enable_set(void *data, u64 val)
{
	bool want = (bool)val;

	if (want != cpumon->enable) {
		cpumon->enable = want;
		if (want)
			adsp_cpustat_enable();
		else
			adsp_cpustat_disable();
	}

	return 0;
}
/* debugfs "enable" reader: 1 when monitoring is active, else 0. */
static int enable_get(void *data, u64 *val)
{
	*val = cpumon->enable;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(enable_fops, enable_get, enable_set, "%llu\n");
/*
 * Create the adsp_cpustat debugfs directory with cur_usage, max_usage
 * and enable entries.  Returns 0 on success, -ENOMEM otherwise.
 *
 * NOTE(review): on recent kernels debugfs_create_dir/file() return
 * ERR_PTR (never NULL) on failure, so these NULL checks may not detect
 * errors — confirm against the target kernel version.
 */
static int cpustat_debugfs_init(struct nvadsp_drv_data *drv)
{
	int ret = -ENOMEM;
	struct dentry *d, *dir;

	if (!drv->adsp_debugfs_root)
		return ret;

	dir = debugfs_create_dir("adsp_cpustat", drv->adsp_debugfs_root);
	if (!dir)
		return ret;

	d = debugfs_create_file(
		"cur_usage", RO_MODE, dir, cpumon, &cur_usage_fops);
	if (!d)
		return ret;

	d = debugfs_create_file(
		"max_usage", RO_MODE, dir, cpumon, &max_usage_fops);
	if (!d)
		return ret;

	d = debugfs_create_file(
		"enable", RW_MODE, dir, cpumon, &enable_fops);
	if (!d)
		return ret;

	return 0;
}
/*
 * Initialize the ADSP cpustat monitor: acquire and enable the APE and
 * ADSP CPU clocks (their kHz rates feed the usage math in the ISR),
 * enable the AMISC ACTMON counter target, configure ACTMON, and hook the
 * ACTMON virtual IRQ.  Idempotent via drv->cpustat_initialized.
 *
 * Returns 0 on success or a negative errno; on failure all previously
 * acquired resources are released via the goto unwind chain.
 */
int adsp_cpustat_init(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
	static void __iomem *amisc_base;
	u32 val;
	int ret = -EINVAL;

	if (drv->cpustat_initialized)
		return 0;

	cpumon = &cpustat;
	spin_lock_init(&cpumon->lock);

	/* ACTMON registers live at a fixed offset inside the AMISC block. */
	cpumon->base = drv->base_regs[AMISC] + ACTMON_REG_OFFSET;
	amisc_base = drv->base_regs[AMISC];

	/* NOTE(review): clk_get_sys() is the legacy lookup API; cpumon->device
	 * is not yet assigned here, so the dev_err() calls below pass NULL —
	 * confirm intended. */
	cpumon->ape_clk = clk_get_sys(NULL, "adsp.ape");
	if (IS_ERR_OR_NULL(cpumon->ape_clk)) {
		dev_err(cpumon->device, "Failed to find adsp.ape clk\n");
		ret = -EINVAL;
		goto err_ape_clk;
	}
	ret = clk_prepare_enable(cpumon->ape_clk);
	if (ret) {
		dev_err(cpumon->device, "Failed to enable ape clock\n");
		goto err_ape_enable;
	}
	/* Rates cached in kHz for the usage computation in the ISR. */
	cpumon->ape_freq = clk_get_rate(cpumon->ape_clk) / 1000;

	cpumon->adsp_clk = clk_get_sys(NULL, "adsp_cpu");
	if (IS_ERR_OR_NULL(cpumon->adsp_clk)) {
		dev_err(cpumon->device, "Failed to find adsp cpu clock\n");
		ret = -EINVAL;
		goto err_adsp_clk;
	}
	ret = clk_prepare_enable(cpumon->adsp_clk);
	if (ret) {
		dev_err(cpumon->device, "Failed to enable adsp cpu clock\n");
		goto err_adsp_enable;
	}
	cpumon->adsp_freq = clk_get_rate(cpumon->adsp_clk) / 1000;

	/* Enable AMISC_ACTMON */
	val = __raw_readl(amisc_base + AMISC_ACTMON_0);
	val |= AMISC_ACTMON_CNT_TARGET_ENABLE;
	__raw_writel(val, amisc_base + AMISC_ACTMON_0);

	/* Clear all interrupts */
	actmon_writel(0xffffffff, ACTMON_DEV_INTR_STATUS);

	/* One time configuration of actmon regs */
	configure_actmon();

	cpumon->irq = drv->agic_irqs[ACTMON_VIRQ];
	ret = request_irq(cpumon->irq, adsp_cpustat_isr,
			  IRQ_TYPE_LEVEL_HIGH, "adsp_actmon", cpumon);
	if (ret) {
		dev_err(cpumon->device, "Failed irq %d request\n", cpumon->irq);
		goto err_irq;
	}

	cpustat_debugfs_init(drv);

	drv->cpustat_initialized = true;
	return 0;

err_irq:
	clk_disable_unprepare(cpumon->adsp_clk);
err_adsp_enable:
	clk_put(cpumon->adsp_clk);
err_adsp_clk:
	clk_disable_unprepare(cpumon->ape_clk);
err_ape_enable:
	clk_put(cpumon->ape_clk);
err_ape_clk:
	return ret;
}
/*
 * Tear down the cpustat monitor: release the IRQ, disable and release
 * both clocks, and clear the initialized flag.
 *
 * Fixes: adsp_cpustat_init() enables the APE clock with
 * clk_prepare_enable(), but the exit path only called clk_put() on it,
 * leaking an enable/prepare count — add the matching
 * clk_disable_unprepare().  Also use plain int instead of the
 * nonstandard status_t for a function declared to return int.
 */
int adsp_cpustat_exit(struct platform_device *pdev)
{
	int ret = 0;
	struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);

	if (!drv->cpustat_initialized) {
		ret = -EINVAL;
		goto end;
	}

	free_irq(cpumon->irq, cpumon);

	clk_disable_unprepare(cpumon->adsp_clk);
	clk_put(cpumon->adsp_clk);

	clk_disable_unprepare(cpumon->ape_clk);
	clk_put(cpumon->ape_clk);

	drv->cpustat_initialized = false;

end:
	return ret;
}

View File

@@ -0,0 +1,877 @@
/*
* adsp_dfs.c
*
* adsp dynamic frequency scaling
*
* Copyright (C) 2014-2020, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/tegra_nvadsp.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/clk/tegra.h>
#include <linux/seq_file.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
#include <asm/cputime.h>
#else
#include <linux/sched/cputime.h>
#endif
#include <linux/slab.h>
#include "dev.h"
#include "ape_actmon.h"
#include "os.h"
#ifndef CONFIG_TEGRA_ADSP_ACTMON
/* No-op stub so DFS code can call actmon_rate_change() unconditionally
 * when ACTMON support is compiled out. */
void actmon_rate_change(unsigned long freq, bool override)
{
}
#endif
#define MBOX_TIMEOUT 5000 /* in ms */
#define HOST_ADSP_DFS_MBOX_ID 3
/* Reply codes the ADSP sends back over the DFS mailbox. */
enum adsp_dfs_reply {
	ACK,
	NACK,
};

/*
 * Freqency in Hz.The frequency always needs to be a multiple of 12.8 Mhz and
 * should be extended with a slab 38.4 Mhz.
 */
static unsigned long adsp_cpu_freq_table_t21x[] = {
	MIN_ADSP_FREQ,
	MIN_ADSP_FREQ * 2,
	MIN_ADSP_FREQ * 3,
	MIN_ADSP_FREQ * 4,
	MIN_ADSP_FREQ * 5,
	MIN_ADSP_FREQ * 6,
	MIN_ADSP_FREQ * 7,
	MIN_ADSP_FREQ * 8,
	MIN_ADSP_FREQ * 9,
	MIN_ADSP_FREQ * 10,
	MIN_ADSP_FREQ * 11,
	MIN_ADSP_FREQ * 12,
	MIN_ADSP_FREQ * 13,
	MIN_ADSP_FREQ * 14,
	MIN_ADSP_FREQ * 15,
	MIN_ADSP_FREQ * 16,
	MIN_ADSP_FREQ * 17,
	MIN_ADSP_FREQ * 18,
	MIN_ADSP_FREQ * 19,
	MIN_ADSP_FREQ * 20,
	MIN_ADSP_FREQ * 21,
};

/*
 * Frequency in Hz.
 */
static unsigned long adsp_cpu_freq_table_t18x[] = {
	150000000lu,
	300000000lu,
	600000000lu,
};

/* Chip-specific table selected by adsp_cpu_freq_table_setup(). */
static unsigned long *adsp_cpu_freq_table;
static int adsp_cpu_freq_table_size;
/* DFS policy and clock handles for the ADSP CPU. */
struct adsp_dfs_policy {
	bool enable;		/* DFS on/off */
/* update_freq_flag = TRUE, ADSP ACKed the new freq
 *		= FALSE, ADSP NACKed the new freq
 */
	bool update_freq_flag;

	const char *clk_name;
	unsigned long min;    /* in kHz */
	unsigned long max;    /* in kHz */
	unsigned long cur;    /* in kHz */
	unsigned long cpu_min;    /* ADSP min freq(KHz). Remain unchanged */
	unsigned long cpu_max;    /* ADSP max freq(KHz). Remain unchanged */

	struct clk *adsp_clk;		/* rate readback */
	struct clk *aclk_clk;		/* rate setting on non-T210 */
	struct clk *adsp_cpu_abus_clk;	/* rate setting on T210 */
	struct nvadsp_mbox mbox;	/* host <-> ADSP DFS mailbox */

#ifdef CONFIG_DEBUG_FS
	struct dentry *root;
#endif
	unsigned long ovr_freq;	/* override frequency, if any */
};
#define MAX_SIZE(x, y) (x > y ? x : y)
/* Sized for the larger of the two chip frequency tables. */
#define TIME_IN_STATE_SIZE MAX_SIZE(ARRAY_SIZE(adsp_cpu_freq_table_t21x), \
				    ARRAY_SIZE(adsp_cpu_freq_table_t18x))

/* Accounting of time spent at each frequency step. */
struct adsp_freq_stats {
	struct device *dev;
	unsigned long long last_time;	/* timestamp of last state change */
	int last_index;			/* index of the current state */
	u64 time_in_state[TIME_IN_STATE_SIZE];
	int state_num;			/* number of valid states */
};
/* Module-wide DFS state; `device` is the ADSP platform device's dev. */
static struct adsp_dfs_policy *policy;
static struct adsp_freq_stats freq_stats;
static struct device *device;
static DEFINE_MUTEX(policy_mutex);
/* True when the ADSP OS is loaded and running on @dev; false for a NULL
 * device or a stopped OS (the latter is logged at debug level). */
static bool is_os_running(struct device *dev)
{
	struct nvadsp_drv_data *drv_data;
	struct platform_device *pdev;

	if (!dev)
		return false;

	pdev = to_platform_device(dev);
	drv_data = platform_get_drvdata(pdev);

	if (drv_data->adsp_os_running)
		return true;

	dev_dbg(&pdev->dev, "%s: adsp os is not loaded\n", __func__);
	return false;
}
static int adsp_clk_get(struct adsp_dfs_policy *policy)
{
struct device_node *node = device->of_node;
int ret = 0;
policy->adsp_clk = devm_clk_get(device, "adsp");
if (IS_ERR_OR_NULL(policy->adsp_clk)) {
dev_err(device, "unable to find adsp clock\n");
ret = PTR_ERR(policy->adsp_clk);
}
if (!of_device_is_compatible(node, "nvidia,tegra210-adsp")) {
policy->aclk_clk = devm_clk_get(device, "aclk");
if (IS_ERR_OR_NULL(policy->aclk_clk)) {
dev_err(device, "unable to find aclk clock\n");
ret = PTR_ERR(policy->aclk_clk);
}
} else {
policy->adsp_cpu_abus_clk =
devm_clk_get(device, "adsp_cpu_abus");
if (IS_ERR_OR_NULL(policy->adsp_cpu_abus_clk)) {
dev_err(device, "unable to find adsp cpu abus clock\n");
ret = PTR_ERR(policy->adsp_cpu_abus_clk);
}
}
return ret;
}
static void adsp_clk_put(struct adsp_dfs_policy *policy)
{
if (policy->adsp_cpu_abus_clk)
devm_clk_put(device, policy->adsp_cpu_abus_clk);
if (policy->adsp_clk)
devm_clk_put(device, policy->adsp_clk);
if (policy->aclk_clk)
devm_clk_put(device, policy->aclk_clk);
}
/*
 * adsp_clk_set_rate() - program the ADSP cpu clock.
 * @freq_hz: target rate in Hz
 *
 * t210 scales via the adsp_cpu_abus clock, later chips via aclk.
 */
static int adsp_clk_set_rate(struct adsp_dfs_policy *policy,
			unsigned long freq_hz)
{
	struct device_node *node = device->of_node;
	struct clk *rate_clk;

	if (of_device_is_compatible(node, "nvidia,tegra210-adsp"))
		rate_clk = policy->adsp_cpu_abus_clk;
	else
		rate_clk = policy->aclk_clk;

	return clk_set_rate(rate_clk, freq_hz);
}
/* Return the current ADSP clock rate in Hz (read from the core adsp clock). */
static unsigned long adsp_clk_get_rate(struct adsp_dfs_policy *policy)
{
	return clk_get_rate(policy->adsp_clk);
}
static void adsp_cpu_freq_table_setup(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
if (adsp_cpu_freq_table)
return;
if (of_device_is_compatible(node, "nvidia,tegra210-adsp")) {
adsp_cpu_freq_table = adsp_cpu_freq_table_t21x;
adsp_cpu_freq_table_size = ARRAY_SIZE(adsp_cpu_freq_table_t21x);
} else {
adsp_cpu_freq_table = adsp_cpu_freq_table_t18x;
adsp_cpu_freq_table_size = ARRAY_SIZE(adsp_cpu_freq_table_t18x);
}
}
/*
 * adsp_get_target_freq() - snap a request onto the frequency table.
 * @tfreq: requested frequency in Hz (tables are expressed in Hz)
 * @index: out-param, table index of the returned frequency
 *
 * Requests below/above the table are clamped to the first/last entry;
 * otherwise the first entry at or above the request is chosen.
 * Returns the table frequency in Hz, or 0 if no entry matched.
 */
static unsigned long adsp_get_target_freq(unsigned long tfreq, int *index)
{
	int last = adsp_cpu_freq_table_size - 1;
	int i;

	if (tfreq <= adsp_cpu_freq_table[0]) {
		*index = 0;
		return adsp_cpu_freq_table[0];
	}

	if (tfreq >= adsp_cpu_freq_table[last]) {
		*index = last;
		return adsp_cpu_freq_table[last];
	}

	for (i = 1; i <= last; i++) {
		if (tfreq <= adsp_cpu_freq_table[i]) {
			*index = i;
			return adsp_cpu_freq_table[i];
		}
	}

	return 0;
}
/* Default policy: DFS enabled at boot; remaining fields filled in
 * by adsp_dfs_core_init().
 */
static struct adsp_dfs_policy dfs_policy =  {
	.enable = 1,
	.clk_name = "adsp_cpu",
};
/*
 * adsp_update_freq_handshake() - tell the ADSP OS about a new cpu clk rate.
 * @tfreq_hz: new frequency in Hz (already programmed in hardware)
 * @index:    index of @tfreq_hz in the shared host/ADSP frequency table
 *
 * Sends the table index over the DFS mailbox and waits for ACK/NACK;
 * policy->update_freq_flag records the outcome. Returns 0 when the
 * mailbox exchange completed (even on NACK), or the mailbox error code.
 */
static int adsp_update_freq_handshake(unsigned long tfreq_hz, int index)
{
	struct nvadsp_mbox *mbx = &policy->mbox;
	enum adsp_dfs_reply reply;
	int ret;

	dev_dbg(device, "sending change in freq(hz):%lu\n", tfreq_hz);
	/*
	 * Ask adsp to do action upon change in freq. ADSP and Host need to
	 * maintain the same freq table.
	 */
	ret = nvadsp_mbox_send(mbx, index,
			       NVADSP_MBOX_SMSG, true, 100);
	if (ret) {
		dev_err(device, "%s:host to adsp, mbox_send failure. ret:%d\n",
			__func__, ret);
		policy->update_freq_flag = false;
		goto err_out;
	}

	ret = nvadsp_mbox_recv(&policy->mbox, &reply, true, MBOX_TIMEOUT);
	if (ret) {
		dev_err(device, "%s:host to adsp, mbox_receive failure. ret:%d\n",
			__func__, ret);
		policy->update_freq_flag = false;
		goto err_out;
	}

	switch (reply) {
	case ACK:
		/* Set Update freq flag */
		dev_dbg(device, "adsp freq change status:ACK\n");
		policy->update_freq_flag = true;
		break;
	case NACK:
		/* Set Update freq flag */
		dev_dbg(device, "adsp freq change status:NACK\n");
		policy->update_freq_flag = false;
		break;
	default:
		/* NOTE(review): on an unrecognized reply update_freq_flag
		 * keeps its previous value and ret stays 0 — confirm this
		 * is intended rather than treating it as a failure.
		 */
		dev_err(device, "Error: adsp freq change status\n");
	}

	dev_dbg(device, "%s:status received from adsp: %s, tfreq(hz):%lu\n",
		__func__,
		policy->update_freq_flag == true ? "ACK" : "NACK",
		tfreq_hz);

err_out:
	return ret;
}
/*
 * update_freq - update adsp freq and ask adsp to change timer as
 * change in adsp freq.
 * freq_khz - target frequency in KHz
 * return - final freq got set (in KHz).
 *		- 0, incase of error or when the target equals the
 *		  current frequency.
 *
 * Note - Policy->cur would be updated via rate
 * change notifier, when freq is changed in hw.
 * Caller must hold policy_mutex (all callers in this file do).
 */
static unsigned long update_freq(unsigned long freq_khz)
{
	struct nvadsp_drv_data *drv = dev_get_drvdata(device);
	unsigned long tfreq_hz, old_freq_khz;
	u32 efreq;
	int index;
	int ret;

	if (!is_os_running(device)) {
		dev_err(device, "adsp os is not running\n");
		return 0;
	}

	/* snap the request onto the shared host/ADSP frequency table */
	tfreq_hz = adsp_get_target_freq(freq_khz * 1000, &index);
	if (!tfreq_hz) {
		dev_err(device, "unable get the target freq\n");
		return 0;
	}

	old_freq_khz = policy->cur;

	if ((tfreq_hz / 1000) == old_freq_khz) {
		dev_dbg(device, "old and new target_freq is same\n");
		return 0;
	}

	ret = adsp_clk_set_rate(policy, tfreq_hz);
	if (ret) {
		dev_err(device, "failed to set adsp freq:%luhz err:%d\n",
			tfreq_hz, ret);
		policy->update_freq_flag = false;
		return 0;
	}

	/* scale EMC bandwidth to match the new cpu rate */
	efreq = adsp_to_emc_freq(tfreq_hz / 1000);
	ret = nvadsp_set_bw(drv, efreq);
	if (ret) {
		policy->update_freq_flag = false;
		goto err_out;
	}

	/*
	 * On tegra > t210, as os_args->adsp_freq_hz is used to know adsp cpu
	 * clk rate and there is no need to set up timer prescalar. So skip
	 * communicating adsp cpu clk rate update to adspos using mbox
	 */
	if (!of_device_is_compatible(device->of_node, "nvidia,tegra210-adsp"))
		policy->update_freq_flag = true;
	else
		adsp_update_freq_handshake(tfreq_hz, index);

	/*
	 * Use os_args->adsp_freq_hz to update adsp cpu clk rate
	 * for adspos firmware, which uses this shared variable
	 * to get the clk rate for EDF, etc.
	 */
	if (policy->update_freq_flag) {
		struct nvadsp_shared_mem *sm = drv->shared_adsp_os_data;
		sm->os_args.adsp_freq_hz = tfreq_hz;
	}

err_out:
	/* on any failure, roll clock and bandwidth back to the old rate */
	if (!policy->update_freq_flag) {
		ret = adsp_clk_set_rate(policy, old_freq_khz * 1000);
		if (ret) {
			dev_err(device, "failed to resume adsp freq(khz):%lu\n",
				old_freq_khz);
			policy->update_freq_flag = false;
		}
		efreq = adsp_to_emc_freq(old_freq_khz);
		ret = nvadsp_set_bw(drv, efreq);
		if (ret)
			policy->update_freq_flag = false;
		tfreq_hz = old_freq_khz * 1000;
	}

	return tfreq_hz / 1000;
}
/*
 * policy_min_set() - raise/lower the DFS policy minimum frequency (kHz).
 * The request is clamped to [cpu_min, cpu_max]; if it exceeds the current
 * frequency the clock is raised immediately. Returns 0 on success,
 * -EINVAL when the OS is down, DFS is disabled or the value is a no-op.
 */
static int policy_min_set(void *data, u64 val)
{
	unsigned long new_min = (unsigned long)val;
	int status = -EINVAL;

	if (!is_os_running(device))
		return status;

	mutex_lock(&policy_mutex);

	if (!policy->enable) {
		dev_err(device, "adsp dfs policy is not enabled\n");
		goto unlock;
	}

	if (new_min == policy->min)
		goto unlock;

	/* clamp into the hardware-supported range */
	if (new_min < policy->cpu_min)
		new_min = policy->cpu_min;
	else if (new_min >= policy->cpu_max)
		new_min = policy->cpu_max;

	if (new_min > policy->cur) {
		new_min = update_freq(new_min);
		if (new_min)
			policy->cur = new_min;
	}

	if (new_min)
		policy->min = new_min;

	status = 0;
unlock:
	mutex_unlock(&policy_mutex);
	return status;
}
#ifdef CONFIG_DEBUG_FS

/* debugfs file permissions: owner read/write vs. owner read-only */
#define RW_MODE (S_IWUSR | S_IRUSR)
#define RO_MODE	S_IRUSR
/* Get adsp dfs status: 0: disabled, 1: enabled */
static int dfs_enable_get(void *data, u64 *val)
{
	mutex_lock(&policy_mutex);
	*val = policy->enable;
	mutex_unlock(&policy_mutex);
	return 0;
}
/* Enable/disable adsp dfs (any non-zero value enables it) */
static int dfs_enable_set(void *data, u64 val)
{
	bool enable = (val != 0);

	mutex_lock(&policy_mutex);
	policy->enable = enable;
	mutex_unlock(&policy_mutex);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(enable_fops, dfs_enable_get,
			dfs_enable_set, "%llu\n");
/* Get adsp dfs policy min freq(KHz); -EINVAL when the ADSP OS is down */
static int policy_min_get(void *data, u64 *val)
{
	int err = -EINVAL;

	if (is_os_running(device)) {
		mutex_lock(&policy_mutex);
		*val = policy->min;
		mutex_unlock(&policy_mutex);
		err = 0;
	}

	return err;
}
DEFINE_SIMPLE_ATTRIBUTE(min_fops, policy_min_get,
			policy_min_set, "%llu\n");
/* Get adsp dfs policy max freq(KHz); -EINVAL when the ADSP OS is down */
static int policy_max_get(void *data, u64 *val)
{
	int err = -EINVAL;

	if (is_os_running(device)) {
		mutex_lock(&policy_mutex);
		*val = policy->max;
		mutex_unlock(&policy_mutex);
		err = 0;
	}

	return err;
}
/* Set adsp dfs policy max freq(KHz).
 * Rejects 0, values above cpu_max and no-op values; values at or below
 * cpu_min are clamped up to cpu_min. Lowers the running clock first when
 * the new max is below the current frequency.
 */
static int policy_max_set(void *data, u64 val)
{
	int ret = -EINVAL;
	unsigned long max = (unsigned long)val;

	if (!is_os_running(device))
		return ret;

	mutex_lock(&policy_mutex);

	if (!policy->enable) {
		dev_err(device, "adsp dfs policy is not enabled\n");
		goto exit_out;
	}

	if (!max || ((max > policy->cpu_max) || (max == policy->max)))
		goto exit_out;
	else if (max <= policy->cpu_min)
		max = policy->cpu_min;

	if (max < policy->cur)
		max = update_freq(max);

	/*
	 * NOTE(review): when max >= policy->cur, update_freq() is not called
	 * yet policy->cur is still overwritten with max below, so the
	 * recorded current frequency can diverge from the programmed clock
	 * — confirm whether only policy->max should be updated here.
	 */
	if (max)
		policy->cur = policy->max = max;

	ret = 0;
exit_out:
	mutex_unlock(&policy_mutex);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(max_fops, policy_max_get,
			policy_max_set, "%llu\n");
/* Get adsp dfs policy's current freq(KHz); -EINVAL when the OS is down */
static int policy_cur_get(void *data, u64 *val)
{
	int err = -EINVAL;

	if (is_os_running(device)) {
		mutex_lock(&policy_mutex);
		*val = policy->cur;
		mutex_unlock(&policy_mutex);
		err = 0;
	}

	return err;
}
/*
 * Set adsp dfs policy cur freq(Khz).
 * Only allowed while DFS is disabled; the request is clamped to the
 * policy [min, max] window before programming the clock.
 */
static int policy_cur_set(void *data, u64 val)
{
	unsigned long target = (unsigned long)val;
	int err = -EINVAL;

	if (!is_os_running(device))
		return err;

	mutex_lock(&policy_mutex);

	if (policy->enable) {
		dev_err(device, "adsp dfs is enabled, should be disabled first\n");
		goto out;
	}

	if (!target || target == policy->cur)
		goto out;

	/* Check tfreq policy sanity */
	if (target < policy->min)
		target = policy->min;
	else if (target > policy->max)
		target = policy->max;

	target = update_freq(target);
	if (target)
		policy->cur = target;

	err = 0;
out:
	mutex_unlock(&policy_mutex);
	return err;
}
DEFINE_SIMPLE_ATTRIBUTE(cur_fops, policy_cur_get,
			policy_cur_set, "%llu\n");
/*
 * adspfreq_stats_update() - credit elapsed time to the active freq state.
 * Caller must hold policy_mutex.
 */
static void adspfreq_stats_update(void)
{
	unsigned long long now = get_jiffies_64();

	freq_stats.time_in_state[freq_stats.last_index] +=
		now - freq_stats.last_time;
	freq_stats.last_time = now;
}
/*
 * Print residency in each freq levels.
 * Output: one "<freq kHz> <clock ticks>" line per supported state.
 */
static void dump_stats_table(struct seq_file *s, struct adsp_freq_stats *fstats)
{
	int i;

	mutex_lock(&policy_mutex);

	if (is_os_running(device))
		adspfreq_stats_update();

	for (i = 0; i < fstats->state_num; i++) {
		/*
		 * NOTE(review): adspfreq_stats_update() accumulates
		 * get_jiffies_64() deltas into time_in_state, but the value
		 * is converted here as if it were nanoseconds — confirm
		 * which unit other writers (e.g. actmon) store.
		 */
		u64 jiffies64 = nsecs_to_jiffies64(fstats->time_in_state[i]);
		seq_printf(s, "%lu %llu\n",
			(long unsigned int)(adsp_cpu_freq_table[i] / 1000),
			jiffies_64_to_clock_t(jiffies64));
	}

	mutex_unlock(&policy_mutex);
}
/* seq_file show callback for the "time_in_state" debugfs node */
static int show_time_in_state(struct seq_file *s, void *data)
{
	dump_stats_table(s, (struct adsp_freq_stats *)s->private);
	return 0;
}
/* open callback: bind the seq_file to the adsp_freq_stats passed at
 * debugfs_create_file() time (inode->i_private).
 */
static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_time_in_state, inode->i_private);
}

/* Read-only "time_in_state" debugfs file */
static const struct file_operations time_in_state_fops = {
	.open = stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int adsp_dfs_debugfs_init(struct platform_device *pdev)
{
int ret = -ENOMEM;
struct dentry *d, *root;
struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
if (!drv->adsp_debugfs_root)
return ret;
root = debugfs_create_dir("adsp_dfs", drv->adsp_debugfs_root);
if (!root)
return ret;
policy->root = root;
d = debugfs_create_file("enable", RW_MODE, root, NULL,
&enable_fops);
if (!d)
goto err_out;
d = debugfs_create_file("min_freq", RW_MODE, root, NULL,
&min_fops);
if (!d)
goto err_out;
d = debugfs_create_file("max_freq", RW_MODE, root,
NULL, &max_fops);
if (!d)
goto err_out;
d = debugfs_create_file("cur_freq", RW_MODE, root, NULL,
&cur_fops);
if (!d)
goto err_out;
d = debugfs_create_file("time_in_state", RO_MODE,
root, &freq_stats,
&time_in_state_fops);
if (!d)
goto err_out;
return 0;
err_out:
debugfs_remove_recursive(root);
policy->root = NULL;
dev_err(&pdev->dev,
"unable to create adsp logger debug fs file\n");
return ret;
}
#endif
/*
 * Set target freq.
 * @params:
 * freq: adsp freq in KHz; clamped to the policy [min, max] window.
 * Silently does nothing while DFS is disabled.
 */
void adsp_cpu_set_rate(unsigned long freq)
{
	mutex_lock(&policy_mutex);

	if (!policy->enable) {
		dev_dbg(device, "adsp dfs policy is not enabled\n");
		goto unlock;
	}

	if (freq < policy->min)
		freq = policy->min;
	else if (freq > policy->max)
		freq = policy->max;

	freq = update_freq(freq);
	if (freq != 0)
		policy->cur = freq;

unlock:
	mutex_unlock(&policy_mutex);
}
/*
 * Override adsp freq and reinit actmon counters
 *
 * @params:
 * freq: adsp freq in KHz (clamped to the policy [min, max] window,
 *       then snapped onto the frequency table)
 * return - final freq set
 *	- 0 incase of error
 *
 * NOTE(review): on success policy->ovr_freq is left holding the override
 * value; presumably a rate-change notifier elsewhere clears it — confirm.
 */
unsigned long adsp_override_freq(unsigned long req_freq_khz)
{
	unsigned long ret_freq = 0, freq;
	int index;

	if (!is_os_running(device)) {
		pr_err("%s: adsp os is not in running state.\n", __func__);
		return 0;
	}

	mutex_lock(&policy_mutex);

	freq = req_freq_khz;

	if (freq < policy->min)
		freq = policy->min;
	else if (freq > policy->max)
		freq = policy->max;

	/* table lookup works in Hz */
	freq = adsp_get_target_freq(freq * 1000, &index);
	if (!freq) {
		dev_warn(device,
			 "req freq:%lukhz. unable get the target freq.\n",
			 req_freq_khz);
		goto exit_out;
	}
	freq = freq / 1000; /* In KHz */

	if (freq == policy->cur) {
		ret_freq = freq;
		goto exit_out;
	}

	policy->ovr_freq = freq;

	ret_freq = update_freq(freq);
	if (ret_freq)
		policy->cur = ret_freq;

	/* update_freq() returning a different rate means the override failed */
	if (ret_freq != freq) {
		dev_warn(device,
			 "req freq:%lukhz. freq override to %lukhz rejected.\n",
			 req_freq_khz, freq);
		policy->ovr_freq = 0;
		goto exit_out;
	}

exit_out:
	mutex_unlock(&policy_mutex);
	return ret_freq;
}
EXPORT_SYMBOL(adsp_override_freq);
/*
 * Set min ADSP freq.
 *
 * @params:
 * freq: adsp freq in KHz
 *
 * Thin wrapper over policy_min_set(); errors (OS down, DFS disabled,
 * no-op value) are silently discarded here.
 */
void adsp_update_dfs_min_rate(unsigned long freq)
{
	policy_min_set(NULL, freq);
}
EXPORT_SYMBOL(adsp_update_dfs_min_rate);
/* Enable / disable dynamic freq scaling (takes policy_mutex) */
void adsp_update_dfs(bool val)
{
	mutex_lock(&policy_mutex);
	policy->enable = val;
	mutex_unlock(&policy_mutex);
}
/* Should be called after ADSP os is loaded.
 * Sets up the chip frequency table, clocks, EMC bandwidth, residency
 * statistics and the DFS mailbox; idempotent via drv->dfs_initialized.
 * Returns 0 on success or a negative errno (clocks are released on
 * failure).
 */
int adsp_dfs_core_init(struct platform_device *pdev)
{
	int size = adsp_cpu_freq_table_size;
	struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
	uint16_t mid = HOST_ADSP_DFS_MBOX_ID;
	int ret = 0;
	u32 efreq;

	if (drv->dfs_initialized)
		return 0;

	device = &pdev->dev;
	policy = &dfs_policy;

	/* Set up adsp cpu freq table as per chip */
	if (!adsp_cpu_freq_table)
		adsp_cpu_freq_table_setup(pdev);

	ret = adsp_clk_get(policy);
	if (ret)
		goto end;

	policy->max = policy->cpu_max = drv->adsp_freq; /* adsp_freq in KHz */
	policy->min = policy->cpu_min = adsp_cpu_freq_table[0] / 1000;
	policy->cur = adsp_clk_get_rate(policy) / 1000;

	/* request EMC bandwidth matching the boot-time cpu rate */
	efreq = adsp_to_emc_freq(policy->cur);
	ret = nvadsp_set_bw(drv, efreq);
	if (ret)
		goto end;

	/* seed the residency statistics from the current state */
	adsp_get_target_freq(policy->cur * 1000, &freq_stats.last_index);
	freq_stats.last_time = get_jiffies_64();
	freq_stats.state_num = size;
	freq_stats.dev = &pdev->dev;
	memset(&freq_stats.time_in_state, 0, sizeof(freq_stats.time_in_state));

	ret = nvadsp_mbox_open(&policy->mbox, &mid, "dfs_comm", NULL, NULL);
	if (ret) {
		dev_info(&pdev->dev, "unable to open mailbox. ret:%d\n", ret);
		goto end;
	}

#ifdef CONFIG_DEBUG_FS
	adsp_dfs_debugfs_init(pdev);
#endif
	drv->dfs_initialized = true;

	dev_dbg(&pdev->dev, "adsp dfs initialized ....\n");
	return ret;
end:
	adsp_clk_put(policy);
	return ret;
}
/*
 * adsp_dfs_core_exit() - tear down DFS state set up by adsp_dfs_core_init().
 * Returns -ENODEV when DFS was never initialized, otherwise the mailbox
 * close status (clocks are released either way once initialized).
 */
int adsp_dfs_core_exit(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
	status_t rc;

	/* nothing to tear down unless init completed */
	if (!drv->dfs_initialized)
		return -ENODEV;

	rc = nvadsp_mbox_close(&policy->mbox);
	if (rc)
		dev_info(&pdev->dev,
			 "adsp dfs exit failed: mbox close error. ret:%d\n", rc);

	adsp_clk_put(policy);
	drv->dfs_initialized = false;

	dev_dbg(&pdev->dev, "adsp dfs has exited ....\n");

	return rc;
}

View File

@@ -0,0 +1,255 @@
/*
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
#include <linux/tegra_nvadsp.h>
#include "dev.h"
/* debugfs file permissions (owner rw, group/other read) */
#define RW_MODE (S_IWUSR | S_IRUGO)

/* Host-visible lifecycle states for the low-priority ADSP thread */
enum adsp_lpthread_state {
	ADSP_LPTHREAD_STOP,
	ADSP_LPTHREAD_START,
	ADSP_LPTHREAD_PAUSE,
};

/* Layout of the app's shared memory; provides the mailbox id to use */
struct adsp_lpthread_shared_state_t {
	uint16_t mbox_id;
};

/* Commands sent to the firmware side over the mailbox */
enum adsp_lpthread_mbx_cmd {
	ADSP_LPTHREAD_CMD_RESUME = 0,
	ADSP_LPTHREAD_CMD_PAUSE,
	ADSP_LPTHREAD_CMD_CLOSE,
};

/* Host-side bookkeeping for the lpthread app instance */
struct adsp_lpthread {
	bool lpthread_initialized;	/* app loaded + started */
	bool adsp_os_suspended;		/* mirror of ADSP OS suspend state */
	bool lpthread_paused;
	bool lpthread_resumed;
	bool lpthread_closed;
	nvadsp_app_handle_t app_handle;
	nvadsp_app_info_t *app_info;
};

static struct adsp_lpthread lpthread_obj;
static struct adsp_lpthread *lpthread;
static struct nvadsp_mbox mbox;
static struct adsp_lpthread_shared_state_t *adsp_lpthread;
/*
 * adsp_lpthread_init() - load and start the "adsp_lpthread" app on ADSP.
 * @is_adsp_suspended: current ADSP OS suspend state; when false the
 *                     thread is resumed immediately after start.
 *
 * Returns 0 on success, -1 on failure (legacy convention kept for
 * existing callers).
 *
 * Fix: the loaded app image is no longer leaked when a later init step
 * fails — it is unloaded on the error paths.
 */
int adsp_lpthread_init(bool is_adsp_suspended)
{
	nvadsp_app_handle_t handle;
	nvadsp_app_info_t *app_info;
	int ret;

	handle = nvadsp_app_load("adsp_lpthread", "adsp_lpthread.elf");
	if (!handle)
		return -1;

	app_info = nvadsp_app_init(handle, NULL);
	if (IS_ERR_OR_NULL(app_info)) {
		pr_err("unable to init app adsp_lpthread\n");
		/* don't leak the loaded app image */
		nvadsp_app_unload(handle);
		return -1;
	}

	ret = nvadsp_app_start(app_info);
	if (ret) {
		pr_err("unable to start app adsp_lpthread\n");
		/* NOTE(review): app_info itself may still leak here; confirm
		 * whether nvadsp_exit_app() is safe on a never-started app
		 * before adding it to this path.
		 */
		nvadsp_app_unload(handle);
		return -1;
	}

	lpthread->app_info = app_info;
	lpthread->app_handle = handle;

	adsp_lpthread =
		(struct adsp_lpthread_shared_state_t *)app_info->mem.shared;

	ret = nvadsp_mbox_open(&mbox, &adsp_lpthread->mbox_id,
			       "adsp_lpthread", NULL, NULL);
	if (ret) {
		pr_err("Failed to open mbox %d for adsp_lpthread app",
		       adsp_lpthread->mbox_id);
		return -1;
	}

	/* Start the timer if adsp is not in suspended state */
	if (!is_adsp_suspended)
		return adsp_lpthread_resume();

	return 0;
}
/* Ask the firmware thread to resume; returns the mailbox send status. */
int adsp_lpthread_resume(void)
{
	int err;

	err = nvadsp_mbox_send(&mbox, ADSP_LPTHREAD_CMD_RESUME,
			       NVADSP_MBOX_SMSG, 0, 0);
	if (err)
		pr_err("%s: nvadsp_mbox_send() failed: %d, ret = %d\n",
		       __func__, adsp_lpthread->mbox_id, err);

	return err;
}
/* Ask the firmware thread to pause; returns the mailbox send status. */
int adsp_lpthread_pause(void)
{
	int err;

	err = nvadsp_mbox_send(&mbox, ADSP_LPTHREAD_CMD_PAUSE,
			       NVADSP_MBOX_SMSG, 0, 0);
	if (err)
		pr_err("%s: nvadsp_mbox_send() failed: %d, ret = %d\n",
		       __func__, adsp_lpthread->mbox_id, err);

	return err;
}
/*
 * Tell the firmware thread to exit, then tear down the host-side mailbox
 * and app instance. Returns the (close) mailbox send status; teardown
 * proceeds even if the send failed.
 */
int adsp_lpthread_uninit(void)
{
	int err;

	err = nvadsp_mbox_send(&mbox, ADSP_LPTHREAD_CMD_CLOSE,
			       NVADSP_MBOX_SMSG, 0, 0);
	if (err)
		pr_err("%s: nvadsp_mbox_send() failed: %d, ret = %d\n",
		       __func__, adsp_lpthread->mbox_id, err);

	nvadsp_mbox_close(&mbox);
	nvadsp_exit_app((nvadsp_app_info_t *)lpthread->app_info, false);
	nvadsp_app_unload((const void *)lpthread->app_handle);

	return err;
}
/*
 * adsp_usage_set() - drive the lpthread state machine from the debugfs
 * "adsp_usage" knob.
 * @val: one of enum adsp_lpthread_state (START/PAUSE/STOP)
 *
 * Returns the status of the underlying init/resume/pause/uninit call,
 * 0 for unknown values (legacy behavior kept).
 *
 * Fix: lpthread_initialized is now only set when adsp_lpthread_init()
 * actually succeeded; previously a failed init left the state machine
 * believing the app was running.
 */
int adsp_usage_set(unsigned int val)
{
	int ret = 0;

	switch (val) {
	case ADSP_LPTHREAD_START:
		if (lpthread->lpthread_initialized &&
				lpthread->lpthread_resumed) {
			pr_info("ADSP Usage App already running\n");
			break;
		}
		if (!lpthread->lpthread_initialized) {
			ret = adsp_lpthread_init(lpthread->adsp_os_suspended);
			pr_info("Initializing lpthread\n");
			/* only mark initialized on successful init */
			lpthread->lpthread_initialized = (ret == 0);
		} else {
			ret = adsp_lpthread_resume();
			pr_info("Resuming lpthread\n");
		}
		lpthread->lpthread_resumed = true;
		lpthread->lpthread_paused = false;
		lpthread->lpthread_closed = false;
		break;
	case ADSP_LPTHREAD_PAUSE:
		if (!lpthread->lpthread_initialized) {
			pr_info("ADSP Usage App not initialized\n");
			break;
		}
		pr_info("Pausing lpthread\n");
		ret = adsp_lpthread_pause();
		lpthread->lpthread_resumed = false;
		lpthread->lpthread_paused = true;
		lpthread->lpthread_closed = false;
		break;
	case ADSP_LPTHREAD_STOP:
		if (!lpthread->lpthread_initialized) {
			pr_info("ADSP Usage App not initialized\n");
			break;
		}
		pr_info("Exiting lpthread\n");
		ret = adsp_lpthread_uninit();
		lpthread->lpthread_resumed = false;
		lpthread->lpthread_paused = false;
		lpthread->lpthread_closed = true;
		lpthread->lpthread_initialized = false;
		break;
	default:
		pr_err("ADSP Usage App: Invalid input\n");
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL(adsp_usage_set);
/* Report the lpthread state as one of enum adsp_lpthread_state. */
unsigned int adsp_usage_get(void)
{
	unsigned int state = ADSP_LPTHREAD_STOP;

	if (lpthread->lpthread_initialized) {
		if (lpthread->lpthread_resumed)
			state = ADSP_LPTHREAD_START;
		else if (lpthread->lpthread_paused)
			state = ADSP_LPTHREAD_PAUSE;
	}

	return state;
}
EXPORT_SYMBOL(adsp_usage_get);
/* Module entry hook: record that lpthread support is available on this
 * device and reset the suspend mirror. Always returns 0.
 */
int adsp_lpthread_entry(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);

	lpthread = &lpthread_obj;
	drv->lpthread_initialized = true;
	lpthread->adsp_os_suspended = false;
	return 0;
}
/*
 * Module exit hook: clear the availability flag. Returns -EINVAL when
 * lpthread support was never initialized, 0 otherwise.
 */
int adsp_lpthread_exit(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
	status_t rc = drv->lpthread_initialized ? 0 : -EINVAL;

	drv->lpthread_initialized = false;
	return rc;
}
/* Mirror the ADSP OS suspend state; consulted when (re)starting the app. */
int adsp_lpthread_set_suspend(bool is_suspended)
{
	lpthread->adsp_os_suspended = is_suspended;
	return 0;
}
int adsp_lpthread_get_state(void)
{
if (lpthread->lpthread_initialized && lpthread->lpthread_resumed)
return 1;
else
return 0;
}

View File

@@ -0,0 +1,184 @@
/*
* adsp_shared_struct.h
*
* A header file containing shared data structures shared with ADSP OS
*
* Copyright (C) 2015-2022 NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __ADSP_SHARED_STRUCT
#define __ADSP_SHARED_STRUCT

#include <linux/tegra_nvadsp.h>

/* Mailbox id used by the host-side app loader */
#define APP_LOADER_MBOX_ID 1
/* App flag: firmware starts the app at ADSP OS boot */
#define ADSP_APP_FLAG_START_ON_BOOT 0x1
#define ADSP_OS_LOAD_TIMEOUT	5000 /* 5000 ms */
#define DRAM_DEBUG_LOG_SIZE	0x4000 /* 16 KB */
#define NVADSP_NAME_SZ	128
/* Memory footprint an app requests in each ADSP memory region (bytes). */
struct app_mem_size {
	uint64_t dram;
	uint64_t dram_shared;
	uint64_t dram_shared_wc;	/* write-combined shared DRAM */
	uint64_t aram;
	uint64_t aram_x;
} __packed;

/* App descriptor announced by the ADSP OS for a (static) app image.
 * The *_ptr fields are ADSP-side addresses/offsets.
 */
struct adsp_shared_app {
	char name[NVADSP_NAME_SZ];
	struct app_mem_size mem_size;
	int32_t mod_ptr;
	int32_t flags;			/* e.g. ADSP_APP_FLAG_START_ON_BOOT */
	int32_t dram_data_ptr;
	int32_t shared_data_ptr;
	int32_t shared_wc_data_ptr;
	char version[16];
} __packed;

/* ADSP app loader message queue */
/* Host -> ADSP request to instantiate/start an app instance. */
struct run_app_instance_data {
	uint32_t adsp_mod_ptr;
	uint64_t host_ref;		/* host-side instance cookie echoed back */
	uint32_t adsp_ref;		/* ADSP-side instance cookie */
	uint32_t dram_data_ptr;
	uint32_t dram_shared_ptr;
	uint32_t dram_shared_wc_ptr;
	uint32_t aram_ptr;
	uint32_t aram_flag;
	uint32_t aram_x_ptr;
	uint32_t aram_x_flag;
	struct app_mem_size mem_size;
	nvadsp_app_args_t app_args;
	uint32_t stack_size;
	uint32_t core_id;
	uint32_t message;
} __packed;

struct app_loader_data {
	int32_t header[MSGQ_MESSAGE_HEADER_WSIZE];
	struct run_app_instance_data app_init;
} __packed;

/* Overlay of the raw msgq message and its typed payload */
union app_loader_message {
	msgq_message_t msgq_msg;
	struct app_loader_data data;
} __aligned(4);
/* Common header for messages originating from the ADSP OS */
struct adsp_os_message_header {
	int32_t header[MSGQ_MESSAGE_HEADER_WSIZE];
	uint32_t message;	/* message type discriminator */
} __packed;

/* ADSP app complete message queue */
struct app_complete_status_data {
	struct adsp_os_message_header header;
	uint64_t host_ref;	/* matches run_app_instance_data.host_ref */
	uint32_t adsp_ref;
	int32_t status;		/* app exit/result code */
} __packed;

/* Announcement of a statically linked app present in the ADSP image */
struct adsp_static_app_data {
	struct adsp_os_message_header header;
	struct adsp_shared_app shared_app;
} __packed;

/* Overlay of the raw msgq message and its possible typed payloads */
union app_complete_status_message {
	msgq_message_t msgq_msg;
	struct app_complete_status_data complete_status_data;
	struct adsp_static_app_data static_app_data;
} __aligned(4);

/* ADSP message pool structure */
#define ADSP_MAX_MSGQ_SIZE	8192
#define ADSP_MAX_MSGQ_WSIZE	(ADSP_MAX_MSGQ_SIZE / sizeof(int32_t))
#define ADSP_MSGQ_MAX_QUEUE_WSIZE	\
		(ADSP_MAX_MSGQ_WSIZE - (int32_t)MSGQ_HEADER_WSIZE)

/* Fixed-size backing storage for one message queue */
union app_loader_msgq {
	msgq_t msgq;
	struct {
		int32_t header[MSGQ_HEADER_WSIZE];
		int32_t queue[ADSP_MSGQ_MAX_QUEUE_WSIZE];
	};
};

/* ADSP APP shared message pool */
#pragma pack(8)
struct nvadsp_app_shared_msg_pool {
	union app_loader_msgq	app_loader_send_message;
	union app_loader_msgq	app_loader_recv_message;
};
#pragma pack()
/* ADSP shared OS args — parameters/areas exchanged with the ADSP OS.
 * Layout must stay in sync with the firmware.
 */
struct nvadsp_os_args {
	uint64_t adsp_freq_hz;		/* current adsp cpu clk rate, kept updated by host DFS */
	int32_t timer_prescalar;
	char logger[DRAM_DEBUG_LOG_SIZE];
	uint32_t dynamic_app_support;
	uint32_t chip_id;
	char reserved[120];
} __packed;

/* ARM MODE REGS (banked r13/r14 per processor mode, for crash dumps) */
struct arm_mode_regs_shared {
	uint32_t fiq_r13, fiq_r14;
	uint32_t irq_r13, irq_r14;
	uint32_t svc_r13, svc_r14;
	uint32_t abt_r13, abt_r14;
	uint32_t und_r13, und_r14;
	uint32_t sys_r13, sys_r14;
} __packed;

/* ARM FAULT FRAME (register state at the faulting instruction) */
struct arm_fault_frame_shared {
	uint32_t spsr;
	uint32_t usp;
	uint32_t ulr;
	uint32_t r[13];
	uint32_t pc;
} __packed;

/* ADSP ARM EXCEPTION CONTEXT */
struct nvadsp_exception_context {
	struct arm_fault_frame_shared frame;
	struct arm_mode_regs_shared regs;
	uint32_t stack_addr;
	uint32_t stack_dump[32];
	uint32_t exception_reason;
} __packed;

/* ADSP OS info/status. Keep in sync with firmware. */
#define MAX_OS_VERSION_BUF 32
struct nvadsp_os_info {
	char version[MAX_OS_VERSION_BUF];
	char reserved[128];
} __packed;

/* ADSP OS shared memory — the single region mapped by both host and ADSP */
#pragma pack(8)
struct nvadsp_shared_mem {
	struct nvadsp_app_shared_msg_pool app_shared_msg_pool;
	struct nvadsp_os_args os_args;
	struct nvadsp_os_info os_info;
	struct nvadsp_exception_context exception_context;
};
#pragma pack()

#endif /* __ADSP_SHARED_STRUCT */

View File

@@ -0,0 +1,716 @@
/*
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#define pr_fmt(fmt) "adspff: " fmt
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/semaphore.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/tegra_nvadsp.h>
#include <uapi/linux/sched/types.h>
#include "adspff.h"
#include "dev.h"
/* Hard cap on files ADSP may have open simultaneously on the host */
#define ADSPFF_MAX_OPEN_FILES	(32)

/* Host-side record for one file opened on behalf of ADSP.
 * Read and write offsets are tracked independently.
 */
struct file_struct {
	struct file *fp;	/* NULL until (and unless) the open succeeds */
	uint8_t file_name[ADSPFF_MAX_FILENAME_SIZE];
	unsigned int flags;	/* O_* flags the file was opened with */
	unsigned long long wr_offset;
	unsigned long long rd_offset;
	struct list_head list;	/* linked into file_list */
};

static struct list_head file_list;	/* all file_structs ever allocated */
static spinlock_t adspff_lock;
static int open_count;			/* number of entries in file_list */
/******************************************************************************
 * Kernel file functions
 ******************************************************************************/
/*
 * file_open() - open a file on behalf of ADSP from kernel context.
 * Returns the struct file, or NULL on failure — callers only check for
 * NULL, so the encoded error is deliberately not propagated.
 *
 * Fix: removed the dead store of PTR_ERR() into a local that was never
 * read.
 */
static struct file *file_open(const char *path, int flags, int rights)
{
	struct file *filp;
	mm_segment_t oldfs;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	filp = filp_open(path, flags, rights);
	set_fs(oldfs);

	if (IS_ERR(filp))
		return NULL;

	return filp;
}
/* Close a file previously returned by file_open(). */
static void file_close(struct file *file)
{
	filp_close(file, NULL);
}
/*
 * file_write() - write @size bytes from a kernel buffer at *@offset.
 * Advances *@offset; returns bytes written or a negative error.
 */
static int file_write(struct file *file, unsigned long long *offset,
		unsigned char *data, unsigned int size)
{
	mm_segment_t saved_fs = get_fs();
	int written;

	set_fs(KERNEL_DS);
	written = vfs_write(file, (const char __user *)data, size, offset);
	set_fs(saved_fs);

	return written;
}
/*
 * file_read() - read up to @size bytes into a kernel buffer at *@offset.
 * Advances *@offset; returns the vfs_read() result as uint32_t.
 */
static uint32_t file_read(struct file *file, unsigned long long *offset,
		unsigned char *data, unsigned int size)
{
	mm_segment_t saved_fs = get_fs();
	uint32_t nread;

	set_fs(KERNEL_DS);
	nread = vfs_read(file, (char __user *)data, size, offset);
	set_fs(saved_fs);

	return nread;
}
/*
 * file_size() - size of @file, determined by seeking to its end and back.
 * NOTE(review): the loff_t result is narrowed to uint32_t, so files over
 * 4 GiB (or a negative llseek error) are misreported — confirm whether
 * ADSP-side sizes can exceed that.
 */
static uint32_t file_size(struct file *file)
{
	mm_segment_t oldfs;
	uint32_t size = 0;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	size = vfs_llseek(file, 0, SEEK_END);
	vfs_llseek(file, 0, SEEK_SET);

	set_fs(oldfs);
	return size;
}
/******************************************************************************
 * ADSPFF file functions
 ******************************************************************************/
/* Shared msgq state mapped from the ADSP app, and the host-side mailbox */
static struct adspff_shared_state_t *adspff;
static struct nvadsp_mbox rx_mbox;

/** fopen mode strings accepted from ADSP (see set_flags()):          *
 * w  - open for writing (file need not exist)                        *
 * a  - open for appending (file need not exist)                      *
 * r+ - open for reading and writing, start at beginning              *
 * w+ - open for reading and writing (overwrite file)                 *
 * a+ - open for reading and writing (append if file exists)          */
static void set_flags(union adspff_message_t *m, unsigned int *flags)
{
if (0 == strcmp(m->msg.payload.fopen_msg.modes, "r+"))
*flags = O_RDWR;
else if (0 == strcmp(m->msg.payload.fopen_msg.modes, "w+"))
*flags = O_CREAT | O_RDWR | O_TRUNC;
else if (0 == strcmp(m->msg.payload.fopen_msg.modes, "a+"))
*flags = O_APPEND | O_RDWR;
else if (0 == strcmp(m->msg.payload.fopen_msg.modes, "r"))
*flags = O_RDONLY;
else if (0 == strcmp(m->msg.payload.fopen_msg.modes, "w"))
*flags = O_CREAT | O_WRONLY | O_TRUNC;
else if (0 == strcmp(m->msg.payload.fopen_msg.modes, "a"))
*flags = O_CREAT | O_APPEND | O_WRONLY;
else
*flags = O_CREAT | O_RDWR;
}
/*
 * checks if file is already opened
 * if yes, then returns the struct file_struct for the file
 * if no, then allocates a file_struct and adds to the list
 * and returns the pointer to the newly allocated file_struct
 * if ADSPFF_MAX_OPEN_FILES already open, returns NULL
 *
 * Fix: the kzalloc() result is now checked — previously a failed
 * allocation was counted, linked into file_list and dereferenced.
 */
static struct file_struct *check_file_opened(const char *path)
{
	struct file_struct *file = NULL;
	struct list_head *pos;

	/* assuming files opened by ADSP will
	 * never be actually closed in kernel
	 */
	list_for_each(pos, &file_list) {
		file = list_entry(pos, struct file_struct, list);
		if (!file->fp)
			break;	/* reuse a slot whose open failed */
		if (!strncmp(path, file->file_name,
			ADSPFF_MAX_FILENAME_SIZE)) {
			break;
		}
		file = NULL;
	}

	if (file != NULL)
		return file;

	if (open_count == ADSPFF_MAX_OPEN_FILES) {
		pr_err("adspff: %d files already opened\n",
			ADSPFF_MAX_OPEN_FILES);
		return NULL;
	}

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file)	/* don't count or link a failed allocation */
		return NULL;

	open_count++;
	list_add_tail(&file->list, &file_list);
	return file;
}
/*
 * adspff_fopen() - service a file-open request from ADSP.
 * Dequeues the fopen message, opens (or reuses) the host-side file, then
 * replies with the file_struct pointer (0 on failure) and rings the
 * mailbox.
 */
static void adspff_fopen(void)
{
	union adspff_message_t *message;
	union adspff_message_t *msg_recv;
	unsigned int flags = 0;
	int ret = 0;
	struct file_struct *file;

	message = kzalloc(sizeof(union adspff_message_t), GFP_KERNEL);
	if (!message)
		return;
	msg_recv = kzalloc(sizeof(union adspff_message_t), GFP_KERNEL);
	if (!msg_recv) {
		kfree(message);
		return;
	}

	message->msgq_msg.size = MSGQ_MSG_SIZE(struct fopen_msg_t);
	ret = msgq_dequeue_message(&adspff->msgq_send.msgq,
		(msgq_message_t *)message);
	if (ret < 0) {
		pr_err("fopen Dequeue failed %d.", ret);
		kfree(message);
		kfree(msg_recv);
		return;
	}

	file = check_file_opened(message->msg.payload.fopen_msg.fname);
	if (file && !file->fp) {
		/* open a new file */
		set_flags(message, &flags);
		pr_info("adspff: opening file %s\n",
			message->msg.payload.fopen_msg.fname);
		file->fp = file_open(
			(const char *)message->msg.payload.fopen_msg.fname,
			flags, 0777); /* S_IRWXU | S_IRWXG | S_IRWXO */
		file->wr_offset = 0;
		file->rd_offset = 0;
		memcpy(file->file_name,
			message->msg.payload.fopen_msg.fname,
			ADSPFF_MAX_FILENAME_SIZE);
		file->flags = flags;
	}

	/* open failed: report NULL to ADSP (the slot stays for reuse) */
	if (file && !file->fp) {
		file = NULL;
		pr_err("File not found - %s\n",
			(const char *) message->msg.payload.fopen_msg.fname);
	}

	/* NOTE(review): the reply carries the raw kernel pointer cast to
	 * int64_t as an opaque handle; later messages cast it back — the
	 * ADSP side is trusted with host pointers.
	 */
	msg_recv->msgq_msg.size = MSGQ_MSG_SIZE(struct fopen_recv_msg_t);
	msg_recv->msg.payload.fopen_recv_msg.file = (int64_t)file;

	ret = msgq_queue_message(&adspff->msgq_recv.msgq,
			(msgq_message_t *)msg_recv);
	if (ret < 0) {
		pr_err("fopen Enqueue failed %d.", ret);
		if (file) {
			file_close(file->fp);
			file->fp = NULL;
		}
		kfree(message);
		kfree(msg_recv);
		return;
	}

	nvadsp_mbox_send(&rx_mbox, adspff_cmd_fopen_recv,
		NVADSP_MBOX_SMSG, 0, 0);
	kfree(message);
	kfree(msg_recv);
}
/* True when the file was opened readable: O_RDONLY is 0, so !flags
 * means read-only; otherwise O_RDWR must be set.
 */
static inline unsigned int is_read_file(struct file_struct *file)
{
	return ((!file->flags) || (file->flags & O_RDWR));
}

/* True when the file was opened writable (O_WRONLY or O_RDWR). */
static inline unsigned int is_write_file(struct file_struct *file)
{
	return file->flags & (O_WRONLY | O_RDWR);
}
/*
 * adspff_fclose() - service a file-close request from ADSP.
 * The host file stays open (files are never actually closed here);
 * non-append files simply have their offsets rewound so a later reopen
 * starts from the beginning.
 */
static void adspff_fclose(void)
{
	union adspff_message_t *msg;
	struct file_struct *file;
	int32_t rc;

	msg = kzalloc(sizeof(union adspff_message_t), GFP_KERNEL);
	if (msg == NULL)
		return;

	msg->msgq_msg.size = MSGQ_MSG_SIZE(struct fclose_msg_t);
	rc = msgq_dequeue_message(&adspff->msgq_send.msgq,
				  (msgq_message_t *)msg);
	if (rc < 0) {
		pr_err("fclose Dequeue failed %d.", rc);
		kfree(msg);
		return;
	}

	file = (struct file_struct *)msg->msg.payload.fclose_msg.file;
	if (file && !(file->flags & O_APPEND)) {
		if (is_read_file(file))
			file->rd_offset = 0;
		if (is_write_file(file))
			file->wr_offset = 0;
	}

	kfree(msg);
}
/*
 * adspff_fsize - service an "fsize" request from the ADSP.
 *
 * Dequeues an fsize message carrying a host file handle, queries the
 * file size and replies with an ack message carrying the size, then
 * notifies the ADSP through the mailbox.
 */
static void adspff_fsize(void)
{
	union adspff_message_t *msg_recv;
	union adspff_message_t message;
	struct file_struct *file = NULL;
	int32_t ret = 0;
	uint32_t size = 0;

	msg_recv = kzalloc(sizeof(union adspff_message_t), GFP_KERNEL);
	/* fix: msg_recv was dereferenced without a NULL check */
	if (!msg_recv)
		return;
	msg_recv->msgq_msg.size = MSGQ_MSG_SIZE(struct ack_msg_t);
	message.msgq_msg.size = MSGQ_MSG_SIZE(struct fsize_msg_t);
	ret = msgq_dequeue_message(&adspff->msgq_send.msgq,
			(msgq_message_t *)&message);
	if (ret < 0) {
		pr_err("fsize Dequeue failed %d.", ret);
		kfree(msg_recv);
		return;
	}
	file = (struct file_struct *)message.msg.payload.fsize_msg.file;
	if (file)
		size = file_size(file->fp);

	/* send ack with the size (0 when no valid handle was supplied) */
	msg_recv->msg.payload.ack_msg.size = size;
	ret = msgq_queue_message(&adspff->msgq_recv.msgq,
			(msgq_message_t *)msg_recv);
	if (ret < 0) {
		pr_err("fsize Enqueue failed %d.", ret);
		kfree(msg_recv);
		return;
	}
	nvadsp_mbox_send(&rx_mbox, adspff_cmd_ack,
			NVADSP_MBOX_SMSG, 0, 0);
	kfree(msg_recv);
}
/*
 * adspff_fwrite - service an "fwrite" request from the ADSP.
 *
 * Dequeues an fwrite message, copies @size bytes out of the shared
 * circular write buffer into the host file (in at most two chunks when
 * the region wraps past the end of the buffer), advances the buffer's
 * read index, and acks with the number of bytes actually written.
 *
 * NOTE(review): the file handle received from the ADSP is dereferenced
 * without a NULL/validity check - confirm the firmware guarantees it.
 */
static void adspff_fwrite(void)
{
	union adspff_message_t message;
	union adspff_message_t *msg_recv;
	struct file_struct *file = NULL;
	int ret = 0;
	uint32_t size = 0;
	uint32_t bytes_to_write = 0;
	uint32_t bytes_written = 0;

	msg_recv = kzalloc(sizeof(union adspff_message_t), GFP_KERNEL);
	if (!msg_recv)
		return;
	msg_recv->msgq_msg.size = MSGQ_MSG_SIZE(struct ack_msg_t);
	message.msgq_msg.size = MSGQ_MSG_SIZE(struct fwrite_msg_t);
	ret = msgq_dequeue_message(&adspff->msgq_send.msgq,
			(msgq_message_t *)&message);
	if (ret < 0) {
		pr_err("fwrite Dequeue failed %d.", ret);
		kfree(msg_recv);
		return;
	}
	file = (struct file_struct *)message.msg.payload.fwrite_msg.file;
	size = message.msg.payload.fwrite_msg.size;
	/* first chunk: from read_index up to (at most) the end of the buffer */
	bytes_to_write = ((adspff->write_buf.read_index + size) < ADSPFF_SHARED_BUFFER_SIZE) ?
			size : (ADSPFF_SHARED_BUFFER_SIZE - adspff->write_buf.read_index);
	ret = file_write(file->fp, &file->wr_offset,
		adspff->write_buf.data + adspff->write_buf.read_index, bytes_to_write);
	bytes_written += ret;
	/* second chunk: wrapped remainder from the start of the buffer */
	if ((size - bytes_to_write) > 0) {
		ret = file_write(file->fp, &file->wr_offset,
				adspff->write_buf.data, size - bytes_to_write);
		bytes_written += ret;
	}
	/* consume @size bytes even if fewer were written to the file */
	adspff->write_buf.read_index =
		(adspff->write_buf.read_index + size) % ADSPFF_SHARED_BUFFER_SIZE;
	/* send ack */
	msg_recv->msg.payload.ack_msg.size = bytes_written;
	ret = msgq_queue_message(&adspff->msgq_recv.msgq,
			(msgq_message_t *)msg_recv);
	if (ret < 0) {
		pr_err("adspff: fwrite Enqueue failed %d.", ret);
		kfree(msg_recv);
		return;
	}
	nvadsp_mbox_send(&rx_mbox, adspff_cmd_ack,
			NVADSP_MBOX_SMSG, 0, 0);
	kfree(msg_recv);
}
/*
 * adspff_fread - service an "fread" request from the ADSP.
 *
 * Reads up to @size bytes from the host file into the shared circular
 * read buffer (producer: this CPU via write_index; consumer: the ADSP
 * via read_index) and acks with the number of bytes actually read.
 * If the buffer does not currently have @size bytes free, nothing is
 * read and the ack carries 0.
 */
static void adspff_fread(void)
{
	union adspff_message_t *message;
	union adspff_message_t *msg_recv;
	struct file_struct *file = NULL;
	uint32_t bytes_free;
	uint32_t wi = adspff->read_buf.write_index;
	uint32_t ri = adspff->read_buf.read_index;
	uint8_t can_wrap = 0;
	uint32_t size = 0, size_read = 0;
	int32_t ret = 0;

	/* One slot is always kept empty to distinguish full from empty. */
	if (ri <= wi) {
		/* free space spans the end of the buffer: writes may wrap */
		bytes_free = ADSPFF_SHARED_BUFFER_SIZE - wi + ri - 1;
		can_wrap = 1;
	} else {
		bytes_free = ri - wi - 1;
		can_wrap = 0;
	}
	message = kzalloc(sizeof(union adspff_message_t), GFP_KERNEL);
	if (!message)
		return;
	msg_recv = kzalloc(sizeof(union adspff_message_t), GFP_KERNEL);
	if (!msg_recv) {
		kfree(message);
		return;
	}
	msg_recv->msgq_msg.size = MSGQ_MSG_SIZE(struct ack_msg_t);
	message->msgq_msg.size = MSGQ_MSG_SIZE(struct fread_msg_t);
	ret = msgq_dequeue_message(&adspff->msgq_send.msgq,
			(msgq_message_t *)message);
	if (ret < 0) {
		pr_err("fread Dequeue failed %d.", ret);
		kfree(message);
		kfree(msg_recv);
		return;
	}
	/* NOTE(review): handle from the ADSP is used without a NULL check */
	file = (struct file_struct *)message->msg.payload.fread_msg.file;
	size = message->msg.payload.fread_msg.size;
	if (bytes_free < size) {
		size_read = 0;
		goto send_ack;
	}
	if (can_wrap) {
		/* first chunk up to the end of the buffer, then wrap to 0 */
		uint32_t bytes_to_read = (size < (ADSPFF_SHARED_BUFFER_SIZE - wi)) ?
			size : (ADSPFF_SHARED_BUFFER_SIZE - wi);
		ret = file_read(file->fp, &file->rd_offset,
			adspff->read_buf.data + wi, bytes_to_read);
		size_read = ret;
		/* short read (e.g. EOF): ack what we got */
		if (ret < bytes_to_read)
			goto send_ack;
		if ((size - bytes_to_read) > 0) {
			ret = file_read(file->fp, &file->rd_offset,
				adspff->read_buf.data, size - bytes_to_read);
			size_read += ret;
			goto send_ack;
		}
	} else {
		ret = file_read(file->fp, &file->rd_offset,
			adspff->read_buf.data + wi, size);
		size_read = ret;
		goto send_ack;
	}
send_ack:
	msg_recv->msg.payload.ack_msg.size = size_read;
	ret = msgq_queue_message(&adspff->msgq_recv.msgq,
			(msgq_message_t *)msg_recv);
	if (ret < 0) {
		pr_err("fread Enqueue failed %d.", ret);
		kfree(message);
		kfree(msg_recv);
		return;
	}
	/* publish the newly-filled region to the ADSP */
	adspff->read_buf.write_index =
		(adspff->read_buf.write_index + size_read) % ADSPFF_SHARED_BUFFER_SIZE;
	nvadsp_mbox_send(&rx_mbox, adspff_cmd_ack,
			NVADSP_MBOX_SMSG, 0, 0);
	kfree(message);
	kfree(msg_recv);
}
#if KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE
/* FIFO priority for the worker on kernels without sched_set_fifo_low() */
static const struct sched_param param = {
	.sched_priority = 1,
};
#endif
/* Worker thread that services mailbox commands in process context */
static struct task_struct *adspff_kthread;
/* Commands queued by adspff_msg_handler(); additions under adspff_lock */
static struct list_head adspff_kthread_msgq_head;
/* Wakes the worker when a command is queued or the thread is stopped */
static wait_queue_head_t wait_queue;
/* One queued mailbox command awaiting the worker thread */
struct adspff_kthread_msg {
	uint32_t msg_id;
	struct list_head list;
};
/*
 * adspff_kthread_fn - worker that drains queued mailbox commands.
 *
 * Sleeps until adspff_msg_handler() queues a command or kthread_stop()
 * is called, then dispatches each command to its handler.
 *
 * Fix: a kthread must terminate by returning from its thread function;
 * calling do_exit() from a kthread is deprecated (do_exit() is no
 * longer available to modules on recent kernels), and the trailing
 * do_exit(ret) was unreachable.
 */
static int adspff_kthread_fn(void *data)
{
	struct adspff_kthread_msg *kmsg;
	unsigned long flags;
	int ret;

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(wait_queue, kthread_should_stop()
			|| !list_empty(&adspff_kthread_msgq_head));
		(void)ret; /* on signal just re-evaluate the conditions */
		if (kthread_should_stop())
			break;
		if (list_empty(&adspff_kthread_msgq_head))
			continue;
		/* NOTE(review): head peeked without adspff_lock; relies on
		 * this thread being the only consumer of the list. */
		kmsg = list_first_entry(&adspff_kthread_msgq_head,
			struct adspff_kthread_msg, list);
		switch (kmsg->msg_id) {
		case adspff_cmd_fopen:
			adspff_fopen();
			break;
		case adspff_cmd_fclose:
			adspff_fclose();
			break;
		case adspff_cmd_fwrite:
			adspff_fwrite();
			break;
		case adspff_cmd_fread:
			adspff_fread();
			break;
		case adspff_cmd_fsize:
			adspff_fsize();
			break;
		default:
			pr_warn("adspff: kthread unsupported msg %d\n",
				kmsg->msg_id);
		}
		spin_lock_irqsave(&adspff_lock, flags);
		list_del(&kmsg->list);
		spin_unlock_irqrestore(&adspff_lock, flags);
		kfree(kmsg);
	}
	return 0;
}
/******************************************************************************
* ADSP mailbox message handler
******************************************************************************/
/*
 * adspff_msg_handler - mailbox callback: queue a command for the worker.
 *
 * May run in atomic context, hence GFP_ATOMIC.  Fix: allocate before
 * taking the spinlock - there is no need to hold the lock across the
 * allocation, and keeping the critical section minimal is cheaper.
 *
 * Returns 0 on success, -ENOMEM if the command could not be queued.
 */
static int adspff_msg_handler(uint32_t msg, void *data)
{
	unsigned long flags;
	struct adspff_kthread_msg *kmsg;

	kmsg = kzalloc(sizeof(*kmsg), GFP_ATOMIC);
	if (!kmsg)
		return -ENOMEM;
	kmsg->msg_id = msg;

	spin_lock_irqsave(&adspff_lock, flags);
	list_add_tail(&kmsg->list, &adspff_kthread_msgq_head);
	wake_up(&wait_queue);
	spin_unlock_irqrestore(&adspff_lock, flags);
	return 0;
}
/*
 * adspff_set - debugfs write hook: writing 1 closes and frees every
 * tracked host file and resets the open-file count.
 */
static int adspff_set(void *data, u64 val)
{
	struct file_struct *cur, *tmp;

	if (val != 1)
		return 0;

	list_for_each_entry_safe(cur, tmp, &file_list, list) {
		list_del(&cur->list);
		if (cur->fp)
			file_close(cur->fp);
		kfree(cur);
	}
	open_count = 0;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(adspff_fops, NULL, adspff_set, "%llu\n");
#ifdef CONFIG_DEBUG_FS
/*
 * adspff_debugfs_init - create adspff/close_files under the ADSP
 * debugfs root.  Returns 0 on success, -ENOMEM otherwise.
 *
 * Fix: on modern kernels debugfs_create_dir()/debugfs_create_file()
 * report failure with ERR_PTR(), not NULL, so the plain NULL checks
 * could never detect an error.
 */
static int adspff_debugfs_init(struct nvadsp_drv_data *drv)
{
	struct dentry *d, *dir;

	if (!drv->adsp_debugfs_root)
		return -ENOMEM;
	dir = debugfs_create_dir("adspff", drv->adsp_debugfs_root);
	if (IS_ERR_OR_NULL(dir))
		return -ENOMEM;
	d = debugfs_create_file(
		"close_files", 0200, /* S_IWUSR */
		dir, NULL, &adspff_fops);
	if (IS_ERR_OR_NULL(d))
		return -ENOMEM;
	return 0;
}
#endif
/*
 * adspff_init - bring up the ADSP file-io bridge.
 *
 * Loads and initialises the "adspff" ADSP app, creates the worker
 * thread, opens the receive mailbox and (optionally) the debugfs
 * interface.  Returns 0 on success, a negative value on failure.
 *
 * Fixes:
 *  - adspff_lock, the message list and the wait queue are used by
 *    adspff_msg_handler(), which can fire as soon as the mailbox is
 *    opened; they are now initialised before nvadsp_mbox_open().
 *  - kthread_create() failure is detected with IS_ERR() instead of
 *    comparing against two specific ERR_PTR values.
 *  - a debugfs failure is non-fatal (only warned about), so it is no
 *    longer propagated as the function's return value.
 */
int adspff_init(struct platform_device *pdev)
{
	int ret = 0;
	nvadsp_app_handle_t handle;
	nvadsp_app_info_t *app_info;
#ifdef CONFIG_DEBUG_FS
	struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
#endif

	handle = nvadsp_app_load("adspff", "adspff.elf");
	if (!handle)
		return -ENOENT;

	app_info = nvadsp_app_init(handle, NULL);
	if (IS_ERR_OR_NULL(app_info)) {
		pr_err("unable to init app adspff\n");
		return -1;
	}

	/* NOTE(review): thread name "adspp_kthread" looks like a typo for
	 * "adspff_kthread"; kept as-is to avoid changing visible names. */
	adspff_kthread = kthread_create(adspff_kthread_fn,
			NULL, "adspp_kthread");
	if (IS_ERR(adspff_kthread)) {
		pr_err("adspff kthread_create failed, error = %ld\n",
			PTR_ERR(adspff_kthread));
		return -1;
	}

	/* Must be ready before the mailbox handler can run. */
	spin_lock_init(&adspff_lock);
	INIT_LIST_HEAD(&adspff_kthread_msgq_head);
	INIT_LIST_HEAD(&file_list);
	init_waitqueue_head(&wait_queue);

	adspff = ADSPFF_SHARED_STATE(app_info->mem.shared);
	ret = nvadsp_mbox_open(&rx_mbox, &adspff->mbox_id,
		"adspff", adspff_msg_handler, NULL);
	if (ret < 0) {
		pr_err("Failed to open mbox %d", adspff->mbox_id);
		return -1;
	}

#ifdef CONFIG_DEBUG_FS
	ret = adspff_debugfs_init(drv);
	if (ret)
		pr_warn("adspff: failed to create debugfs entry\n");
	ret = 0;	/* debugfs is optional */
#endif

#if KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE
	sched_setscheduler(adspff_kthread, SCHED_FIFO, &param);
#else
	sched_set_fifo_low(adspff_kthread);
#endif
	/* Reference paired with put_task_struct() in adspff_exit(). */
	get_task_struct(adspff_kthread);
	wake_up_process(adspff_kthread);
	return ret;
}
/*
 * adspff_exit - tear down the ADSP file-io bridge.
 *
 * Closes the receive mailbox first so no new commands are queued, then
 * stops the worker thread and drops the reference taken with
 * get_task_struct() in adspff_init().
 */
void adspff_exit(void)
{
	nvadsp_mbox_close(&rx_mbox);
	kthread_stop(adspff_kthread);
	put_task_struct(adspff_kthread);
}

View File

@@ -0,0 +1,145 @@
/*
* tegra_adspff.h - Shared ADSPFF interface between Tegra ADSP File
* System driver and ADSP side user space code.
* Copyright (c) 2016-2019 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
#ifndef _TEGRA_ADSPFF_H_
#define _TEGRA_ADSPFF_H_
#ifdef __cplusplus
extern "C" {
#endif
/******************************************************************************
* Defines
******************************************************************************/
/* TODO: fine tuning */
#define ADSPFF_MSG_QUEUE_WSIZE 1024
#define ADSPFF_WRITE_DATA_SIZE 512
#define ADSPFF_READ_DATA_SIZE 1024
#define ADSPFF_SHARED_BUFFER_SIZE (128 * 1024)
#define ADSPFF_MAX_FILENAME_SIZE (250)
/**
 * adspff_mbx_cmd: commands exchanged using mailbox.
 *
 * @adspff_cmd_fopen: open file on host
 * @adspff_cmd_fclose: close file on host
 * @adspff_cmd_fwrite: write data in an open file on host
 * @adspff_cmd_fread: read data from an open file on host
 * @adspff_cmd_fopen_recv: host reply to fopen carrying the file handle
 * @adspff_cmd_ack: host acknowledgement carrying a byte count / size
 * @adspff_cmd_fsize: query the size of an open file on host
 */
enum adspff_mbx_cmd {
adspff_cmd_fopen = 0,
adspff_cmd_fclose,
adspff_cmd_fwrite,
adspff_cmd_fread,
adspff_cmd_fopen_recv,
adspff_cmd_ack,
adspff_cmd_fsize,
};
/******************************************************************************
* Types
******************************************************************************/
/* supported message payloads */
/* fopen request: open a file on the host */
struct fopen_msg_t {
	uint8_t fname[ADSPFF_MAX_FILENAME_SIZE];
	uint8_t modes[3];	/* open mode; presumably fopen()-style chars - verify */
};
/* fwrite request: write @size bytes taken from the shared write buffer */
struct fwrite_msg_t {
	int64_t file;	/* host-side file handle (pointer cast to int64_t) */
	int32_t size;
};
/* fread request: read up to @size bytes into the shared read buffer */
struct fread_msg_t {
	int64_t file;
	int32_t size;
};
/* fclose request */
struct fclose_msg_t {
	int64_t file;
};
/* reply to fopen: the host file handle (0 when the open failed) */
struct fopen_recv_msg_t {
	int64_t file;
};
/* fsize request */
struct fsize_msg_t {
	int64_t file;
};
/* generic ack: bytes written/read, or the file size for fsize */
struct ack_msg_t {
	int32_t size;
};
#pragma pack(4)
/* app message definition */
union adspff_message_t {
msgq_message_t msgq_msg;
struct {
int32_t header[MSGQ_MESSAGE_HEADER_WSIZE];
union {
struct fopen_msg_t fopen_msg;
struct fwrite_msg_t fwrite_msg;
struct fread_msg_t fread_msg;
struct fclose_msg_t fclose_msg;
struct fopen_recv_msg_t fopen_recv_msg;
struct ack_msg_t ack_msg;
struct fsize_msg_t fsize_msg;
} payload;
} msg;
};
/* app queue definition */
union adspff_msgq_t {
msgq_t msgq;
struct {
int32_t header[MSGQ_HEADER_WSIZE];
int32_t queue[ADSPFF_MSG_QUEUE_WSIZE];
} app_msgq;
};
#pragma pack()
#define MSGQ_MSG_SIZE(x) \
(((sizeof(x) + sizeof(int32_t) - 1) & (~(sizeof(int32_t)-1))) >> 2)
/**
* ADSPFF state structure shared between ADSP & CPU
*/
typedef struct {
uint32_t write_index;
uint32_t read_index;
uint8_t data[ADSPFF_SHARED_BUFFER_SIZE];
} adspff_shared_buffer_t;
struct adspff_shared_state_t {
uint16_t mbox_id;
union adspff_msgq_t msgq_recv;
union adspff_msgq_t msgq_send;
adspff_shared_buffer_t write_buf;
adspff_shared_buffer_t read_buf;
};
#define ADSPFF_SHARED_STATE(x) \
((struct adspff_shared_state_t *)x)
#ifdef __cplusplus
}
#endif
#endif /* #ifndef _TEGRA_ADSPFF_H_ */

View File

@@ -0,0 +1,201 @@
/*
* amc.c
*
* AMC and ARAM handling
*
* Copyright (C) 2014-2021, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/tegra_nvadsp.h>
#include <linux/irqchip/tegra-agic.h>
#include <linux/interrupt.h>
#include <linux/version.h>
#if KERNEL_VERSION(4, 15, 0) > LINUX_VERSION_CODE
#include <soc/tegra/chip-id.h>
#else
#include <soc/tegra/fuse.h>
#endif
#include "dev.h"
#include "amc.h"
/* Cached by nvadsp_setup_amc_interrupts() for the accessors below */
static struct platform_device *nvadsp_pdev;
static struct nvadsp_drv_data *nvadsp_drv_data;
/* Read an AMC register at byte offset @reg within the AMC aperture */
static inline u32 amc_readl(u32 reg)
{
	return readl(nvadsp_drv_data->base_regs[AMC] + reg);
}
/* Write @val to the AMC register at byte offset @reg */
static inline void amc_writel(u32 val, u32 reg)
{
	writel(val, nvadsp_drv_data->base_regs[AMC] + reg);
}
/*
 * wmemcpy_to_aram - copy @wlen 32-bit words from memory into ARAM at
 * byte address @to_aram, going through the AMC aperture window.
 *
 * Fix: the aperture base must be @to_aram aligned down to the aperture
 * size.  The original "to_aram & AMC_ARAM_APERTURE_DATA_LEN" preserved
 * only a single bit and produced a wrong base for addresses at or
 * beyond twice the aperture size.  The in-tree caller passes
 * AMC_ARAM_START (0), so existing behavior is unchanged.
 */
static void wmemcpy_to_aram(u32 to_aram, const u32 *from_mem, size_t wlen)
{
	u32 base, offset;

	base = to_aram & ~(AMC_ARAM_APERTURE_DATA_LEN - 1);
	amc_writel(base, AMC_ARAM_APERTURE_BASE);
	offset = to_aram % AMC_ARAM_APERTURE_DATA_LEN;

	while (wlen--) {
		/* slide the aperture window when we run off its end */
		if (offset == AMC_ARAM_APERTURE_DATA_LEN) {
			base += AMC_ARAM_APERTURE_DATA_LEN;
			amc_writel(base, AMC_ARAM_APERTURE_BASE);
			offset = 0;
		}
		amc_writel(*from_mem, AMC_ARAM_APERTURE_DATA_START + offset);
		from_mem++;
		offset += 4;
	}
}
/*
 * wmemcpy_from_aram - copy @wlen 32-bit words from ARAM byte address
 * @from_aram into memory, going through the AMC aperture window.
 *
 * Fix: align the aperture base down to the aperture size (same masking
 * defect as wmemcpy_to_aram(); "& AMC_ARAM_APERTURE_DATA_LEN" kept a
 * single bit only).  The in-tree caller passes AMC_ARAM_START (0), so
 * existing behavior is unchanged.
 */
static void wmemcpy_from_aram(u32 *to_mem, const u32 from_aram, size_t wlen)
{
	u32 base, offset;

	base = from_aram & ~(AMC_ARAM_APERTURE_DATA_LEN - 1);
	amc_writel(base, AMC_ARAM_APERTURE_BASE);
	offset = from_aram % AMC_ARAM_APERTURE_DATA_LEN;

	while (wlen--) {
		/* slide the aperture window when we run off its end */
		if (offset == AMC_ARAM_APERTURE_DATA_LEN) {
			base += AMC_ARAM_APERTURE_DATA_LEN;
			amc_writel(base, AMC_ARAM_APERTURE_BASE);
			offset = 0;
		}
		*to_mem = amc_readl(AMC_ARAM_APERTURE_DATA_START + offset);
		to_mem++;
		offset += 4;
	}
}
/* Snapshot the entire ARAM into the driver's saved state. Returns 0. */
int nvadsp_aram_save(struct platform_device *pdev)
{
	struct nvadsp_drv_data *d = platform_get_drvdata(pdev);
	wmemcpy_from_aram(d->state.aram, AMC_ARAM_START, AMC_ARAM_WSIZE);
	return 0;
}
/* Write the snapshot taken by nvadsp_aram_save() back to ARAM. Returns 0. */
int nvadsp_aram_restore(struct platform_device *pdev)
{
	struct nvadsp_drv_data *ndd = platform_get_drvdata(pdev);
	wmemcpy_to_aram(AMC_ARAM_START, ndd->state.aram, AMC_ARAM_WSIZE);
	return 0;
}
/*
 * nvadsp_amc_save - save the AMC registers at offsets 0x0 and 0x8 into
 * the driver state.  Always returns 0.
 */
int nvadsp_amc_save(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
	static const u32 reg_offsets[] = { 0x0, 0x8 };
	int idx;

	for (idx = 0; idx < 2; idx++)
		drv->state.amc_regs[idx] =
			readl(drv->base_regs[AMC] + reg_offsets[idx]);
	return 0;
}
/*
 * nvadsp_amc_restore - write back the AMC registers (offsets 0x0 and
 * 0x8) saved by nvadsp_amc_save().  Always returns 0.
 */
int nvadsp_amc_restore(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
	static const u32 reg_offsets[] = { 0x0, 0x8 };
	int idx;

	for (idx = 0; idx < 2; idx++)
		writel(drv->state.amc_regs[idx],
		       drv->base_regs[AMC] + reg_offsets[idx]);
	return 0;
}
/*
 * nvadsp_amc_error_int_handler - AMC error interrupt handler.
 *
 * Logs invalid ARAM / AMC-register accesses reported by the hardware
 * and clears the corresponding interrupt bits.
 */
static irqreturn_t nvadsp_amc_error_int_handler(int irq, void *devid)
{
	u32 val, addr, status, intr = 0;
	status = amc_readl(AMC_INT_STATUS);
	addr = amc_readl(AMC_ERROR_ADDR);
	if (status & AMC_INT_STATUS_ARAM) {
		/*
		 * Ignore addresses lesser than AMC_ERROR_ADDR_IGNORE (4k)
		 * as those are spurious ones due a hardware issue.
		 */
		if (!(nvadsp_drv_data->chip_data->amc_err_war) ||
		    (addr > AMC_ERROR_ADDR_IGNORE))
			pr_info("nvadsp: invalid ARAM access. address: 0x%x\n",
				addr);
		intr |= AMC_INT_INVALID_ARAM_ACCESS;
	}
	if (status & AMC_INT_STATUS_REG) {
		pr_info("nvadsp: invalid AMC reg access. address: 0x%x\n",
			addr);
		intr |= AMC_INT_INVALID_REG_ACCESS;
	}
	/* acknowledge only the conditions handled above */
	val = amc_readl(AMC_INT_CLR);
	val |= intr;
	amc_writel(val, AMC_INT_CLR);
	return IRQ_HANDLED;
}
/*
 * nvadsp_free_amc_interrupts - release the AMC error IRQ requested by
 * nvadsp_setup_amc_interrupts().  In hypervisor mode the IRQ was never
 * requested, so nothing is freed.
 *
 * Fix: removed the unused local `node` (assigned but never read).
 */
void nvadsp_free_amc_interrupts(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;

	if (!is_tegra_hypervisor_mode())
		devm_free_irq(dev, drv->agic_irqs[AMC_ERR_VIRQ], pdev);
}
/*
 * nvadsp_setup_amc_interrupts - cache driver context for the register
 * accessors and request the AMC error IRQ (skipped under a hypervisor).
 * Returns 0 on success or the devm_request_irq() error code.
 *
 * Fix: removed the unused local `node` (assigned but never read).
 */
int nvadsp_setup_amc_interrupts(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	int ret = 0;

	/* used by amc_readl()/amc_writel() and the IRQ handler */
	nvadsp_pdev = pdev;
	nvadsp_drv_data = drv;

	if (!is_tegra_hypervisor_mode())
		ret = devm_request_irq(dev, drv->agic_irqs[AMC_ERR_VIRQ],
				       nvadsp_amc_error_int_handler, 0,
				       "AMC error int", pdev);
	return ret;
}

View File

@@ -0,0 +1,58 @@
/*
* amc.h
*
* A header file for AMC/ARAM
*
* Copyright (C) 2014 NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __TEGRA_NVADSP_AMC_H
#define __TEGRA_NVADSP_AMC_H
#define AMC_CONFIG 0x00
#define AMC_CONFIG_ALIASING (1 << 0)
#define AMC_CONFIG_CARVEOUT (1 << 1)
#define AMC_CONFIG_ERR_RESP (1 << 2)
#define AMC_INT_STATUS (0x04)
#define AMC_INT_STATUS_ARAM (1 << 0)
#define AMC_INT_STATUS_REG (1 << 1)
#define AMC_INT_MASK 0x08
#define AMC_INT_SET 0x0C
#define AMC_INT_CLR 0x10
#define AMC_INT_INVALID_ARAM_ACCESS (1 << 0)
#define AMC_INT_INVALID_REG_ACCESS (1 << 1)
#define AMC_ERROR_ADDR 0x14
#define AMC_ERROR_ADDR_IGNORE SZ_4K
#define AMC_REGS 0x1000
#define AMC_ARAM_APERTURE_BASE 0x28
#define AMC_ARAM_APERTURE_DATA_START 0x800
#define AMC_ARAM_APERTURE_DATA_LEN 0x800 /* 2KB */
#define AMC_ARAM_ALIAS0 0x00400000
#define AMC_ARAM_ALIAS1 0x00500000
#define AMC_ARAM_ALIAS2 0x00600000
#define AMC_ARAM_ALIAS3 0x00700000
#define AMC_ARAM_START 0
#define AMC_ARAM_SIZE SZ_64K
#define AMC_ARAM_WSIZE (AMC_ARAM_SIZE >> 2)
int nvadsp_aram_save(struct platform_device *pdev);
int nvadsp_aram_restore(struct platform_device *pdev);
int nvadsp_amc_save(struct platform_device *pdev);
int nvadsp_amc_restore(struct platform_device *pdev);
#endif /* __TEGRA_NVADSP_AMC_H */

View File

@@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* amisc.h - AMISC register access
*
* Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
*
*/
#ifndef __TEGRA_NVADSP_AMISC_H
#define __TEGRA_NVADSP_AMISC_H
#include "dev.h"
#define AMISC_ADSP_STATUS (0x14)
#define AMISC_ADSP_L2_CLKSTOPPED (1 << 30)
#define AMISC_ADSP_L2_IDLE (1 << 31)
/* Read the AMISC register at byte offset @reg from the AMISC aperture */
static inline u32 amisc_readl(struct nvadsp_drv_data *drv_data, u32 reg)
{
	return readl(drv_data->base_regs[AMISC] + reg);
}
#endif /* __TEGRA_NVADSP_AMISC_H */

View File

@@ -0,0 +1,984 @@
/*
* Copyright (C) 2014-2016, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
#include <linux/platform/tegra/clock.h>
#include <linux/irqchip/tegra-agic.h>
#include <linux/irq.h>
#include "ape_actmon.h"
#include "dev.h"
#define ACTMON_DEV_CTRL 0x00
#define ACTMON_DEV_CTRL_ENB (0x1 << 31)
#define ACTMON_DEV_CTRL_UP_WMARK_NUM_SHIFT 26
#define ACTMON_DEV_CTRL_UP_WMARK_NUM_MASK (0x7 << 26)
#define ACTMON_DEV_CTRL_DOWN_WMARK_NUM_SHIFT 21
#define ACTMON_DEV_CTRL_DOWN_WMARK_NUM_MASK (0x7 << 21)
#define ACTMON_DEV_CTRL_UP_WMARK_ENB (0x1 << 19)
#define ACTMON_DEV_CTRL_DOWN_WMARK_ENB (0x1 << 18)
#define ACTMON_DEV_CTRL_AVG_UP_WMARK_ENB (0x1 << 17)
#define ACTMON_DEV_CTRL_AVG_DOWN_WMARK_ENB (0x1 << 16)
#define ACTMON_DEV_CTRL_AT_END_ENB (0x1 << 15)
#define ACTMON_DEV_CTRL_PERIODIC_ENB (0x1 << 13)
#define ACTMON_DEV_CTRL_K_VAL_SHIFT 10
#define ACTMON_DEV_CTRL_K_VAL_MASK (0x7 << 10)
#define ACTMON_DEV_CTRL_SAMPLE_PERIOD_VAL_SHIFT (0)
#define ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK (0xff << 0)
#define ACTMON_DEV_UP_WMARK 0x04
#define ACTMON_DEV_DOWN_WMARK 0x08
#define ACTMON_DEV_AVG_UP_WMARK 0x0c
#define ACTMON_DEV_AVG_DOWN_WMARK 0x10
#define ACTMON_DEV_INIT_AVG 0x14
#define ACTMON_DEV_COUNT 0x18
#define ACTMON_DEV_AVG_COUNT 0x1c
#define ACTMON_DEV_INTR_STATUS 0x20
#define ACTMON_DEV_INTR_UP_WMARK (0x1 << 31)
#define ACTMON_DEV_INTR_DOWN_WMARK (0x1 << 30)
#define ACTMON_DEV_INTR_AVG_DOWN_WMARK (0x1 << 29)
#define ACTMON_DEV_INTR_AVG_UP_WMARK (0x1 << 28)
#define ACTMON_DEV_COUNT_WEGHT 0x24
#define ACTMON_DEV_SAMPLE_CTRL 0x28
#define ACTMON_DEV_SAMPLE_CTRL_TICK_65536 (0x1 << 2)
#define ACTMON_DEV_SAMPLE_CTRL_TICK_256 (0x0 << 1)
#define AMISC_ACTMON_0 0x54
#define AMISC_ACTMON_CNT_TARGET_ENABLE (0x1 << 31)
#define ACTMON_DEFAULT_AVG_WINDOW_LOG2 7
/* 1/10 of % i.e 60 % of max freq */
#define ACTMON_DEFAULT_AVG_BAND 6
#define ACTMON_MAX_REG_OFFSET 0x2c
/* TBD: These would come via dts file */
#define ACTMON_REG_OFFSET 0x800
/* milli second divider as SAMPLE_TICK*/
#define SAMPLE_MS_DIVIDER 65536
/* Sample period in ms */
#define ACTMON_DEFAULT_SAMPLING_PERIOD 20
#define AVG_COUNT_THRESHOLD 100000
static struct actmon ape_actmon;
static struct actmon *apemon;
/* APE activity monitor: Samples ADSP activity */
static struct actmon_dev actmon_dev_adsp = {
.reg = 0x000,
.clk_name = "adsp_cpu",
/* ADSP suspend activity floor */
.suspend_freq = 51200,
/* min step by which we want to boost in case of sudden boost request */
.boost_freq_step = 51200,
/* % of boost freq for boosting up */
.boost_up_coef = 200,
/*
* % of boost freq for boosting down. Should be boosted down by
* exponential down
*/
.boost_down_coef = 80,
/*
* % of device freq collected in a sample period set as boost up
* threshold. boost interrupt is generated when actmon_count
* (absolute actmon count in a sample period)
* crosses this threshold consecutively by up_wmark_window.
*/
.boost_up_threshold = 95,
/*
* % of device freq collected in a sample period set as boost down
* threshold. boost interrupt is generated when actmon_count(raw_count)
* crosses this threshold consecutively by down_wmark_window.
*/
.boost_down_threshold = 80,
/*
* No of times raw counts hits the up_threshold to generate an
* interrupt
*/
.up_wmark_window = 4,
/*
* No of times raw counts hits the down_threshold to generate an
* interrupt.
*/
.down_wmark_window = 8,
/*
* No of samples = 2^ avg_window_log2 for calculating exponential moving
* average.
*/
.avg_window_log2 = ACTMON_DEFAULT_AVG_WINDOW_LOG2,
/*
* "weight" is used to scale the count to match the device freq
* When 256 adsp active cpu clock are generated, actmon count
* is increamented by 1. Making weight as 256 ensures that 1 adsp active
* clk increaments actmon_count by 1.
* This makes actmon_count exactly reflect active adsp cpu clk
* cycles.
*/
.count_weight = 0x100,
/*
* FREQ_SAMPLER: samples number of device(adsp) active cycles
* weighted by count_weight to reflect * actmon_count within a
* sample period.
* LOAD_SAMPLER: samples actmon active cycles weighted by
* count_weight to reflect actmon_count within a sample period.
*/
.type = ACTMON_FREQ_SAMPLER,
.state = ACTMON_UNINITIALIZED,
};
static struct actmon_dev *actmon_devices[] = {
&actmon_dev_adsp,
};
/* Read an ACTMON register at byte offset @offset */
static inline u32 actmon_readl(u32 offset)
{
	return __raw_readl(apemon->base + offset);
}
/* Write @val to the ACTMON register at byte offset @offset */
static inline void actmon_writel(u32 val, u32 offset)
{
	__raw_writel(val, apemon->base + offset);
}
/* Order preceding register writes before subsequent ones */
static inline void actmon_wmb(void)
{
	wmb();
}
/* Per-device register offset; expects a local `dev` (struct actmon_dev *) */
#define offs(x) (dev->reg + x)
/* Scale @val by @pct percent using truncating integer arithmetic. */
static inline unsigned long do_percent(unsigned long val, unsigned int pct)
{
	unsigned long scaled = val * pct;

	return scaled / 100;
}
/*
 * actmon_update_sample_period - set the sampling period (@period, in
 * ms) and program the equivalent count of actmon clocks into the
 * device control register.
 */
static void actmon_update_sample_period(unsigned long period)
{
	u32 sample_period_in_clks;
	u32 val = 0;
	apemon->sampling_period = period;
	/*
	 * sample_period_in_clks <1..255> = (actmon_clk_freq<1..40800> *
	 * actmon_sample_period <10ms..40ms>) / SAMPLE_MS_DIVIDER(65536)
	 */
	sample_period_in_clks = (apemon->freq * apemon->sampling_period) /
				SAMPLE_MS_DIVIDER;
	val = actmon_readl(ACTMON_DEV_CTRL);
	val &= ~ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK;
	val |= (sample_period_in_clks <<
		ACTMON_DEV_CTRL_SAMPLE_PERIOD_VAL_SHIFT)
		& ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK;
	actmon_writel(val, ACTMON_DEV_CTRL);
}
/*
 * Program the "up" raw-count watermark as boost_up_threshold percent of
 * the expected per-sample count (frequency * sampling period).
 */
static inline void actmon_dev_up_wmark_set(struct actmon_dev *dev)
{
	u32 val;
	/* FREQ_SAMPLER tracks the device clock, LOAD_SAMPLER the actmon clock */
	unsigned long freq = (dev->type == ACTMON_FREQ_SAMPLER) ?
			     dev->cur_freq : apemon->freq;
	val = freq * apemon->sampling_period;
	actmon_writel(do_percent(val, dev->boost_up_threshold),
		      offs(ACTMON_DEV_UP_WMARK));
}
/*
 * Program the "down" raw-count watermark as boost_down_threshold
 * percent of the expected per-sample count.
 */
static inline void actmon_dev_down_wmark_set(struct actmon_dev *dev)
{
	u32 val;
	unsigned long freq = (dev->type == ACTMON_FREQ_SAMPLER) ?
			     dev->cur_freq : apemon->freq;
	val = freq * apemon->sampling_period;
	actmon_writel(do_percent(val, dev->boost_down_threshold),
		      offs(ACTMON_DEV_DOWN_WMARK));
}
/*
 * actmon_dev_wmark_set - program both raw-count watermarks.
 *
 * Fix: this duplicated the bodies of actmon_dev_up_wmark_set() and
 * actmon_dev_down_wmark_set(); delegate to them so the threshold
 * computation lives in exactly one place.  The register writes and
 * their order are unchanged.
 */
static inline void actmon_dev_wmark_set(struct actmon_dev *dev)
{
	actmon_dev_up_wmark_set(dev);
	actmon_dev_down_wmark_set(dev);
}
/*
 * Centre the average-count watermark band (+/- avg_band_freq worth of
 * counts) around the current moving average.
 */
static inline void actmon_dev_avg_wmark_set(struct actmon_dev *dev)
{
	/*
	 * band: delta from current count to be set for avg upper
	 * and lower thresholds
	 */
	u32 band = dev->avg_band_freq * apemon->sampling_period;
	u32 avg = dev->avg_count;
	actmon_writel(avg + band, offs(ACTMON_DEV_AVG_UP_WMARK));
	/* clamp so the lower watermark cannot underflow below zero */
	avg = max(avg, band);
	actmon_writel(avg - band, offs(ACTMON_DEV_AVG_DOWN_WMARK));
}
/*
 * Convert the moving-average count back into a frequency (kHz).
 * FREQ_SAMPLER counts device clocks directly; LOAD_SAMPLER counts
 * actmon clocks and is rescaled by cur_freq / apemon->freq.
 */
static unsigned long actmon_dev_avg_freq_get(struct actmon_dev *dev)
{
	u64 val;
	if (dev->type == ACTMON_FREQ_SAMPLER)
		return dev->avg_count / apemon->sampling_period;
	val = (u64) dev->avg_count * dev->cur_freq;
	do_div(val , apemon->freq * apemon->sampling_period);
	return (u32)val;
}
/* Activity monitor sampling operations */
/*
 * ape_actmon_dev_isr - hard-IRQ half of the ACTMON interrupt.
 *
 * Clears the interrupt status, resamples the moving average and
 * re-centres its watermark band when an average watermark fired, and
 * grows/decays the boost frequency when a raw-count watermark fired.
 * Frequency retargeting is deferred to the threaded half
 * (ape_actmon_dev_fn) via IRQ_WAKE_THREAD.
 */
static irqreturn_t ape_actmon_dev_isr(int irq, void *dev_id)
{
	u32 val, devval;
	unsigned long flags;
	struct actmon_dev *dev = (struct actmon_dev *)dev_id;
	spin_lock_irqsave(&dev->lock, flags);
	val = actmon_readl(offs(ACTMON_DEV_INTR_STATUS));
	actmon_writel(val, offs(ACTMON_DEV_INTR_STATUS)); /* clr all */
	devval = actmon_readl(offs(ACTMON_DEV_CTRL));
	/* average watermark crossed: re-centre the band around the new avg */
	if (val & ACTMON_DEV_INTR_AVG_UP_WMARK) {
		devval |= (ACTMON_DEV_CTRL_AVG_UP_WMARK_ENB |
			ACTMON_DEV_CTRL_AVG_DOWN_WMARK_ENB);
		dev->avg_count = actmon_readl(offs(ACTMON_DEV_AVG_COUNT));
		actmon_dev_avg_wmark_set(dev);
	} else if (val & ACTMON_DEV_INTR_AVG_DOWN_WMARK) {
		devval |= (ACTMON_DEV_CTRL_AVG_UP_WMARK_ENB |
			ACTMON_DEV_CTRL_AVG_DOWN_WMARK_ENB);
		dev->avg_count = actmon_readl(offs(ACTMON_DEV_AVG_COUNT));
		actmon_dev_avg_wmark_set(dev);
	}
	/* raw-count watermark crossed: grow boost on the way up, decay it
	 * on the way down; disable the interrupt when hitting a rail */
	if (val & ACTMON_DEV_INTR_UP_WMARK) {
		devval |= (ACTMON_DEV_CTRL_UP_WMARK_ENB |
			ACTMON_DEV_CTRL_DOWN_WMARK_ENB);
		dev->boost_freq = dev->boost_freq_step +
			do_percent(dev->boost_freq, dev->boost_up_coef);
		if (dev->boost_freq >= dev->max_freq) {
			dev->boost_freq = dev->max_freq;
			devval &= ~ACTMON_DEV_CTRL_UP_WMARK_ENB;
		}
	} else if (val & ACTMON_DEV_INTR_DOWN_WMARK) {
		devval |= (ACTMON_DEV_CTRL_UP_WMARK_ENB |
			ACTMON_DEV_CTRL_DOWN_WMARK_ENB);
		dev->boost_freq =
			do_percent(dev->boost_freq, dev->boost_down_coef);
		if (dev->boost_freq == 0) {
			devval &= ~ACTMON_DEV_CTRL_DOWN_WMARK_ENB;
		}
	}
	actmon_writel(devval, offs(ACTMON_DEV_CTRL));
	actmon_wmb();
	spin_unlock_irqrestore(&dev->lock, flags);
	return IRQ_WAKE_THREAD;
}
/*
 * ape_actmon_dev_fn - threaded half of the ACTMON interrupt.
 *
 * Derives the new target frequency from the moving average (scaled up
 * by avg_sustain_coef) plus the current boost, then hands it to the
 * ADSP DFS layer.
 */
static irqreturn_t ape_actmon_dev_fn(int irq, void *dev_id)
{
	unsigned long flags, freq;
	struct actmon_dev *dev = (struct actmon_dev *)dev_id;
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != ACTMON_ON) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return IRQ_HANDLED;
	}
	freq = actmon_dev_avg_freq_get(dev);
	dev->avg_actv_freq = freq; /* in kHz */
	/* headroom above the observed average, then add the boost */
	freq = do_percent(freq, dev->avg_sustain_coef);
	freq += dev->boost_freq;
	dev->target_freq = freq;
	spin_unlock_irqrestore(&dev->lock, flags);
	dev_dbg(dev->device, "%s(kHz): avg: %lu, boost: %lu, target: %lu, current: %lu\n",
	dev->clk_name, dev->avg_actv_freq, dev->boost_freq, dev->target_freq,
	dev->cur_freq);
#if defined(CONFIG_TEGRA_ADSP_DFS)
	adsp_cpu_set_rate(freq);
#endif
	return IRQ_HANDLED;
}
/* Activity monitor configuration and control */
/* Activity monitor configuration and control */
/*
 * actmon_dev_configure - program watermarks, averaging window and count
 * weight for @dev, assuming it currently runs at @freq (kHz).  The
 * monitor is left configured but not yet enabled.
 */
static void actmon_dev_configure(struct actmon_dev *dev,
			unsigned long freq)
{
	u32 val;
	dev->boost_freq = 0;
	dev->cur_freq = freq;
	dev->target_freq = freq;
	dev->avg_actv_freq = freq;
	if (dev->type == ACTMON_FREQ_SAMPLER) {
		/*
		 * max actmon count = (count_weight * adsp_freq (khz)
		 * sample_period (ms)) / (PULSE_N_CLK+1)
		 * As Count_weight is set as 256(0x100) and
		 * (PULSE_N_CLK+1) = 256. both would be
		 * compensated while coming up max_actmon_count.
		 * in other word
		 * max actmon count = ((count_weight * adsp_freq *
		 * sample_period_reg * SAMPLE_TICK)
		 * / (ape_freq * (PULSE_N_CLK+1)))
		 * where -
		 * sample_period_reg : <1..255> sample period in no of
		 * actmon clocks per sample
		 * SAMPLE_TICK : Arbtrary value for ms - 65536, us - 256
		 * (PULSE_N_CLK + 1) : 256 - No of adsp "active" clocks to
		 * increament raw_count/ actmon_count
		 * by one.
		 */
		dev->avg_count = dev->cur_freq * apemon->sampling_period;
		dev->avg_band_freq = dev->max_freq *
					ACTMON_DEFAULT_AVG_BAND / 1000;
	} else {
		dev->avg_count = apemon->freq * apemon->sampling_period;
		dev->avg_band_freq = apemon->freq *
					ACTMON_DEFAULT_AVG_BAND / 1000;
	}
	/* seed the hardware moving average with the expected count */
	actmon_writel(dev->avg_count, offs(ACTMON_DEV_INIT_AVG));
	/* avg_sustain_coef depends on a non-zero up threshold */
	BUG_ON(!dev->boost_up_threshold);
	dev->avg_sustain_coef = 100 * 100 / dev->boost_up_threshold;
	actmon_dev_avg_wmark_set(dev);
	actmon_dev_wmark_set(dev);
	actmon_writel(dev->count_weight, offs(ACTMON_DEV_COUNT_WEGHT));
	val = actmon_readl(ACTMON_DEV_CTRL);
	val |= (ACTMON_DEV_CTRL_PERIODIC_ENB |
		ACTMON_DEV_CTRL_AVG_UP_WMARK_ENB |
		ACTMON_DEV_CTRL_AVG_DOWN_WMARK_ENB);
	val |= ((dev->avg_window_log2 - 1) << ACTMON_DEV_CTRL_K_VAL_SHIFT) &
		ACTMON_DEV_CTRL_K_VAL_MASK;
	val |= ((dev->down_wmark_window - 1) <<
		ACTMON_DEV_CTRL_DOWN_WMARK_NUM_SHIFT) &
		ACTMON_DEV_CTRL_DOWN_WMARK_NUM_MASK;
	val |= ((dev->up_wmark_window - 1) <<
		ACTMON_DEV_CTRL_UP_WMARK_NUM_SHIFT) &
		ACTMON_DEV_CTRL_UP_WMARK_NUM_MASK;
	val |= ACTMON_DEV_CTRL_DOWN_WMARK_ENB |
		ACTMON_DEV_CTRL_UP_WMARK_ENB;
	actmon_writel(val, offs(ACTMON_DEV_CTRL));
	actmon_wmb();
}
/* Enable monitoring for @dev; a no-op unless the device is ACTMON_OFF. */
static void actmon_dev_enable(struct actmon_dev *dev)
{
	unsigned long flags;
	u32 ctrl;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != ACTMON_OFF)
		goto unlock;

	dev->state = ACTMON_ON;
	ctrl = actmon_readl(offs(ACTMON_DEV_CTRL));
	ctrl |= ACTMON_DEV_CTRL_ENB;
	actmon_writel(ctrl, offs(ACTMON_DEV_CTRL));
	actmon_wmb();
unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
}
/*
 * Disable monitoring for @dev and clear any pending interrupt status;
 * a no-op unless the device is currently ACTMON_ON.
 */
static void actmon_dev_disable(struct actmon_dev *dev)
{
	u32 val;
	unsigned long flags;
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state == ACTMON_ON) {
		dev->state = ACTMON_OFF;
		val = actmon_readl(offs(ACTMON_DEV_CTRL));
		val &= ~ACTMON_DEV_CTRL_ENB;
		actmon_writel(val, offs(ACTMON_DEV_CTRL));
		/* drop any interrupt that raced with the disable */
		actmon_writel(0xffffffff, offs(ACTMON_DEV_INTR_STATUS));
		actmon_wmb();
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}
/*
 * actmon_dev_probe - request the threaded ACTMON interrupt for @dev.
 *
 * The IRQ is left disabled; actmon_dev_init() re-enables it once the
 * monitor has been configured.  Returns 0 or the request_threaded_irq()
 * error code.
 */
static int actmon_dev_probe(struct actmon_dev *dev)
{
	struct nvadsp_drv_data *drv_data = dev_get_drvdata(dev->device);
	int ret;
	dev->irq = drv_data->agic_irqs[ACTMON_VIRQ];
	ret = request_threaded_irq(dev->irq, ape_actmon_dev_isr,
			ape_actmon_dev_fn, IRQ_TYPE_LEVEL_HIGH,
			dev->clk_name, dev);
	if (ret) {
		dev_err(dev->device, "Failed irq %d request for %s\n", dev->irq,
			dev->clk_name);
		goto end;
	}
	disable_irq(dev->irq);
end:
	return ret;
}
/*
 * actmon_dev_init - acquire and enable the monitored clock, configure
 * the monitor at the clock's current rate (converted to kHz) and start
 * monitoring.  Returns 0 on success, a negative error otherwise.
 */
static int actmon_dev_init(struct actmon_dev *dev)
{
	int ret = -EINVAL;
	unsigned long freq;
	spin_lock_init(&dev->lock);
	dev->clk = clk_get_sys(NULL, dev->clk_name);
	if (IS_ERR_OR_NULL(dev->clk)) {
		dev_err(dev->device, "Failed to find %s clock\n",
			dev->clk_name);
		goto end;
	}
	ret = clk_prepare_enable(dev->clk);
	if (ret) {
		dev_err(dev->device, "unable to enable %s clock\n",
			dev->clk_name);
		goto err_enable;
	}
	/* clk_get_rate() is in Hz; the monitor works in kHz */
	dev->max_freq = freq = clk_get_rate(dev->clk) / 1000;
	actmon_dev_configure(dev, freq);
	dev->state = ACTMON_OFF;
	actmon_dev_enable(dev);
	/* IRQ was requested (and disabled) in actmon_dev_probe() */
	enable_irq(dev->irq);
	return 0;
err_enable:
	clk_put(dev->clk);
end:
	return ret;
}
#ifdef CONFIG_DEBUG_FS
#define RW_MODE (S_IWUSR | S_IRUSR)
#define RO_MODE S_IRUSR
static struct dentry *clk_debugfs_root;
/* debugfs: report whether this monitor samples load or frequency. */
static int type_show(struct seq_file *s, void *data)
{
	struct actmon_dev *dev = s->private;
	const char *kind = (dev->type == ACTMON_LOAD_SAMPLER) ?
		"Load Activity Monitor" : "Frequency Activity Monitor";

	seq_printf(s, "%s\n", kind);
	return 0;
}
/* debugfs open hook: bind type_show to the per-device private data. */
static int type_open(struct inode *inode, struct file *file)
{
	return single_open(file, type_show, inode->i_private);
}
/* File operations for the read-only "actv_type" debugfs node. */
static const struct file_operations type_fops = {
	.open = type_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* debugfs: sample the average activity frequency under the device lock. */
static int actv_get(void *data, u64 *val)
{
	struct actmon_dev *dev = data;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	*val = actmon_dev_avg_freq_get(dev);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(actv_fops, actv_get, NULL, "%llu\n");
/* debugfs: report the boost step as a percentage of the max frequency. */
static int step_get(void *data, u64 *val)
{
	struct actmon_dev *dev = data;

	*val = dev->boost_freq_step * 100 / dev->max_freq;
	return 0;
}
/* debugfs: set the boost step as a percentage (clamped to 100) of max freq. */
static int step_set(void *data, u64 val)
{
	struct actmon_dev *dev = data;
	unsigned long flags;

	if (val > 100)
		val = 100;

	spin_lock_irqsave(&dev->lock, flags);
	dev->boost_freq_step = do_percent(dev->max_freq, (unsigned int)val);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(step_fops, step_get, step_set, "%llu\n");
/* debugfs: report the hardware count weight. */
static int count_weight_get(void *data, u64 *val)
{
	struct actmon_dev *dev = data;

	*val = dev->count_weight;
	return 0;
}
/* debugfs: update the count weight and push it to the hardware register. */
static int count_weight_set(void *data, u64 val)
{
	struct actmon_dev *dev = data;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	dev->count_weight = (u32) val;
	actmon_writel(dev->count_weight, offs(ACTMON_DEV_COUNT_WEGHT));
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cnt_wt_fops, count_weight_get,
			count_weight_set, "%llu\n");
/* debugfs: report the boost-up threshold in percent. */
static int up_threshold_get(void *data, u64 *val)
{
	struct actmon_dev *dev = data;

	*val = dev->boost_up_threshold;
	return 0;
}
/*
 * debugfs: set the boost-up threshold (%).  The value is clamped to 100
 * and never dropped below the boost-down threshold; the sustain
 * coefficient and the hardware up-watermark are refreshed to match.
 */
static int up_threshold_set(void *data, u64 val)
{
	struct actmon_dev *dev = data;
	unsigned int pct = (unsigned int)val;
	unsigned long flags;

	if (pct > 100)
		pct = 100;

	spin_lock_irqsave(&dev->lock, flags);

	if (pct <= dev->boost_down_threshold)
		pct = dev->boost_down_threshold;
	if (pct)
		dev->avg_sustain_coef = 100 * 100 / pct;
	dev->boost_up_threshold = pct;

	actmon_dev_up_wmark_set(dev);
	actmon_wmb();

	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(up_threshold_fops, up_threshold_get,
			up_threshold_set, "%llu\n");
/* debugfs: report the boost-down threshold in percent. */
static int down_threshold_get(void *data, u64 *val)
{
	struct actmon_dev *dev = data;

	*val = dev->boost_down_threshold;
	return 0;
}
/*
 * debugfs: set the boost-down threshold (%), capped at the current
 * boost-up threshold, and refresh the hardware down-watermark.
 */
static int down_threshold_set(void *data, u64 val)
{
	struct actmon_dev *dev = data;
	unsigned int pct = (unsigned int)val;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	if (pct >= dev->boost_up_threshold)
		pct = dev->boost_up_threshold;
	dev->boost_down_threshold = pct;

	actmon_dev_down_wmark_set(dev);
	actmon_wmb();

	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(down_threshold_fops, down_threshold_get,
			down_threshold_set, "%llu\n");
/* debugfs: report the monitor's current state as an integer. */
static int state_get(void *data, u64 *val)
{
	struct actmon_dev *dev = data;

	*val = dev->state;
	return 0;
}
/* debugfs: nonzero enables the monitor, zero disables it. */
static int state_set(void *data, u64 val)
{
	struct actmon_dev *dev = data;

	if (val)
		actmon_dev_enable(dev);
	else
		actmon_dev_disable(dev);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(state_fops, state_get, state_set, "%llu\n");
/* Get period in msec (reads the global apemon sampling period). */
static int period_get(void *data, u64 *val)
{
	*val = apemon->sampling_period;
	return 0;
}
/*
 * Set period in msec.  Rejects zero; otherwise reprograms the sample
 * period and rederives every device's watermarks for the new period.
 */
static int period_set(void *data, u64 val)
{
	u8 period = (u8)val;
	unsigned long flags;
	int i;

	if (!period)
		return -EINVAL;

	actmon_update_sample_period(period);

	for (i = 0; i < ARRAY_SIZE(actmon_devices); i++) {
		struct actmon_dev *dev = actmon_devices[i];

		spin_lock_irqsave(&dev->lock, flags);
		actmon_dev_wmark_set(dev);
		spin_unlock_irqrestore(&dev->lock, flags);
	}
	actmon_wmb();
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(period_fops, period_get, period_set, "%llu\n");
/*
 * Create the per-device debugfs directory and its attribute files under
 * clk_debugfs_root.  Returns 0 on success (or for an uninitialized
 * device), -ENOMEM if any debugfs entry could not be created.
 */
static int actmon_debugfs_create_dev(struct actmon_dev *dev)
{
	struct dentry *dir;

	if (dev->state == ACTMON_UNINITIALIZED)
		return 0;

	dir = debugfs_create_dir(dev->clk_name, clk_debugfs_root);
	if (!dir)
		return -ENOMEM;

	if (!debugfs_create_file("actv_type", RO_MODE, dir, dev, &type_fops))
		return -ENOMEM;
	if (!debugfs_create_file("avg_activity", RO_MODE, dir, dev,
				 &actv_fops))
		return -ENOMEM;
	if (!debugfs_create_file("boost_step", RW_MODE, dir, dev, &step_fops))
		return -ENOMEM;
	if (!debugfs_create_u32("boost_rate_dec", RW_MODE, dir,
				(u32 *)&dev->boost_down_coef))
		return -ENOMEM;
	if (!debugfs_create_u32("boost_rate_inc", RW_MODE, dir,
				(u32 *)&dev->boost_up_coef))
		return -ENOMEM;
	if (!debugfs_create_file("boost_threshold_dn", RW_MODE, dir, dev,
				 &down_threshold_fops))
		return -ENOMEM;
	if (!debugfs_create_file("boost_threshold_up", RW_MODE, dir, dev,
				 &up_threshold_fops))
		return -ENOMEM;
	if (!debugfs_create_file("state", RW_MODE, dir, dev, &state_fops))
		return -ENOMEM;
	if (!debugfs_create_file("cnt_wt", RW_MODE, dir, dev, &cnt_wt_fops))
		return -ENOMEM;

	return 0;
}
/*
 * Build the adsp_actmon debugfs tree: a "period" control plus one
 * sub-directory per actmon device.  On any failure the whole tree is
 * removed again.
 */
static int actmon_debugfs_init(struct nvadsp_drv_data *drv)
{
	struct dentry *root;
	int ret = -ENOMEM;
	int i;

	if (!drv->adsp_debugfs_root)
		return ret;

	root = debugfs_create_dir("adsp_actmon", drv->adsp_debugfs_root);
	if (!root)
		return ret;
	clk_debugfs_root = root;

	if (!debugfs_create_file("period", RW_MODE, root, NULL, &period_fops))
		goto err_out;

	for (i = 0; i < ARRAY_SIZE(actmon_devices); i++) {
		ret = actmon_debugfs_create_dev(actmon_devices[i]);
		if (ret)
			goto err_out;
	}
	return 0;

err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return ret;
}
#endif
/*
 * actmon_rate_change - propagate an ADSP rate change to the monitor.
 * @freq:     new ADSP frequency in kHz
 * @override: true to force-reload the running average (the monitor is
 *            briefly disabled while INIT_AVG and the watermarks are
 *            rewritten); false to just refresh the watermarks if the
 *            monitor is currently running.
 *
 * Also scales the APE clock to half the ADSP rate.
 *
 * Fix: the original definition was terminated with "};" - the stray
 * semicolon is an empty file-scope declaration (a constraint violation
 * in strict C90 and a -Wpedantic warning); it has been removed.
 */
void actmon_rate_change(unsigned long freq, bool override)
{
	struct actmon_dev *dev = &actmon_dev_adsp;
	unsigned long flags;

	if (override) {
		actmon_dev_disable(dev);
		spin_lock_irqsave(&dev->lock, flags);
		dev->cur_freq = freq;
		dev->avg_count = freq * apemon->sampling_period;
		actmon_writel(dev->avg_count, offs(ACTMON_DEV_INIT_AVG));
		actmon_dev_avg_wmark_set(dev);
		actmon_dev_wmark_set(dev);
		actmon_wmb();
		spin_unlock_irqrestore(&dev->lock, flags);
		actmon_dev_enable(dev);
	} else {
		spin_lock_irqsave(&dev->lock, flags);
		dev->cur_freq = freq;
		if (dev->state == ACTMON_ON) {
			actmon_dev_wmark_set(dev);
			actmon_wmb();
		}
		spin_unlock_irqrestore(&dev->lock, flags);
	}

	/* change ape rate as half of adsp rate (freq in kHz, rate in Hz) */
	clk_set_rate(apemon->clk, freq * 500);
}
/*
 * ape_actmon_probe - probe every actmon device for this platform device.
 *
 * All devices are probed even if an earlier one fails, but the FIRST
 * failure is what gets reported to the caller.  The original code
 * returned only the status of the final loop iteration, so an earlier
 * probe error could be silently masked by a later success.
 */
int ape_actmon_probe(struct platform_device *pdev)
{
	int ret = 0;
	int err;
	int i;

	for (i = 0; i < ARRAY_SIZE(actmon_devices); i++) {
		actmon_devices[i]->device = &pdev->dev;
		err = actmon_dev_probe(actmon_devices[i]);
		dev_dbg(&pdev->dev, "%s actmon: %s probe (%d)\n",
			actmon_devices[i]->clk_name,
			err ? "Failed" : "Completed", err);
		if (err && !ret)
			ret = err;
	}
	return ret;
}
/*
 * Clock-rate-change notifier for the APE bus clock.  If the adsp
 * monitor is running, it is briefly disabled, the sampling period is
 * recomputed for the new rate and the running average is re-seeded,
 * then the monitor is re-enabled.  Always returns NOTIFY_OK.
 */
static int ape_actmon_rc_cb(
	struct notifier_block *nb, unsigned long rate, void *v)
{
	struct actmon_dev *dev = &actmon_dev_adsp;
	unsigned long flags;
	u32 init_cnt;

	if (dev->state != ACTMON_ON) {
		dev_dbg(dev->device, "adsp actmon is not ON\n");
		goto exit_out;
	}

	actmon_dev_disable(dev);
	spin_lock_irqsave(&dev->lock, flags);
	/* preserve the running average across the rate switch */
	init_cnt = actmon_readl(offs(ACTMON_DEV_AVG_COUNT));
	/* update sample period to maintain number of clock */
	apemon->freq = rate / 1000; /* in KHz */
	actmon_update_sample_period(ACTMON_DEFAULT_SAMPLING_PERIOD);
	actmon_writel(init_cnt, offs(ACTMON_DEV_INIT_AVG));
	spin_unlock_irqrestore(&dev->lock, flags);
	actmon_dev_enable(dev);
exit_out:
	return NOTIFY_OK;
}
/*
 * ape_actmon_init - one-time setup of the APE activity-monitor block.
 *
 * Maps the ACTMON registers, acquires and enables the "adsp.ape" clock,
 * registers a rate-change notifier on its parent bus clock, programs
 * the sample period, enables the AMISC counter and finally initializes
 * every actmon device.  Subsequent calls are no-ops once initialization
 * has succeeded.  Returns 0 or a negative errno.
 *
 * Fix: clk_get_sys() reports failure as an ERR_PTR (or NULL), never a
 * valid handle.  The previous "if (!apemon->clk)" test let error
 * pointers through and the error path would then clk_put() an ERR_PTR.
 * Use IS_ERR_OR_NULL() (as actmon_dev_init() already does) and clear
 * the handle before taking the error path.
 */
int ape_actmon_init(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
	static void __iomem *amisc_base;
	u32 sample_period_in_clks;
	struct clk *p;
	u32 val = 0;
	int i, ret;

	if (drv->actmon_initialized)
		return 0;

	apemon = &ape_actmon;
	apemon->base = drv->base_regs[AMISC] + ACTMON_REG_OFFSET;
	amisc_base = drv->base_regs[AMISC];

	apemon->clk = clk_get_sys(NULL, "adsp.ape");
	if (IS_ERR_OR_NULL(apemon->clk)) {
		dev_err(&pdev->dev, "Failed to find actmon clock\n");
		apemon->clk = NULL;	/* keep error paths from clk_put()ing junk */
		ret = -EINVAL;
		goto err_out;
	}

	ret = clk_prepare_enable(apemon->clk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to enable actmon clock\n");
		ret = -EINVAL;
		goto err_out;
	}

	apemon->clk_rc_nb.notifier_call = ape_actmon_rc_cb;

	/*
	 * "adsp.ape" clk is shared bus user clock and "ape" is bus clock
	 * but rate change notification should come from bus clock itself.
	 */
	p = clk_get_parent(apemon->clk);
	if (!p) {
		dev_err(&pdev->dev, "Failed to find actmon parent clock\n");
		ret = -EINVAL;
		goto clk_err_out;
	}

	ret = tegra_register_clk_rate_notifier(p, &apemon->clk_rc_nb);
	if (ret) {
		dev_err(&pdev->dev, "Registration fail: %s rate change notifier for %s\n",
			p->name, apemon->clk->name);
		goto clk_err_out;
	}

	apemon->freq = clk_get_rate(apemon->clk) / 1000; /* in KHz */
	apemon->sampling_period = ACTMON_DEFAULT_SAMPLING_PERIOD;

	/*
	 * sample period as no of actmon clocks
	 * Actmon is derived from APE clk.
	 * suppose APE clk is 204MHz = 204000 KHz and want to calculate
	 * clocks in 10ms sample
	 * in 1ms = 204000 cycles
	 * 10ms = 204000 * 10 APE cycles
	 * SAMPLE_MS_DIVIDER is an arbitrary number
	 */
	sample_period_in_clks = (apemon->freq * apemon->sampling_period)
				/ SAMPLE_MS_DIVIDER;

	/* set ms mode */
	actmon_writel(ACTMON_DEV_SAMPLE_CTRL_TICK_65536,
		      ACTMON_DEV_SAMPLE_CTRL);

	val = actmon_readl(ACTMON_DEV_CTRL);
	val &= ~ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK;
	val |= (sample_period_in_clks <<
		ACTMON_DEV_CTRL_SAMPLE_PERIOD_VAL_SHIFT)
		& ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK;
	actmon_writel(val, ACTMON_DEV_CTRL);

	/* Enable AMISC_ACTMON */
	val = __raw_readl(amisc_base + AMISC_ACTMON_0);
	val |= AMISC_ACTMON_CNT_TARGET_ENABLE;
	__raw_writel(val, amisc_base + AMISC_ACTMON_0);

	actmon_writel(0xffffffff, ACTMON_DEV_INTR_STATUS); /* clr all */

	for (i = 0; i < ARRAY_SIZE(actmon_devices); i++) {
		ret = actmon_dev_init(actmon_devices[i]);
		dev_dbg(&pdev->dev, "%s actmon device: %s initialization (%d)\n",
			actmon_devices[i]->clk_name,
			ret ? "Failed" : "Completed", ret);
	}
#ifdef CONFIG_DEBUG_FS
	actmon_debugfs_init(drv);
#endif
	drv->actmon_initialized = true;

	dev_dbg(&pdev->dev, "adsp actmon initialized ....\n");
	return 0;

clk_err_out:
	if (apemon->clk)
		clk_disable_unprepare(apemon->clk);
err_out:
	if (apemon->clk)
		clk_put(apemon->clk);
	return ret;
}
/*
 * ape_actmon_exit - tear down everything set up by ape_actmon_init():
 * per-device monitors, IRQs and clocks, then the APE clock and its
 * rate-change notifier.  Returns -ENODEV if init never completed.
 *
 * NOTE(review): dev->irq is disabled but never free_irq()'d here even
 * though actmon_dev_probe() requested it - confirm whether the IRQ is
 * intentionally kept across init/exit cycles.
 */
int ape_actmon_exit(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
	struct actmon_dev *dev;
	status_t ret = 0;
	int i;

	/* return if actmon is not initialized */
	if (!drv->actmon_initialized)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(actmon_devices); i++) {
		dev = actmon_devices[i];
		actmon_dev_disable(dev);
		disable_irq(dev->irq);
		clk_disable_unprepare(dev->clk);
		clk_put(dev->clk);
	}

	tegra_unregister_clk_rate_notifier(clk_get_parent(apemon->clk),
					   &apemon->clk_rc_nb);
	clk_disable_unprepare(apemon->clk);
	clk_put(apemon->clk);

	drv->actmon_initialized = false;

	dev_dbg(&pdev->dev, "adsp actmon has exited ....\n");
	return ret;
}

View File

@@ -0,0 +1,86 @@
/*
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef __APE_ACTMON_H
#define __APE_ACTMON_H
#include <linux/spinlock.h>
/* What an actmon device samples: activity (load) counts or frequency. */
enum actmon_type {
	ACTMON_LOAD_SAMPLER,
	ACTMON_FREQ_SAMPLER,
};
/* Lifecycle states of an actmon device. */
enum actmon_state {
	ACTMON_UNINITIALIZED = -1,
	ACTMON_OFF = 0,
	ACTMON_ON = 1,
	ACTMON_SUSPENDED = 2,
};
/* Units:
 * - frequency in kHz
 * - coefficients, and thresholds in %
 * - sampling period in ms
 * - window in sample periods (value = setting + 1)
 */
struct actmon_dev {
	u32 reg;			/* register offset of this device */
	int irq;			/* ACTMON virtual IRQ */
	struct device *device;		/* owning platform device */
	const char *dev_id;
	const char *con_id;
	const char *clk_name;		/* monitored clock / debugfs dir name */
	struct clk *clk;
	unsigned long max_freq;
	unsigned long target_freq;
	unsigned long cur_freq;
	unsigned long suspend_freq;
	unsigned long avg_actv_freq;
	unsigned long avg_band_freq;
	unsigned int avg_sustain_coef;	/* 100*100/boost_up_threshold */
	u32 avg_count;
	unsigned long boost_freq;
	unsigned long boost_freq_step;	/* percentage of max_freq */
	unsigned int boost_up_coef;
	unsigned int boost_down_coef;
	unsigned int boost_up_threshold;
	unsigned int boost_down_threshold;
	u8 up_wmark_window;
	u8 down_wmark_window;
	u8 avg_window_log2;
	u32 count_weight;		/* HW COUNT_WEGHT register value */
	enum actmon_type type;
	enum actmon_state state;
	enum actmon_state saved_state;
	spinlock_t lock;		/* protects the mutable state above */
};
/* Global state for the APE activity-monitor block itself. */
struct actmon {
	struct clk *clk;		/* the "adsp.ape" clock */
	unsigned long freq;		/* APE clock rate in kHz */
	unsigned long sampling_period;	/* in ms */
	struct notifier_block clk_rc_nb; /* rate-change notifier (parent clk) */
	void __iomem *base;		/* ACTMON register base */
};
int ape_actmon_init(struct platform_device *pdev);
int ape_actmon_exit(struct platform_device *pdev);
void actmon_rate_change(unsigned long freq, bool override);
#endif

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,972 @@
/*
* nvadsp_app.c
*
* ADSP OS App management
*
* Copyright (C) 2014-2022 NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/tegra_nvadsp.h>
#include <linux/elf.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <asm/hwcap.h>
#include "os.h"
#include "dram_app_mem_manager.h"
#include "adsp_shared_struct.h"
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif
#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif
#define BITS_PER_INT 32
/*
 * Flag bit stored in sh_entsize to mark sections belonging to the
 * discardable "init" part of the module.  It must be the top bit of a
 * 32-bit value.  The original expression used '<' (less-than) instead
 * of '<<' (shift), making the mask evaluate to 1 and corrupting any
 * section offset whose low bit was set; the upstream kernel module
 * loader uses the shifted form.
 */
#define INIT_OFFSET_MASK (1U << (BITS_PER_INT-1))
#define HWCAP_SWP (1 << 0)
#define HWCAP_HALF (1 << 1)
#define HWCAP_THUMB (1 << 2)
#define HWCAP_26BIT (1 << 3) /* Play it safe */
#define HWCAP_FAST_MULT (1 << 4)
#define HWCAP_FPA (1 << 5)
#define HWCAP_VFP (1 << 6)
#define HWCAP_EDSP (1 << 7)
#define HWCAP_JAVA (1 << 8)
#define HWCAP_IWMMXT (1 << 9)
#define HWCAP_CRUNCH (1 << 10)
#define HWCAP_THUMBEE (1 << 11)
#define HWCAP_NEON (1 << 12)
#define HWCAP_VFPv3 (1 << 13)
#define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */
#define HWCAP_TLS (1 << 15)
#define HWCAP_VFPv4 (1 << 16)
#define HWCAP_IDIVA (1 << 17)
#define HWCAP_IDIVT (1 << 18)
#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
#define HWCAP_LPAE (1 << 20)
#define HWCAP_EVTSTRM_32 (1 << 21)
#define EF_ARM_EABI_MASK 0xff000000
#define EF_ARM_EABI_UNKNOWN 0x00000000
#define EF_ARM_EABI_VER1 0x01000000
#define EF_ARM_EABI_VER2 0x02000000
#define EF_ARM_EABI_VER3 0x03000000
#define EF_ARM_EABI_VER4 0x04000000
#define EF_ARM_EABI_VER5 0x05000000
#define EF_ARM_BE8 0x00800000 /* ABI 4,5 */
#define EF_ARM_LE8 0x00400000 /* ABI 4,5 */
#define EF_ARM_MAVERICK_FLOAT 0x00000800 /* ABI 0 */
#define EF_ARM_VFP_FLOAT 0x00000400 /* ABI 0 */
#define EF_ARM_SOFT_FLOAT 0x00000200 /* ABI 0 */
#define EF_ARM_OLD_ABI 0x00000100 /* ABI 0 */
#define EF_ARM_NEW_ABI 0x00000080 /* ABI 0 */
#define EF_ARM_ALIGN8 0x00000040 /* ABI 0 */
#define EF_ARM_PIC 0x00000020 /* ABI 0 */
#define EF_ARM_MAPSYMSFIRST 0x00000010 /* ABI 2 */
#define EF_ARM_APCS_FLOAT 0x00000010 /* ABI 0, floats in fp regs */
#define EF_ARM_DYNSYMSUSESEGIDX 0x00000008 /* ABI 2 */
#define EF_ARM_APCS_26 0x00000008 /* ABI 0 */
#define EF_ARM_SYMSARESORTED 0x00000004 /* ABI 1,2 */
#define EF_ARM_INTERWORK 0x00000004 /* ABI 0 */
#define EF_ARM_HASENTRY 0x00000002 /* All */
#define EF_ARM_RELEXEC 0x00000001 /* All */
#define R_ARM_NONE 0
#define R_ARM_PC24 1
#define R_ARM_ABS32 2
#define R_ARM_CALL 28
#define R_ARM_JUMP24 29
#define R_ARM_TARGET1 38
#define R_ARM_V4BX 40
#define R_ARM_PREL31 42
#define R_ARM_MOVW_ABS_NC 43
#define R_ARM_MOVT_ABS 44
#define R_ARM_THM_CALL 10
#define R_ARM_THM_JUMP24 30
#define R_ARM_THM_MOVW_ABS_NC 47
#define R_ARM_THM_MOVT_ABS 48
/*
 * Bookkeeping for an app ELF image while it is being loaded: the raw
 * image, its parsed section headers and string tables, and the indices
 * of the special sections.  Mirrors the kernel module loader's
 * load_info.
 */
struct load_info {
	const char *name;		/* app name, used in diagnostics */
	struct elf32_hdr *hdr;		/* raw ELF image in memory */
	unsigned long len;		/* image length in bytes */
	struct elf32_shdr *sechdrs;	/* section header table (in hdr) */
	char *secstrings, *strtab;	/* section-name / symbol string tables */
	unsigned long symoffs, stroffs;	/* final core symtab/strtab offsets */
	unsigned int num_debug;
	bool sig_ok;
	struct device *dev;		/* device used for logging */
	struct {
		unsigned int sym, str, mod, vers, info, pcpu;
	} index;			/* indices of notable sections */
};
/*
 * Apply every REL relocation in section @relindex against its target
 * section, resolving symbols through section @symindex.  Instructions
 * are patched through the kernel-side mapping (loc) while branch
 * displacements are computed against the ADSP-side load address
 * (adsp_loc).  Handles the ARM and Thumb-2 relocation types used by
 * ADSP apps, including ARM<->Thumb mode switches on branch fixups.
 * Returns 0 on success or -ENOEXEC for malformed/unsupported entries.
 */
static int
apply_relocate(const struct load_info *info, Elf32_Shdr *sechdrs,
		const char *strtab, unsigned int symindex,
		unsigned int relindex, struct adsp_module *module)
{
	Elf32_Shdr *symsec = sechdrs + symindex;
	Elf32_Shdr *relsec = sechdrs + relindex;
	Elf32_Shdr *dstsec = sechdrs + relsec->sh_info;
	Elf32_Rel *rel = (void *)info->hdr + relsec->sh_offset;
	struct device *dev = info->dev;
	unsigned int i;

	dev_dbg(dev, "the relative section is %s dst %s sym %s\n",
		info->secstrings + relsec->sh_name,
		info->secstrings + dstsec->sh_name,
		info->secstrings + symsec->sh_name);

	for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) {
		void *loc;
		Elf32_Sym *sym;
		const char *symname;
		s32 offset;
		u32 upper, lower, sign, j1, j2;
		uint32_t adsp_loc;
		bool switch_mode = false;
		int h_bit = 0;

		/* validate the symbol index before dereferencing it */
		offset = ELF32_R_SYM(rel->r_info);
		if (offset < 0 || (offset >
			(symsec->sh_size / sizeof(Elf32_Sym)))) {
			dev_err(dev, "%s: section %u reloc %u: bad relocation sym offset\n",
				module->name, relindex, i);
			return -ENOEXEC;
		}
		sym = ((Elf32_Sym *)(module->module_ptr
			+ symsec->sh_addr)) + offset;
		symname = info->strtab + sym->st_name;
		dev_dbg(dev, "%s\n", symname);

		/* the relocation target must lie fully inside dstsec */
		if (rel->r_offset < 0 ||
			rel->r_offset > dstsec->sh_size - sizeof(u32)) {
			dev_err(dev,
				"%s: section %u reloc %u sym '%s': out of bounds relocation, offset %d size %u\n",
				module->name, relindex, i, symname,
				rel->r_offset, dstsec->sh_size);
			return -ENOEXEC;
		}
		/* kernel-side write address and ADSP-side run address */
		loc = module->module_ptr + dstsec->sh_addr + rel->r_offset;
		adsp_loc = module->adsp_module_ptr +
				dstsec->sh_addr + rel->r_offset;
		dev_dbg(dev, "%p 0x%x\n", loc, adsp_loc);

		/* unresolved weak symbols are allowed; skip the fixup */
		if (ELF_ST_BIND(sym->st_info) == STB_WEAK
			&& sym->st_shndx == SHN_UNDEF) {
			dev_dbg(dev, "STB_WEAK %s\n", symname);
			continue;
		}

		switch (ELF32_R_TYPE(rel->r_info)) {
		case R_ARM_NONE:
			dev_dbg(dev, "R_ARM_NONE\n");
			/* ignore */
			break;
		case R_ARM_ABS32:
		case R_ARM_TARGET1:
			dev_dbg(dev, "R_ARM_ABS32\n");
			/* absolute 32-bit address: add the symbol value */
			*(u32 *)loc += sym->st_value;
			dev_dbg(dev, "addrs: 0x%x %p values: 0x%x 0x%x\n",
				adsp_loc, loc, sym->st_value,
				*(u32 *)loc);
			break;
		case R_ARM_PC24:
		case R_ARM_CALL:
		case R_ARM_JUMP24:
			dev_dbg(dev, "R_ARM_CALL R_ARM_JUMP24\n");
			/* sign-extend the 24-bit word-offset immediate */
			offset = (*(u32 *)loc & 0x00ffffff) << 2;
			if (offset & 0x02000000)
				offset -= 0x04000000;
			offset += sym->st_value - adsp_loc;
			/* a non-word-aligned function target means the
			 * callee is Thumb: rewrite B/BL as BLX */
			if ((ELF32_ST_TYPE(sym->st_info) == STT_FUNC)
				&& (offset & 3)) {
				dev_dbg(dev, "switching the mode from ARM to THUMB\n");
				switch_mode = true;
				h_bit = (offset & 2);
				dev_dbg(dev,
					"%s offset 0x%x hbit %d",
					symname, offset, h_bit);
			}
			if (offset <= (s32)0xfe000000 ||
				offset >= (s32)0x02000000) {
				dev_err(dev,
					"%s: section %u reloc %u sym '%s': relocation %u out of range (%p -> %#x)\n",
					module->name, relindex, i, symname,
					ELF32_R_TYPE(rel->r_info), loc,
					sym->st_value);
				return -ENOEXEC;
			}
			offset >>= 2;
			*(u32 *)loc &= 0xff000000;
			*(u32 *)loc |= offset & 0x00ffffff;
			if (switch_mode) {
				/* BLX encoding; H bit carries offset bit 1 */
				*(u32 *)loc &= ~(0xff000000);
				if (h_bit)
					*(u32 *)loc |= 0xfb000000;
				else
					*(u32 *)loc |= 0xfa000000;
			}
			dev_dbg(dev,
				"%s address 0x%x instruction 0x%x\n",
				symname, adsp_loc, *(u32 *)loc);
			break;
		case R_ARM_V4BX:
			dev_dbg(dev, "R_ARM_V4BX\n");
			/* Preserve Rm and the condition code. Alter
			 * other bits to re-code instruction as
			 * MOV PC,Rm.
			 */
			*(u32 *)loc &= 0xf000000f;
			*(u32 *)loc |= 0x01a0f000;
			break;
		case R_ARM_PREL31:
			dev_dbg(dev, "R_ARM_PREL31\n");
			/* 31-bit PC-relative (exception tables) */
			offset = *(u32 *)loc + sym->st_value - adsp_loc;
			*(u32 *)loc = offset & 0x7fffffff;
			break;
		case R_ARM_MOVW_ABS_NC:
		case R_ARM_MOVT_ABS:
			dev_dbg(dev, "R_ARM_MOVT_ABS\n");
			/* re-assemble the 16-bit MOVW/MOVT immediate */
			offset = *(u32 *)loc;
			offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff);
			offset = (offset ^ 0x8000) - 0x8000;
			offset += sym->st_value;
			if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS)
				offset >>= 16;
			*(u32 *)loc &= 0xfff0f000;
			*(u32 *)loc |= ((offset & 0xf000) << 4) |
					(offset & 0x0fff);
			break;
		case R_ARM_THM_CALL:
		case R_ARM_THM_JUMP24:
			dev_dbg(dev, "R_ARM_THM_CALL R_ARM_THM_JUMP24\n");
			upper = *(u16 *)loc;
			lower = *(u16 *)(loc + 2);
			/*
			 * 25 bit signed address range (Thumb-2 BL and B.W
			 * instructions):
			 *   S:I1:I2:imm10:imm11:0
			 * where:
			 *   S = upper[10]   = offset[24]
			 *   I1 = ~(J1 ^ S)  = offset[23]
			 *   I2 = ~(J2 ^ S)  = offset[22]
			 *   imm10 = upper[9:0] = offset[21:12]
			 *   imm11 = lower[10:0] = offset[11:1]
			 *   J1 = lower[13]
			 *   J2 = lower[11]
			 */
			sign = (upper >> 10) & 1;
			j1 = (lower >> 13) & 1;
			j2 = (lower >> 11) & 1;
			offset = (sign << 24) | ((~(j1 ^ sign) & 1) << 23) |
				((~(j2 ^ sign) & 1) << 22) |
				((upper & 0x03ff) << 12) |
				((lower & 0x07ff) << 1);
			if (offset & 0x01000000)
				offset -= 0x02000000;
			offset += sym->st_value - adsp_loc;
			/*
			 * For function symbols, only Thumb addresses are
			 * allowed (no interworking).
			 *
			 * For non-function symbols, the destination
			 * has no specific ARM/Thumb disposition, so
			 * the branch is resolved under the assumption
			 * that interworking is not required.
			 */
			if (ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
				!(offset & 1)) {
				dev_dbg(dev,
					"switching the mode from THUMB to ARM\n");
				switch_mode = true;
				offset = ALIGN(offset, 4);
			}
			if (offset <= (s32)0xff000000 ||
				offset >= (s32)0x01000000) {
				dev_err(dev,
					"%s: section %u reloc %u sym '%s': relocation %u out of range (%p -> %#x)\n",
					module->name, relindex, i, symname,
					ELF32_R_TYPE(rel->r_info), loc,
					sym->st_value);
				return -ENOEXEC;
			}
			sign = (offset >> 24) & 1;
			j1 = sign ^ (~(offset >> 23) & 1);
			j2 = sign ^ (~(offset >> 22) & 1);
			*(u16 *)loc = (u16)((upper & 0xf800) | (sign << 10) |
					((offset >> 12) & 0x03ff));
			*(u16 *)(loc + 2) = (u16)((lower & 0xd000) |
					(j1 << 13) | (j2 << 11) |
					((offset >> 1) & 0x07ff));
			if (switch_mode) {
				/* clear bit 12: BL -> BLX (Thumb to ARM) */
				lower = *(u16 *)(loc + 2);
				lower &= (~(1 << 12));
				*(u16 *)(loc + 2) = lower;
			}
			dev_dbg(dev,
				"%s address 0x%x upper instruction 0x%x\n",
				symname, adsp_loc, *(u16 *)loc);
			dev_dbg(dev,
				"%s address 0x%x lower instruction 0x%x\n",
				symname, adsp_loc, *(u16 *)(loc + 2));
			break;
		case R_ARM_THM_MOVW_ABS_NC:
		case R_ARM_THM_MOVT_ABS:
			dev_dbg(dev, "in R_ARM_THM_MOVT_ABS\n");
			upper = *(u16 *)loc;
			lower = *(u16 *)(loc + 2);
			/*
			 * MOVT/MOVW instructions encoding in Thumb-2:
			 *
			 * i    = upper[10]
			 * imm4 = upper[3:0]
			 * imm3 = lower[14:12]
			 * imm8 = lower[7:0]
			 *
			 * imm16 = imm4:i:imm3:imm8
			 */
			offset = ((upper & 0x000f) << 12) |
				((upper & 0x0400) << 1) |
				((lower & 0x7000) >> 4) | (lower & 0x00ff);
			offset = (offset ^ 0x8000) - 0x8000;
			offset += sym->st_value;
			if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS)
				offset >>= 16;
			*(u16 *)loc = (u16)((upper & 0xfbf0) |
					((offset & 0xf000) >> 12) |
					((offset & 0x0800) >> 1));
			*(u16 *)(loc + 2) = (u16)((lower & 0x8f00) |
					((offset & 0x0700) << 4) |
					(offset & 0x00ff));
			break;
		default:
			dev_err(dev, "%s: unknown relocation: %u\n",
				module->name, ELF32_R_TYPE(rel->r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
/*
 * Walk all section headers and apply REL relocations whose target
 * section was actually loaded (SHF_ALLOC).  RELA sections are not
 * supported and abort the load with -EINVAL.
 */
static int
apply_relocations(struct adsp_module *mod,
		  const struct load_info *info)
{
	unsigned int i;
	int ret = 0;

	/* Now do relocations (section 0 is the null section). */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		unsigned int target = info->sechdrs[i].sh_info;

		/* Not a valid relocation section? */
		if (target >= info->hdr->e_shnum)
			continue;
		/* Don't bother with non-allocated sections */
		if (!(info->sechdrs[target].sh_flags & SHF_ALLOC))
			continue;

		if (info->sechdrs[i].sh_type == SHT_REL)
			ret = apply_relocate(info, info->sechdrs, info->strtab,
					     info->index.sym, i, mod);
		else if (info->sechdrs[i].sh_type == SHT_RELA)
			return -EINVAL;

		if (ret < 0)
			break;
	}
	return ret;
}
/*
 * Resolve every symbol in the module's symbol table: undefined symbols
 * are looked up in the global ADSP symbol table, and section-relative
 * symbols are rebased to their final ADSP load address.  Returns 0 or
 * -ENOEXEC for common/unresolvable symbols.
 */
static int
simplify_symbols(struct adsp_module *mod,
		 const struct load_info *info)
{
	Elf32_Shdr *symsec = &info->sechdrs[info->index.sym];
	Elf32_Sym *sym = mod->module_ptr + symsec->sh_addr;
	unsigned int secbase;
	unsigned int i;
	int ret = 0;
	struct global_sym_info *sym_info;
	struct device *dev = info->dev;

	for (i = 1; i < symsec->sh_size / sizeof(Elf32_Sym); i++) {
		const char *name = info->strtab + sym[i].st_name;

		dev_dbg(dev, "%s\n", name);
		switch (sym[i].st_shndx) {
		case SHN_COMMON:
			/* We compiled with -fno-common.  These are not
			   supposed to happen. */
			dev_err(dev, "Common symbol: '%s'\n", name);
			dev_err(dev,
				"please compile module %s with -fno-common\n",
				mod->name);
			ret = -ENOEXEC;
			goto end;
		case SHN_ABS:
			/* Don't need to do anything */
			dev_dbg(dev, "Absolute symbol: 0x%08lx\n",
				(long)sym[i].st_value);
			break;
		case SHN_UNDEF:
			sym_info = find_global_symbol(name);
			/* Ok if resolved. */
			if (sym_info) {
				sym[i].st_value = sym_info->addr;
				sym[i].st_info = sym_info->info;
				dev_dbg(dev, "SHN_UNDEF sym '%s':0x%x\n",
					name, sym_info->addr);
				break;
			}
			/* Unresolved weak symbols are legal; left as zero. */
			if (ELF_ST_BIND(sym[i].st_info) == STB_WEAK) {
				dev_dbg(dev, "WEAK SYM %s not resolved\n",
					name);
				break;
			}
			dev_err(dev, "No symbol '%s' found\n", name);
			ret = -ENOEXEC;
			goto end;
		default:
			/* Divert to percpu allocation if a percpu var. */
			dev_dbg(dev, "default\n");
			/* rebase to the section's ADSP load address */
			secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
			sym[i].st_value += secbase + mod->adsp_module_ptr;
			dev_dbg(dev, "symbol %s is 0x%x\n",
				name, sym[i].st_value);
			break;
		}
	}
end:
	return ret;
}
/*
 * Allocate the app's final home in ADSP DRAM and copy every SHF_ALLOC
 * section into it.  Each copied section's sh_addr is rewritten to its
 * offset inside the module image.  Returns 0 or -ENOMEM.
 */
static int move_module(struct adsp_module *mod, struct load_info *info)
{
	struct device *dev = info->dev;
	int i;

	mod->handle = dram_app_mem_request(info->name, mod->size);
	if (!mod->handle) {
		dev_err(dev, "cannot allocate memory for app %s\n", info->name);
		return -ENOMEM;
	}
	/* ADSP-side address and the matching kernel-side mapping */
	mod->adsp_module_ptr = dram_app_mem_get_address(mod->handle);
	mod->module_ptr = nvadsp_da_to_va_mappings(mod->adsp_module_ptr,
			mod->size);
	dev_info(dev, "module %s Load address %p 0x%x\n", info->name,
			mod->module_ptr, mod->adsp_module_ptr);

	/* Transfer each section which specifies SHF_ALLOC */
	dev_dbg(dev, "final section addresses:\n");
	for (i = 0; i < info->hdr->e_shnum; i++) {
		void *dest;
		struct elf32_shdr *shdr = &info->sechdrs[i];

		if (!(shdr->sh_flags & SHF_ALLOC))
			continue;
		/* sh_entsize holds the offset computed by layout_sections();
		 * the top bit marks sections in the init part */
		if (shdr->sh_entsize & INIT_OFFSET_MASK) {
			dev_dbg(dev, "%s %d\n",
				info->secstrings + shdr->sh_name,
				shdr->sh_entsize);
			dest = mod->module_ptr
				+ (shdr->sh_entsize & ~INIT_OFFSET_MASK);
		} else {
			dev_dbg(dev, "%s %d\n",
				info->secstrings + shdr->sh_name,
				shdr->sh_entsize);
			dest = mod->module_ptr + shdr->sh_entsize;
		}

		/* SHT_NOBITS (.bss) has no file data to copy */
		if (shdr->sh_type != SHT_NOBITS)
			memcpy(dest,
				(void *)info->hdr + shdr->sh_offset,
				shdr->sh_size);
		/* Update sh_addr to point to copy in image. */
		shdr->sh_addr = (uint32_t)(dest - mod->module_ptr);
		dev_dbg(dev, "name %s 0x%x %p 0x%x 0x%x\n",
			info->secstrings + shdr->sh_name, shdr->sh_addr,
			dest, shdr->sh_addr + mod->adsp_module_ptr,
			shdr->sh_size);
	}
	return 0;
}
/*
 * Reserve room for @sechdr inside the running module size and return
 * the aligned offset at which the section will be placed.  (@mod and
 * @section are unused; the signature mirrors the kernel module loader.)
 */
static int get_offset(struct adsp_module *mod, size_t *size,
		      struct elf32_shdr *sechdr, unsigned int section)
{
	int offset = ALIGN(*size, sechdr->sh_addralign ?: 1);

	*size = offset + sechdr->sh_size;
	return offset;
}
/*
 * True if @src should be kept in the core (permanent) symbol table: it
 * must name something, live in a loaded (SHF_ALLOC) section, and not
 * belong to the discardable init part.
 */
static bool
is_core_symbol(const struct elf32_sym *src,
	       const struct elf32_shdr *sechdrs, unsigned int shnum)
{
	const struct elf32_shdr *sec;

	if (src->st_shndx == SHN_UNDEF
		|| src->st_shndx >= shnum
		|| !src->st_name)
		return false;

	sec = sechdrs + src->st_shndx;
	if (!(sec->sh_flags & SHF_ALLOC)
#ifndef CONFIG_KALLSYMS_ALL
		|| !(sec->sh_flags & SHF_EXECINSTR)
#endif
		|| (sec->sh_entsize & INIT_OFFSET_MASK))
		return false;

	return true;
}
/*
 * Assign every SHF_ALLOC section an offset within the module image
 * (stored in sh_entsize), grouping sections by attribute in the same
 * order as the kernel module loader.  ".init*" sections are laid out
 * separately and their offsets are tagged with INIT_OFFSET_MASK.
 */
static void layout_sections(struct adsp_module *mod, struct load_info *info)
{
	static unsigned long const masks[][2] = {
		/* NOTE: all executable code must be the first section
		 * in this array; otherwise modify the text_size
		 * finder in the two loops below */
		{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
		{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
		{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
		{ ARCH_SHF_SMALL | SHF_ALLOC, 0 }
	};
	unsigned int m, i;
	struct device *dev = info->dev;

	/* ~0 marks a section as "not yet placed" */
	for (i = 0; i < info->hdr->e_shnum; i++)
		info->sechdrs[i].sh_entsize = ~0U;

	dev_dbg(dev, "Core section allocation order:\n");
	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < info->hdr->e_shnum; ++i) {
			struct elf32_shdr *s = &info->sechdrs[i];
			const char *sname = info->secstrings + s->sh_name;

			if ((s->sh_flags & masks[m][0]) != masks[m][0]
				|| (s->sh_flags & masks[m][1])
				|| s->sh_entsize != ~0U
				|| strstarts(sname, ".init"))
				continue;
			s->sh_entsize = get_offset(mod, &mod->size, s, i);
			dev_dbg(dev, "\t%s %d\n", sname, s->sh_entsize);
		}
	}

	dev_dbg(dev, "Init section allocation order:\n");
	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < info->hdr->e_shnum; ++i) {
			struct elf32_shdr *s = &info->sechdrs[i];
			const char *sname = info->secstrings + s->sh_name;

			if ((s->sh_flags & masks[m][0]) != masks[m][0]
				|| (s->sh_flags & masks[m][1])
				|| s->sh_entsize != ~0U
				|| !strstarts(sname, ".init"))
				continue;
			s->sh_entsize = (get_offset(mod, &mod->size, s, i)
					| INIT_OFFSET_MASK);
			dev_dbg(dev, "\t%s %d\n", sname, s->sh_entsize);
		}
	}
}
/*
 * Sanity-check section extents against the image length and stamp each
 * sh_addr with the section's offset in the temporary copy so later
 * stages can address sections uniformly.  Returns 0 or -ENOEXEC on a
 * truncated image.
 */
static int rewrite_section_headers(struct load_info *info)
{
	unsigned int i;
	struct device *dev = info->dev;

	/* This should always be true, but let's be sure. */
	info->sechdrs[0].sh_addr = 0;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		struct elf32_shdr *shdr = &info->sechdrs[i];

		if (shdr->sh_type != SHT_NOBITS
			&& info->len < shdr->sh_offset + shdr->sh_size) {
			dev_err(dev, "Module len %lu truncated\n", info->len);
			return -ENOEXEC;
		}
		/* Mark all sections sh_addr with their address in the
		   temporary image. */
		shdr->sh_addr = shdr->sh_offset;
	}
	return 0;
}
/*
 * Parse the raw ELF image: locate the section headers and string
 * tables, find the symbol table, then allocate the adsp_module
 * descriptor.  Returns the module or an ERR_PTR for malformed or
 * stripped images.
 */
static struct adsp_module *setup_load_info(struct load_info *info)
{
	unsigned int i;
	int err;
	struct adsp_module *mod;
	struct device *dev = info->dev;

	/* Set up the convenience variables */
	info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
	info->secstrings = (void *)info->hdr
		+ info->sechdrs[info->hdr->e_shstrndx].sh_offset;

	err = rewrite_section_headers(info);
	if (err)
		return ERR_PTR(err);

	/* Find internal symbols and strings. */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
			info->index.sym = i;
			info->index.str = info->sechdrs[i].sh_link;
			info->strtab = (char *)info->hdr
				+ info->sechdrs[info->index.str].sh_offset;
			break;
		}
	}

	/* This is temporary: point mod into copy of data. */
	mod = kzalloc(sizeof(struct adsp_module), GFP_KERNEL);
	if (!mod) {
		dev_err(dev, "Unable to create module\n");
		return ERR_PTR(-ENOMEM);
	}

	/* index.sym stayed 0 => no SHT_SYMTAB section was found */
	if (info->index.sym == 0) {
		dev_warn(dev, "%s: module has no symbols (stripped?)\n",
			info->name);
		kfree(mod);
		return ERR_PTR(-ENOEXEC);
	}
	return mod;
}
/*
 * Place the symbol and string table sections at the end of the init
 * part of the module and reserve room in the core part for the
 * symbols/strings that survive after load (see is_core_symbol()).
 *
 * Fix: the final debug print repeated symsect->sh_entsize instead of
 * printing strsect->sh_entsize for the string-table section it had
 * just placed (copy/paste slip; the upstream module loader logs the
 * string-table section there).
 */
static void layout_symtab(struct adsp_module *mod, struct load_info *info)
{
	struct elf32_shdr *symsect = info->sechdrs + info->index.sym;
	struct elf32_shdr *strsect = info->sechdrs + info->index.str;
	const struct elf32_sym *src;
	unsigned int i, nsrc, ndst, strtab_size = 0;
	struct device *dev = info->dev;

	/* Put symbol section at end of init part of module. */
	symsect->sh_flags |= SHF_ALLOC;
	symsect->sh_entsize = get_offset(mod, &mod->size, symsect,
					 info->index.sym) | INIT_OFFSET_MASK;
	dev_dbg(dev, "\t%s %d\n", info->secstrings + symsect->sh_name,
		symsect->sh_entsize);

	src = (void *)info->hdr + symsect->sh_offset;
	nsrc = symsect->sh_size / sizeof(*src);

	/* Compute total space required for the core symbols' strtab. */
	for (ndst = i = 0; i < nsrc; i++) {
		if (i == 0 ||
		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
			strtab_size += strlen(&info->strtab[src[i].st_name])+1;
			ndst++;
		}
	}

	/* Append room for core symbols at end of core part. */
	info->symoffs = ALIGN(mod->size, symsect->sh_addralign ?: 1);
	info->stroffs = mod->size = info->symoffs + ndst * sizeof(Elf32_Sym);
	mod->size += strtab_size;

	/* Put string table section at end of init part of module. */
	strsect->sh_flags |= SHF_ALLOC;
	strsect->sh_entsize = get_offset(mod, &mod->size, strsect,
					 info->index.str) | INIT_OFFSET_MASK;
	dev_dbg(dev, "\t%s %d\n",
		info->secstrings + strsect->sh_name,
		strsect->sh_entsize);
}
/*
 * layout_and_allocate() - compute the module's final memory layout and move
 * it into its destination allocation.
 *
 * Returns the module on success or an ERR_PTR from setup_load_info() /
 * move_module().
 */
static struct adsp_module *layout_and_allocate(struct load_info *info)
{
	/* Module within temporary copy. */
	struct adsp_module *mod;
	int err;

	mod = setup_load_info(info);
	if (IS_ERR(mod))
		return mod;

	mod->name = info->name;

	/* Determine total sizes, and put offsets in sh_entsize. For now
	   this is done generically; there doesn't appear to be any
	   special cases for the architectures. */
	layout_sections(mod, info);
	layout_symtab(mod, info);

	/* Allocate and move to the final place */
	err = move_module(mod, info);
	if (err) {
		/* TODO: need to handle error path more genericly */
		kfree(mod);
		return ERR_PTR(err);
	}

	return mod;
}
/*
 * elf_check_arch_arm32() - sanity-check an ELF header for a 32-bit ARM
 * executable that this CPU can run. Returns 1 if acceptable, 0 otherwise.
 */
static int elf_check_arch_arm32(const struct elf32_hdr *x)
{
	unsigned int eflags;

	/* Only ARM objects are acceptable. */
	if (x->e_machine != EM_ARM)
		return 0;

	/*
	 * A Thumb entry point (bit 0 set) needs Thumb support; an ARM entry
	 * point must be word aligned.
	 */
	if (x->e_entry & 1) {
		if (!(ELF_HWCAP & HWCAP_THUMB))
			return 0;
	} else if (x->e_entry & 3) {
		return 0;
	}

	eflags = x->e_flags;
	if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
		unsigned int flt_fmt =
			eflags & (EF_ARM_VFP_FLOAT | EF_ARM_SOFT_FLOAT);

		/* APCS26 is only allowed if the CPU supports it */
		if ((eflags & EF_ARM_APCS_26) && !(ELF_HWCAP & HWCAP_26BIT))
			return 0;

		/* VFP requires the supporting code */
		if (flt_fmt == EF_ARM_VFP_FLOAT && !(ELF_HWCAP & HWCAP_VFP))
			return 0;
	}

	return 1;
}
static int elf_header_check(struct load_info *info)
{
if (info->len < sizeof(*(info->hdr)))
return -ENOEXEC;
if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
|| info->hdr->e_type != ET_REL
|| !elf_check_arch_arm32(info->hdr)
|| info->hdr->e_shentsize != sizeof(Elf32_Shdr))
return -ENOEXEC;
if (info->hdr->e_shoff >= info->len
|| (info->hdr->e_shnum * sizeof(Elf32_Shdr) >
info->len - info->hdr->e_shoff))
return -ENOEXEC;
return 0;
}
/*
 * load_adsp_static_module() - build an adsp_module descriptor for an app
 * that is already linked into the ADSP OS image (no ELF loading needed).
 *
 * Returns the new module, or NULL on allocation failure.
 */
struct adsp_module *load_adsp_static_module(const char *appname,
	struct adsp_shared_app *shared_app, struct device *dev)
{
	struct adsp_module *mod;

	mod = kzalloc(sizeof(*mod), GFP_KERNEL);
	if (!mod)
		return NULL;

	/* mem_size is const in adsp_module, hence the cast for the copy. */
	memcpy((struct app_mem_size *)&mod->mem_size,
	       &shared_app->mem_size, sizeof(shared_app->mem_size));
	memcpy(mod->version, shared_app->version, sizeof(shared_app->version));
	mod->adsp_module_ptr = shared_app->mod_ptr;
	mod->dynamic = false;

	return mod;
}
struct adsp_module *load_adsp_dynamic_module(const char *appname,
const char *appfile, struct device *dev)
{
struct load_info info = { };
struct adsp_module *mod = NULL;
const struct firmware *fw;
struct elf32_shdr *data_shdr;
struct elf32_shdr *shared_shdr;
struct elf32_shdr *shared_wc_shdr;
struct elf32_shdr *aram_shdr;
struct elf32_shdr *aram_x_shdr;
struct app_mem_size *mem_size;
void *buf;
int ret;
ret = request_firmware(&fw, appfile, dev);
if (ret < 0) {
dev_err(dev,
"request firmware for %s(%s) failed with %d\n",
appname, appfile, ret);
return ERR_PTR(ret);
}
buf = kzalloc(fw->size, GFP_KERNEL);
if (!buf)
goto release_firmware;
memcpy(buf, fw->data, fw->size);
info.hdr = (struct elf32_hdr *)buf;
info.len = fw->size;
info.dev = dev;
info.name = appname;
ret = elf_header_check(&info);
if (ret) {
dev_err(dev,
"%s is not an elf file\n", appfile);
goto error_free_memory;
}
/* Figure out module layout, and allocate all the memory. */
mod = layout_and_allocate(&info);
if (IS_ERR_OR_NULL(mod))
goto error_free_memory;
/* update adsp specific sections */
data_shdr = nvadsp_get_section(fw, ".dram_data");
shared_shdr = nvadsp_get_section(fw, ".dram_shared");
shared_wc_shdr = nvadsp_get_section(fw, ".dram_shared_wc");
aram_shdr = nvadsp_get_section(fw, ".aram_data");
aram_x_shdr = nvadsp_get_section(fw, ".aram_x_data");
mem_size = (void *)&mod->mem_size;
if (data_shdr) {
dev_dbg(dev, "mem_size.dram_data %d\n",
data_shdr->sh_size);
mem_size->dram = data_shdr->sh_size;
}
if (shared_shdr) {
dev_dbg(dev, "mem_size.dram_shared %d\n",
shared_shdr->sh_size);
mem_size->dram_shared =
shared_shdr->sh_size;
}
if (shared_wc_shdr) {
dev_dbg(dev, "shared_wc_shdr->sh_size %d\n",
shared_wc_shdr->sh_size);
mem_size->dram_shared_wc =
shared_wc_shdr->sh_size;
}
if (aram_shdr) {
dev_dbg(dev, "aram_shdr->sh_size %d\n", aram_shdr->sh_size);
mem_size->aram = aram_shdr->sh_size;
}
if (aram_x_shdr) {
dev_dbg(dev,
"aram_x_shdr->sh_size %d\n", aram_x_shdr->sh_size);
mem_size->aram_x = aram_x_shdr->sh_size;
}
/* Fix up syms, so that st_value is a pointer to location. */
ret = simplify_symbols(mod, &info);
if (ret) {
dev_err(dev, "Unable to simplify symbols\n");
goto unload_module;
}
dev_dbg(dev, "applying relocation\n");
ret = apply_relocations(mod, &info);
if (ret) {
dev_err(dev, "relocation failed\n");
goto unload_module;
}
mod->dynamic = true;
error_free_memory:
kfree(buf);
release_firmware:
release_firmware(fw);
return ret ? ERR_PTR(ret) : mod;
unload_module:
kfree(buf);
unload_adsp_module(mod);
release_firmware(fw);
return ERR_PTR(ret);
}
/*
 * unload_adsp_module() - release a module's DRAM allocation and free the
 * descriptor. Tolerates NULL/ERR_PTR so error paths may call it
 * unconditionally (the original dereferenced mod->handle without a check).
 */
void unload_adsp_module(struct adsp_module *mod)
{
	if (IS_ERR_OR_NULL(mod))
		return;
	dram_app_mem_release(mod->handle);
	kfree(mod);
}

View File

@@ -0,0 +1,96 @@
/*
 * aram_manager.c
*
* ARAM manager
*
* Copyright (C) 2014-2022, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) "%s : %d, " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/tegra_nvadsp.h>
#include "aram_manager.h"
static void *aram_handle;
static LIST_HEAD(aram_alloc_list);
static LIST_HEAD(aram_free_list);
/* Log the ARAM memory manager's current state via mem_print(). */
void nvadsp_aram_print(void)
{
	mem_print(aram_handle);
}
/*
 * Allocate @size bytes of ARAM tagged with @name.
 * Returns the opaque allocation handle from mem_request().
 */
void *nvadsp_aram_request(const char *name, size_t size)
{
	return mem_request(aram_handle, name, size);
}
/* Release an ARAM allocation previously returned by nvadsp_aram_request(). */
bool nvadsp_aram_release(void *handle)
{
	return mem_release(aram_handle, handle);
}
/* Return the start address backing an ARAM allocation handle. */
unsigned long nvadsp_aram_get_address(void *handle)
{
	return mem_get_address(handle);
}
static struct dentry *aram_dump_debugfs_file;
/* seq_file show callback: dump the ARAM manager state into the debugfs file. */
static int nvadsp_aram_dump(struct seq_file *s, void *data)
{
	mem_dump(aram_handle, s);
	return 0;
}
/* debugfs open: bind the seq_file single-show helper to nvadsp_aram_dump(). */
static int nvadsp_aram_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, nvadsp_aram_dump, inode->i_private);
}
/* File operations for the "aram_dump" debugfs entry (read-only seq_file). */
static const struct file_operations aram_dump_fops = {
	.open = nvadsp_aram_dump_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
int nvadsp_aram_init(unsigned long addr, unsigned long size)
{
aram_handle = create_mem_manager("ARAM", addr, size);
if (IS_ERR(aram_handle)) {
pr_err("ERROR: failed to create aram memory_manager");
return PTR_ERR(aram_handle);
}
if (debugfs_initialized()) {
aram_dump_debugfs_file = debugfs_create_file("aram_dump",
S_IRUSR, NULL, NULL, &aram_dump_fops);
if (!aram_dump_debugfs_file) {
pr_err("ERROR: failed to create aram_dump debugfs");
destroy_mem_manager(aram_handle);
return -ENOMEM;
}
}
return 0;
}
/* Tear down the debugfs entry and destroy the ARAM memory manager. */
void nvadsp_aram_exit(void)
{
	debugfs_remove(aram_dump_debugfs_file);
	destroy_mem_manager(aram_handle);
}

View File

@@ -0,0 +1,23 @@
/*
* Header file for aram manager
*
* Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef __TEGRA_NVADSP_ARAM_MANAGER_H
#define __TEGRA_NVADSP_ARAM_MANAGER_H
#include "mem_manager.h"
int nvadsp_aram_init(unsigned long addr, unsigned long size);
void nvadsp_aram_exit(void);
#endif /* __TEGRA_NVADSP_ARAM_MANAGER_H */

View File

@@ -0,0 +1,307 @@
/*
* Copyright (c) 2015-2021, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/version.h>
#if KERNEL_VERSION(4, 15, 0) > LINUX_VERSION_CODE
#include <soc/tegra/chip-id.h>
#else
#include <soc/tegra/fuse.h>
#endif
#include <linux/platform_device.h>
#include <linux/tegra_nvadsp.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/tegra_nvadsp.h>
#ifdef CONFIG_TEGRA_VIRT_AUDIO_IVC
#include "tegra_virt_alt_ivc_common.h"
#include "tegra_virt_alt_ivc.h"
#endif
#include "dev.h"
#include "dev-t18x.h"
#ifdef CONFIG_PM
/*
 * nvadsp_t18x_clocks_disable() - disable/unprepare the adsp, aclk and
 * adsp_neon clocks if set, then drop the cached pointers (the clocks are
 * devm-managed, so no put is needed here). Always returns 0.
 */
static int nvadsp_t18x_clocks_disable(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;

	/* APE and APB2APE clocks which are required by NVADSP are controlled
	 * from parent ACONNECT bus driver
	 */

	if (drv_data->adsp_clk) {
		clk_disable_unprepare(drv_data->adsp_clk);
		dev_dbg(dev, "adsp clocks disabled\n");
		drv_data->adsp_clk = NULL;
	}

	if (drv_data->aclk_clk) {
		clk_disable_unprepare(drv_data->aclk_clk);
		dev_dbg(dev, "aclk clock disabled\n");
		drv_data->aclk_clk = NULL;
	}

	if (drv_data->adsp_neon_clk) {
		clk_disable_unprepare(drv_data->adsp_neon_clk);
		dev_dbg(dev, "adsp_neon clocks disabled\n");
		drv_data->adsp_neon_clk = NULL;
	}

	return 0;
}
static int nvadsp_t18x_clocks_enable(struct platform_device *pdev)
{
struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
int ret = 0;
/* APE and APB2APE clocks which are required by NVADSP are controlled
* from parent ACONNECT bus driver
*/
drv_data->adsp_clk = devm_clk_get(dev, "adsp");
if (IS_ERR_OR_NULL(drv_data->adsp_clk)) {
dev_err(dev, "unable to find adsp clock\n");
ret = PTR_ERR(drv_data->adsp_clk);
goto end;
}
ret = clk_prepare_enable(drv_data->adsp_clk);
if (ret) {
dev_err(dev, "unable to enable adsp clock\n");
goto end;
}
drv_data->aclk_clk = devm_clk_get(dev, "aclk");
if (IS_ERR_OR_NULL(drv_data->aclk_clk)) {
dev_err(dev, "unable to find aclk clock\n");
ret = PTR_ERR(drv_data->aclk_clk);
goto end;
}
ret = clk_prepare_enable(drv_data->aclk_clk);
if (ret) {
dev_err(dev, "unable to enable aclk clock\n");
goto end;
}
drv_data->adsp_neon_clk = devm_clk_get(dev, "adspneon");
if (IS_ERR_OR_NULL(drv_data->adsp_neon_clk)) {
dev_err(dev, "unable to find adsp neon clock\n");
ret = PTR_ERR(drv_data->adsp_neon_clk);
goto end;
}
ret = clk_prepare_enable(drv_data->adsp_neon_clk);
if (ret) {
dev_err(dev, "unable to enable adsp neon clock\n");
goto end;
}
dev_dbg(dev, "adsp neon clock enabled\n");
dev_dbg(dev, "all clocks enabled\n");
return 0;
end:
nvadsp_t18x_clocks_disable(pdev);
return ret;
}
/* Runtime-PM resume hook for T18x: just re-enable the ADSP clocks. */
static int __nvadsp_t18x_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	int ret;

	dev_dbg(dev, "at %s:%d\n", __func__, __LINE__);

	ret = nvadsp_t18x_clocks_enable(pdev);
	if (ret)
		dev_dbg(dev, "failed in nvadsp_t18x_clocks_enable\n");

	return ret;
}
/* Runtime-PM suspend hook for T18x: disable the ADSP clocks. */
static int __nvadsp_t18x_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	dev_dbg(dev, "at %s:%d\n", __func__, __LINE__);

	return nvadsp_t18x_clocks_disable(pdev);
}
/* Runtime-PM idle hook for T18x: nothing to do, allow suspend. */
static int __nvadsp_t18x_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "at %s:%d\n", __func__, __LINE__);
	return 0;
}
/* Install the T18x runtime-PM callbacks into the driver data. Returns 0. */
int nvadsp_pm_t18x_init(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "at %s:%d\n", __func__, __LINE__);

	drv_data->runtime_suspend = __nvadsp_t18x_runtime_suspend;
	drv_data->runtime_resume = __nvadsp_t18x_runtime_resume;
	drv_data->runtime_idle = __nvadsp_t18x_runtime_idle;

	return 0;
}
#endif /* CONFIG_PM */
/*
 * __assert_t18x_adsp() - put the ADSP complex into reset, then the APE_TKE
 * timer block (when present). Returns 0 or the first reset-control error.
 */
static int __assert_t18x_adsp(struct nvadsp_drv_data *d)
{
	struct platform_device *pdev = d->pdev;
	struct device *dev = &pdev->dev;
	int ret = 0;

	/*
	 * The ADSP_ALL reset in BPMP-FW is overloaded to assert
	 * all 7 resets i.e. ADSP, ADSPINTF, ADSPDBG, ADSPNEON,
	 * ADSPPERIPH, ADSPSCU and ADSPWDT resets. So resetting
	 * only ADSP reset is sufficient to reset all ADSP sub-modules.
	 */
	ret = reset_control_assert(d->adspall_rst);
	if (ret) {
		dev_err(dev, "failed to assert adsp\n");
		goto end;
	}

	/* APE_TKE reset */
	if (d->ape_tke_rst) {
		ret = reset_control_assert(d->ape_tke_rst);
		if (ret)
			dev_err(dev, "failed to assert ape_tke\n");
	}

end:
	return ret;
}
/*
 * __deassert_t18x_adsp() - release APE_TKE first (when present), then the
 * ADSP complex. Returns 0 or the first reset-control error.
 */
static int __deassert_t18x_adsp(struct nvadsp_drv_data *d)
{
	struct platform_device *pdev = d->pdev;
	struct device *dev = &pdev->dev;
	int ret = 0;

	/* APE_TKE reset */
	if (d->ape_tke_rst) {
		ret = reset_control_deassert(d->ape_tke_rst);
		if (ret) {
			dev_err(dev, "failed to deassert ape_tke\n");
			goto end;
		}
	}

	/*
	 * The ADSP_ALL reset in BPMP-FW is overloaded to de-assert
	 * all 7 resets i.e. ADSP, ADSPINTF, ADSPDBG, ADSPNEON, ADSPPERIPH,
	 * ADSPSCU and ADSPWDT resets. The BPMP-FW also takes care
	 * of specific de-assert sequence and delays between them.
	 * So de-resetting only ADSP reset is sufficient to de-reset
	 * all ADSP sub-modules.
	 */
	ret = reset_control_deassert(d->adspall_rst);
	if (ret)
		dev_err(dev, "failed to deassert adsp\n");

end:
	return ret;
}
#ifdef CONFIG_TEGRA_VIRT_AUDIO_IVC
/*
 * __virt_assert_t18x_adsp() - in hypervisor mode, ask the audio server to
 * assert the ADSP reset over IVC.
 *
 * NOTE(review): an ivc_send_receive failure is logged but the function
 * still returns 0 — presumably deliberate best-effort; confirm callers
 * do not rely on the reset having taken effect.
 */
static int __virt_assert_t18x_adsp(struct nvadsp_drv_data *d)
{
	int err;
	struct nvaudio_ivc_msg msg;
	struct nvaudio_ivc_ctxt *hivc_client = nvaudio_get_ivc_alloc_ctxt();

	if (!hivc_client) {
		pr_err("%s: Failed to allocate IVC context\n", __func__);
		return -ENODEV;
	}

	memset(&msg, 0, sizeof(struct nvaudio_ivc_msg));
	msg.cmd = NVAUDIO_ADSP_RESET;
	msg.params.adsp_reset_info.reset_req = ASSERT;
	msg.ack_required = true;

	err = nvaudio_ivc_send_receive(hivc_client,
				       &msg,
				       sizeof(struct nvaudio_ivc_msg));
	if (err < 0)
		pr_err("%s: error on ivc_send_receive\n", __func__);

	return 0;
}
/*
 * __virt_deassert_t18x_adsp() - in hypervisor mode, ask the audio server
 * to de-assert the ADSP reset over IVC.
 *
 * NOTE(review): like the assert variant, an IVC failure is logged but 0
 * is still returned — confirm this best-effort behavior is intended.
 */
static int __virt_deassert_t18x_adsp(struct nvadsp_drv_data *d)
{
	int err;
	struct nvaudio_ivc_msg msg;
	struct nvaudio_ivc_ctxt *hivc_client = nvaudio_get_ivc_alloc_ctxt();

	if (!hivc_client) {
		pr_err("%s: Failed to allocate IVC context\n", __func__);
		return -ENODEV;
	}

	memset(&msg, 0, sizeof(struct nvaudio_ivc_msg));
	msg.cmd = NVAUDIO_ADSP_RESET;
	msg.params.adsp_reset_info.reset_req = DEASSERT;
	msg.ack_required = true;

	err = nvaudio_ivc_send_receive(hivc_client,
				       &msg,
				       sizeof(struct nvaudio_ivc_msg));
	if (err < 0)
		pr_err("%s: error on ivc_send_receive\n", __func__);

	return 0;
}
#endif
/*
 * nvadsp_reset_t18x_init() - install the (de)assert callbacks and acquire
 * reset controls. In hypervisor mode the IVC-based callbacks are used and
 * no reset control is taken. The "ape_tke" reset is optional.
 *
 * Returns 0 on success or the "adspall" reset lookup error.
 */
int nvadsp_reset_t18x_init(struct platform_device *pdev)
{
	struct nvadsp_drv_data *d = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	int ret = 0;

#ifdef CONFIG_TEGRA_VIRT_AUDIO_IVC
	if (is_tegra_hypervisor_mode()) {
		d->assert_adsp = __virt_assert_t18x_adsp;
		d->deassert_adsp = __virt_deassert_t18x_adsp;
		d->adspall_rst = NULL;
		return 0;
	}
#endif

	d->assert_adsp = __assert_t18x_adsp;
	d->deassert_adsp = __deassert_t18x_adsp;

	d->adspall_rst = devm_reset_control_get(dev, "adspall");
	if (IS_ERR(d->adspall_rst)) {
		dev_err(dev, "can not get adspall reset\n");
		ret = PTR_ERR(d->adspall_rst);
		goto end;
	}

	/* ape_tke is optional; treat lookup failure as "not present". */
	d->ape_tke_rst = devm_reset_control_get(dev, "ape_tke");
	if (IS_ERR(d->ape_tke_rst))
		d->ape_tke_rst = NULL;

end:
	return ret;
}

View File

@@ -0,0 +1,23 @@
/*
* Copyright (C) 2015-2021, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __TEGRA_NVADSP_DEV_T18X_H
#define __TEGRA_NVADSP_DEV_T18X_H
int nvadsp_acast_t18x_init(struct platform_device *pdev);
int nvadsp_reset_t18x_init(struct platform_device *pdev);
int nvadsp_os_t18x_init(struct platform_device *pdev);
int nvadsp_pm_t18x_init(struct platform_device *pdev);
#endif /* __TEGRA_NVADSP_DEV_T18X_H */

View File

@@ -0,0 +1,306 @@
/*
* dev-t21x.c
*
* A device driver for ADSP and APE
*
* Copyright (C) 2014-2017, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/tegra_nvadsp.h>
#include <linux/clk/tegra.h>
#include <linux/delay.h>
#include <linux/reset.h>
#include "dev.h"
#include "amc.h"
#include "dev-t21x.h"
#ifdef CONFIG_PM
/*
 * nvadsp_clocks_disable() - disable/unprepare every T21x ADSP clock that is
 * currently cached, then drop the cached pointers (clocks are devm-managed).
 */
static void nvadsp_clocks_disable(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;

	if (drv_data->adsp_clk) {
		clk_disable_unprepare(drv_data->adsp_clk);
		dev_dbg(dev, "adsp clocks disabled\n");
		drv_data->adsp_clk = NULL;
	}

	if (drv_data->adsp_cpu_abus_clk) {
		clk_disable_unprepare(drv_data->adsp_cpu_abus_clk);
		dev_dbg(dev, "adsp cpu abus clock disabled\n");
		drv_data->adsp_cpu_abus_clk = NULL;
	}

	if (drv_data->adsp_neon_clk) {
		clk_disable_unprepare(drv_data->adsp_neon_clk);
		dev_dbg(dev, "adsp_neon clocks disabled\n");
		drv_data->adsp_neon_clk = NULL;
	}

	if (drv_data->ape_clk) {
		clk_disable_unprepare(drv_data->ape_clk);
		dev_dbg(dev, "ape clock disabled\n");
		drv_data->ape_clk = NULL;
	}

	if (drv_data->apb2ape_clk) {
		clk_disable_unprepare(drv_data->apb2ape_clk);
		dev_dbg(dev, "apb2ape clock disabled\n");
		drv_data->apb2ape_clk = NULL;
	}
}
static int nvadsp_clocks_enable(struct platform_device *pdev)
{
struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
int ret = 0;
drv_data->ape_clk = devm_clk_get(dev, "adsp.ape");
if (IS_ERR_OR_NULL(drv_data->ape_clk)) {
dev_err(dev, "unable to find adsp.ape clock\n");
ret = PTR_ERR(drv_data->ape_clk);
goto end;
}
ret = clk_prepare_enable(drv_data->ape_clk);
if (ret) {
dev_err(dev, "unable to enable adsp.ape clock\n");
goto end;
}
dev_dbg(dev, "ape clock enabled\n");
drv_data->adsp_clk = devm_clk_get(dev, "adsp");
if (IS_ERR_OR_NULL(drv_data->adsp_clk)) {
dev_err(dev, "unable to find adsp clock\n");
ret = PTR_ERR(drv_data->adsp_clk);
goto end;
}
ret = clk_prepare_enable(drv_data->adsp_clk);
if (ret) {
dev_err(dev, "unable to enable adsp clock\n");
goto end;
}
drv_data->adsp_cpu_abus_clk = devm_clk_get(dev, "adsp_cpu_abus");
if (IS_ERR_OR_NULL(drv_data->adsp_cpu_abus_clk)) {
dev_err(dev, "unable to find adsp cpu abus clock\n");
ret = PTR_ERR(drv_data->adsp_cpu_abus_clk);
goto end;
}
ret = clk_prepare_enable(drv_data->adsp_cpu_abus_clk);
if (ret) {
dev_err(dev, "unable to enable adsp cpu abus clock\n");
goto end;
}
drv_data->adsp_neon_clk = devm_clk_get(dev, "adspneon");
if (IS_ERR_OR_NULL(drv_data->adsp_neon_clk)) {
dev_err(dev, "unable to find adsp neon clock\n");
ret = PTR_ERR(drv_data->adsp_neon_clk);
goto end;
}
ret = clk_prepare_enable(drv_data->adsp_neon_clk);
if (ret) {
dev_err(dev, "unable to enable adsp neon clock\n");
goto end;
}
dev_dbg(dev, "adsp cpu clock enabled\n");
drv_data->apb2ape_clk = devm_clk_get(dev, "adsp.apb2ape");
if (IS_ERR_OR_NULL(drv_data->apb2ape_clk)) {
dev_err(dev, "unable to find adsp.apb2ape clk\n");
ret = PTR_ERR(drv_data->apb2ape_clk);
goto end;
}
ret = clk_prepare_enable(drv_data->apb2ape_clk);
if (ret) {
dev_err(dev, "unable to enable adsp.apb2ape clock\n");
goto end;
}
/* AHUB clock, UART clock is not being enabled as UART by default is
* disabled on t210
*/
dev_dbg(dev, "all clocks enabled\n");
return 0;
end:
nvadsp_clocks_disable(pdev);
return ret;
}
/*
 * nvadsp_amsic_skip_reg() - true for AMISC registers that must be excluded
 * from the save/restore sweep (hardware-managed or semaphore state).
 */
static inline bool nvadsp_amsic_skip_reg(u32 offset)
{
	return offset == AMISC_ADSP_L2_REGFILEBASE ||
	       offset == AMISC_SHRD_SMP_STA ||
	       (offset >= AMISC_SEM_REG_START && offset <= AMISC_SEM_REG_END) ||
	       offset == AMISC_TSC ||
	       offset == AMISC_ACTMON_AVG_CNT;
}
/*
 * nvadsp_amisc_save() - snapshot the AMISC register bank plus the ADSP
 * ACTMON registers into drv_data->state.amisc_regs, skipping registers
 * flagged by nvadsp_amsic_skip_reg(). Must stay in lockstep with
 * nvadsp_amisc_restore(), which replays the same sequence. Returns 0.
 */
static int nvadsp_amisc_save(struct platform_device *pdev)
{
	struct nvadsp_drv_data *d = platform_get_drvdata(pdev);
	u32 val, offset;
	int i = 0;

	/* First range: AMISC proper, up to the mailbox register. */
	offset = AMISC_REG_START_OFFSET;
	while (offset <= AMISC_REG_MBOX_OFFSET) {
		if (nvadsp_amsic_skip_reg(offset)) {
			offset += 4;
			continue;
		}
		val = readl(d->base_regs[AMISC] + offset);
		d->state.amisc_regs[i++] = val;
		offset += 4;
	}

	/* Second range: ADSP ACTMON registers. */
	offset = ADSP_ACTMON_REG_START_OFFSET;
	while (offset <= ADSP_ACTMON_REG_END_OFFSET) {
		if (nvadsp_amsic_skip_reg(offset)) {
			offset += 4;
			continue;
		}
		val = readl(d->base_regs[AMISC] + offset);
		d->state.amisc_regs[i++] = val;
		offset += 4;
	}
	return 0;
}
/*
 * nvadsp_amisc_restore() - write back the register snapshot captured by
 * nvadsp_amisc_save(), walking the identical offset sequence so the saved
 * array index stays aligned. Returns 0.
 */
static int nvadsp_amisc_restore(struct platform_device *pdev)
{
	struct nvadsp_drv_data *d = platform_get_drvdata(pdev);
	u32 val, offset;
	int i = 0;

	/* First range: AMISC proper, up to the mailbox register. */
	offset = AMISC_REG_START_OFFSET;
	while (offset <= AMISC_REG_MBOX_OFFSET) {
		if (nvadsp_amsic_skip_reg(offset)) {
			offset += 4;
			continue;
		}
		val = d->state.amisc_regs[i++];
		writel(val, d->base_regs[AMISC] + offset);
		offset += 4;
	}

	/* Second range: ADSP ACTMON registers. */
	offset = ADSP_ACTMON_REG_START_OFFSET;
	while (offset <= ADSP_ACTMON_REG_END_OFFSET) {
		if (nvadsp_amsic_skip_reg(offset)) {
			offset += 4;
			continue;
		}
		val = d->state.amisc_regs[i++];
		writel(val, d->base_regs[AMISC] + offset);
		offset += 4;
	}
	return 0;
}
/*
 * __nvadsp_runtime_resume() - T21x runtime resume: restore register access,
 * re-enable clocks, and (only if the ADSP OS was suspended) replay the
 * saved AMC/ARAM/AMISC hardware state.
 */
static int __nvadsp_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
	int ret = 0;

	dev_dbg(dev, "restoring adsp base regs\n");
	/* Re-expose the MMIO mappings hidden by runtime suspend. */
	drv_data->base_regs = drv_data->base_regs_saved;

	dev_dbg(dev, "enabling clocks\n");
	ret = nvadsp_clocks_enable(pdev);
	if (ret) {
		dev_err(dev, "nvadsp_clocks_enable failed\n");
		goto skip;
	}

	/* State restore only makes sense if it was saved at suspend time. */
	if (!drv_data->adsp_os_suspended) {
		dev_dbg(dev, "%s: adsp os is not suspended\n", __func__);
		goto skip;
	}

	dev_dbg(dev, "restoring ape state\n");
	nvadsp_amc_restore(pdev);
	nvadsp_aram_restore(pdev);
	nvadsp_amisc_restore(pdev);

skip:
	return ret;
}
/*
 * __nvadsp_runtime_suspend() - T21x runtime suspend: save AMISC/ARAM/AMC
 * state (only when the ADSP OS is suspended), disable clocks, and NULL out
 * the register base pointers so stray accesses fault loudly.
 */
static int __nvadsp_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
	int ret = 0;

	if (!drv_data->adsp_os_suspended) {
		dev_dbg(dev, "%s: adsp os is not suspended\n", __func__);
		goto clocks;
	}

	dev_dbg(dev, "saving amsic\n");
	nvadsp_amisc_save(pdev);
	dev_dbg(dev, "saving aram\n");
	nvadsp_aram_save(pdev);
	dev_dbg(dev, "saving amc\n");
	nvadsp_amc_save(pdev);

clocks:
	dev_dbg(dev, "disabling clocks\n");
	nvadsp_clocks_disable(pdev);

	dev_dbg(dev, "locking out adsp base regs\n");
	drv_data->base_regs = NULL;

	return ret;
}
/* Runtime-PM idle hook for T21x: nothing to do, allow suspend. */
static int __nvadsp_runtime_idle(struct device *dev)
{
	return 0;
}
/* Install the T21x runtime-PM callbacks into the driver data. Returns 0. */
int nvadsp_pm_t21x_init(struct platform_device *pdev)
{
	struct nvadsp_drv_data *d = platform_get_drvdata(pdev);

	d->runtime_suspend = __nvadsp_runtime_suspend;
	d->runtime_resume = __nvadsp_runtime_resume;
	d->runtime_idle = __nvadsp_runtime_idle;

	return 0;
}
#endif /* CONFIG_PM */
/*
 * nvadsp_reset_t21x_init() - acquire the "adspall" reset control.
 * Returns 0 on success or the reset lookup error.
 *
 * NOTE(review): when devm_reset_control_get() returns NULL, PTR_ERR(NULL)
 * is 0 and this reports success despite the missing reset — confirm NULL
 * is impossible on the targeted kernels.
 */
int nvadsp_reset_t21x_init(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	int ret = 0;

	drv_data->adspall_rst = devm_reset_control_get(dev, "adspall");
	if (IS_ERR_OR_NULL(drv_data->adspall_rst)) {
		ret = PTR_ERR(drv_data->adspall_rst);
		dev_err(dev, "unable to get adspall reset %d\n", ret);
	}

	return ret;
}

View File

@@ -0,0 +1,22 @@
/*
* Copyright (C) 2015-2017, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __TEGRA_NVADSP_DEV_T21X_H
#define __TEGRA_NVADSP_DEV_T21X_H
int nvadsp_reset_t21x_init(struct platform_device *pdev);
int nvadsp_os_t21x_init(struct platform_device *pdev);
int nvadsp_pm_t21x_init(struct platform_device *pdev);
#endif /* __TEGRA_NVADSP_DEV_T21X_H */

View File

@@ -0,0 +1,674 @@
/*
* dev.c
*
* A device driver for ADSP and APE
*
* Copyright (C) 2014-2022, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/platform_device.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/tegra_nvadsp.h>
#include <linux/version.h>
#if KERNEL_VERSION(4, 15, 0) > LINUX_VERSION_CODE
#include <soc/tegra/chip-id.h>
#else
#include <soc/tegra/fuse.h>
#endif
#include <linux/pm_runtime.h>
#if KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE
#include <linux/tegra_pm_domains.h>
#endif
#include <linux/clk/tegra.h>
#include <linux/delay.h>
#include <asm/arch_timer.h>
#include <linux/irqchip/tegra-agic.h>
#include "dev.h"
#include "os.h"
#include "amc.h"
#include "ape_actmon.h"
#include "aram_manager.h"
#include "dev-t21x.h"
#include "dev-t18x.h"
static struct nvadsp_drv_data *nvadsp_drv_data;
#ifdef CONFIG_DEBUG_FS
/*
 * adsp_debug_init() - create the "tegra_ape" debugfs root directory.
 *
 * NOTE(review): newer kernels make debugfs_create_dir() return an ERR_PTR
 * rather than NULL on failure, so this NULL check may never trigger there
 * — confirm against the target kernel version.
 */
static int __init adsp_debug_init(struct nvadsp_drv_data *drv_data)
{
	drv_data->adsp_debugfs_root = debugfs_create_dir("tegra_ape", NULL);
	if (!drv_data->adsp_debugfs_root)
		return -ENOMEM;
	return 0;
}
#endif /* CONFIG_DEBUG_FS */
#ifdef CONFIG_PM
/* Dispatch runtime resume to the chip-specific hook; -EINVAL if unset. */
static int nvadsp_runtime_resume(struct device *dev)
{
	struct nvadsp_drv_data *drv_data =
		platform_get_drvdata(to_platform_device(dev));

	return drv_data->runtime_resume ?
		drv_data->runtime_resume(dev) : -EINVAL;
}
/* Dispatch runtime suspend to the chip-specific hook; -EINVAL if unset. */
static int nvadsp_runtime_suspend(struct device *dev)
{
	struct nvadsp_drv_data *drv_data =
		platform_get_drvdata(to_platform_device(dev));

	return drv_data->runtime_suspend ?
		drv_data->runtime_suspend(dev) : -EINVAL;
}
/* Dispatch runtime idle to the chip-specific hook; default to 0 if unset. */
static int nvadsp_runtime_idle(struct device *dev)
{
	struct nvadsp_drv_data *drv_data =
		platform_get_drvdata(to_platform_device(dev));

	return drv_data->runtime_idle ? drv_data->runtime_idle(dev) : 0;
}
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
/* System suspend: skip if already runtime-suspended, else reuse that path. */
static int nvadsp_suspend(struct device *dev)
{
	if (pm_runtime_status_suspended(dev))
		return 0;

	return nvadsp_runtime_suspend(dev);
}
/* System resume: skip if runtime-suspended, else reuse the runtime path. */
static int nvadsp_resume(struct device *dev)
{
	if (pm_runtime_status_suspended(dev))
		return 0;

	return nvadsp_runtime_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */
/* System sleep uses the noirq phase; runtime PM delegates to chip hooks. */
static const struct dev_pm_ops nvadsp_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(nvadsp_suspend, nvadsp_resume)
	SET_RUNTIME_PM_OPS(nvadsp_runtime_suspend, nvadsp_runtime_resume,
			   nvadsp_runtime_idle)
};
/*
 * nvadsp_get_timestamp_counter() - read the ARM generic timer virtual
 * counter, used as a timestamp base shared with the ADSP.
 */
uint64_t nvadsp_get_timestamp_counter(void)
{
#if KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE
	return arch_counter_get_cntvct();
#else
	/* 5.4+ renamed the accessor to the erratum-aware variant. */
	return __arch_counter_get_cntvct_stable();
#endif
}
EXPORT_SYMBOL(nvadsp_get_timestamp_counter);
/*
 * nvadsp_set_bw() - request an EMC floor of @efreq (kHz) through whichever
 * bandwidth interface was registered: the legacy bwmgr client or, on 5.9+,
 * the interconnect path. Returns 0 or the interface's error; -EINVAL when
 * neither interface is available.
 */
int nvadsp_set_bw(struct nvadsp_drv_data *drv_data, u32 efreq)
{
	int ret = -EINVAL;

	if (drv_data->bwmgr)
		ret = tegra_bwmgr_set_emc(drv_data->bwmgr, efreq * 1000,
					  TEGRA_BWMGR_SET_EMC_FLOOR);
#if KERNEL_VERSION(5, 9, 0) <= LINUX_VERSION_CODE
	else if (drv_data->icc_path_handle)
		ret = icc_set_bw(drv_data->icc_path_handle, 0,
				 (unsigned long)FREQ2ICC(efreq * 1000));
#endif

	if (ret)
		dev_err(&drv_data->pdev->dev,
			"failed to set emc freq rate:%d\n", ret);

	return ret;
}
/*
 * nvadsp_bw_register() - register with the platform's bandwidth interface:
 * the legacy bwmgr on T210/T186/T194, otherwise (5.9+, non-hypervisor) the
 * interconnect framework. Failures are logged and the handle is cleared so
 * nvadsp_set_bw() degrades gracefully.
 */
static void nvadsp_bw_register(struct nvadsp_drv_data *drv_data)
{
	struct device *dev = &drv_data->pdev->dev;

	switch (tegra_get_chip_id()) {
	case TEGRA210:
	case TEGRA186:
	case TEGRA194:
		drv_data->bwmgr = tegra_bwmgr_register(
			TEGRA_BWMGR_CLIENT_APE_ADSP);
		if (IS_ERR(drv_data->bwmgr)) {
			dev_err(dev, "unable to register bwmgr\n");
			drv_data->bwmgr = NULL;
		}
		break;
	default:
#if KERNEL_VERSION(5, 9, 0) <= LINUX_VERSION_CODE
		if (!is_tegra_hypervisor_mode()) {
			/* Interconnect Support */
#ifdef CONFIG_ARCH_TEGRA_23x_SOC
			drv_data->icc_path_handle = icc_get(dev, TEGRA_ICC_APE,
							    TEGRA_ICC_PRIMARY);
#endif
			if (IS_ERR(drv_data->icc_path_handle)) {
				dev_err(dev,
					"%s: Failed to register Interconnect err=%ld\n",
					__func__, PTR_ERR(drv_data->icc_path_handle));
				drv_data->icc_path_handle = NULL;
			}
		}
#endif
		break;
	}
}
/*
 * nvadsp_bw_unregister() - drop the EMC floor request, then release
 * whichever bandwidth handle (bwmgr or interconnect) was registered.
 */
static void nvadsp_bw_unregister(struct nvadsp_drv_data *drv_data)
{
	nvadsp_set_bw(drv_data, 0);

	if (drv_data->bwmgr) {
		tegra_bwmgr_unregister(drv_data->bwmgr);
		drv_data->bwmgr = NULL;
	}

#if KERNEL_VERSION(5, 9, 0) <= LINUX_VERSION_CODE
	if (drv_data->icc_path_handle) {
		icc_put(drv_data->icc_path_handle);
		drv_data->icc_path_handle = NULL;
	}
#endif
}
/*
 * nvadsp_parse_co_mem() - resolve the optional "nvidia,adsp_co" carveout
 * phandle into drv_data->co_mem and record its size as the OS region size.
 * A missing or disabled node is not an error. Returns 0 or the
 * of_address_to_resource() error.
 */
static int __init nvadsp_parse_co_mem(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	struct device_node *node;
	int err = 0;

	node = of_parse_phandle(dev->of_node, "nvidia,adsp_co", 0);
	if (!node)
		return 0;

	if (!of_device_is_available(node))
		goto exit;

	err = of_address_to_resource(node, 0, &drv_data->co_mem);
	if (err) {
		dev_err(dev, "cannot get adsp CO memory (%d)\n", err);
		goto exit;
	}

	drv_data->adsp_mem[ADSP_OS_SIZE] = resource_size(&drv_data->co_mem);

exit:
	of_node_put(node);
	return err;
}
/*
 * nvadsp_parse_clk_entries() - read the optional clock-frequency DT
 * properties (adsp_freq, ape_freq, ape_emc_freq); absent properties are
 * only logged at debug level and leave the defaults untouched.
 */
static void __init nvadsp_parse_clk_entries(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	u32 val32 = 0;

	/* Optional properties, should come from platform dt files */
	if (!of_property_read_u32(dev->of_node, "nvidia,adsp_freq", &val32)) {
		drv_data->adsp_freq = val32;
		drv_data->adsp_freq_hz = val32 * 1000;
	} else {
		dev_dbg(dev, "adsp_freq dt not found\n");
	}

	if (!of_property_read_u32(dev->of_node, "nvidia,ape_freq", &val32))
		drv_data->ape_freq = val32;
	else
		dev_dbg(dev, "ape_freq dt not found\n");

	if (!of_property_read_u32(dev->of_node, "nvidia,ape_emc_freq", &val32))
		drv_data->ape_emc_freq = val32;
	else
		dev_dbg(dev, "ape_emc_freq dt not found\n");
}
/*
 * Parse all ADSP properties from the device tree into drv_data.
 *
 * Mandatory: "nvidia,adsp_mem", "nvidia,adsp-evp-base" (and
 * "nvidia,adsp_unit_fpga_reset" when unit-FPGA mode is selected).
 * Optional: firmware name, secload flag, platform id, load timeout and
 * the clock entries parsed by nvadsp_parse_clk_entries().
 *
 * Returns 0 on success or a negative errno.
 */
static int __init nvadsp_parse_dt(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	const char *adsp_elf;
	u32 *adsp_reset;
	u32 *adsp_mem;
	int iter;
	int ret;

	adsp_reset = drv_data->unit_fpga_reset;
	adsp_mem = drv_data->adsp_mem;

	for (iter = 0; iter < ADSP_MEM_END; iter++) {
		if (of_property_read_u32_index(dev->of_node, "nvidia,adsp_mem",
			iter, &adsp_mem[iter])) {
			dev_err(dev, "adsp memory dt %d not found\n", iter);
			return -EINVAL;
		}
	}

	for (iter = 0; iter < ADSP_EVP_END; iter++) {
		if (of_property_read_u32_index(dev->of_node,
			"nvidia,adsp-evp-base",
			iter, &drv_data->evp_base[iter])) {
			/* message previously said "adsp memory" (copy-paste) */
			dev_err(dev, "adsp evp dt %d not found\n", iter);
			return -EINVAL;
		}
	}

	if (!of_property_read_string(dev->of_node,
		"nvidia,adsp_elf", &adsp_elf)) {
		/* Guarded copy: reject names that would overflow adsp_elf[] */
		if (strlen(adsp_elf) < MAX_FW_STR)
			strcpy(drv_data->adsp_elf, adsp_elf);
		else {
			dev_err(dev, "invalid string in nvidia,adsp_elf\n");
			return -EINVAL;
		}
	} else
		strcpy(drv_data->adsp_elf, NVADSP_ELF);

	drv_data->adsp_unit_fpga = of_property_read_bool(dev->of_node,
		"nvidia,adsp_unit_fpga");
	drv_data->adsp_os_secload = of_property_read_bool(dev->of_node,
		"nvidia,adsp_os_secload");

	if (of_property_read_u32(dev->of_node, "nvidia,tegra_platform",
		&drv_data->tegra_platform))
		dev_dbg(dev, "tegra_platform dt not found\n");

	if (of_property_read_u32(dev->of_node, "nvidia,adsp_load_timeout",
		&drv_data->adsp_load_timeout))
		dev_dbg(dev, "adsp_load_timeout dt not found\n");

	if (drv_data->adsp_unit_fpga) {
		for (iter = 0; iter < ADSP_UNIT_FPGA_RESET_END; iter++) {
			if (of_property_read_u32_index(dev->of_node,
				"nvidia,adsp_unit_fpga_reset", iter,
				&adsp_reset[iter])) {
				dev_err(dev, "adsp reset dt %d not found\n",
					iter);
				return -EINVAL;
			}
		}
	}

	nvadsp_parse_clk_entries(pdev);

	/* Propagate the real error code instead of folding it to -ENOMEM. */
	ret = nvadsp_parse_co_mem(pdev);
	if (ret)
		return ret;

	drv_data->state.evp = devm_kzalloc(dev,
		drv_data->evp_base[ADSP_EVP_SIZE], GFP_KERNEL);
	if (!drv_data->state.evp)
		return -ENOMEM;

	return 0;
}
/*
 * Main driver probe.
 *
 * Order of operations: allocate driver data, parse DT, init PM and
 * debugfs, map the APE register blocks and DRAM regions, collect the
 * AGIC virq numbers, then bring up the subsystems (hwmailbox, mbox,
 * actmon, OS loader, resets, app module, ARAM, bandwidth, ACAST) while
 * holding a runtime-PM reference.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init nvadsp_probe(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv_data;
	struct device *dev = &pdev->dev;
	struct resource *res = NULL;
	void __iomem *base = NULL;
	uint32_t aram_addr;
	uint32_t aram_size;
	int dram_iter;
	int irq_iter;
	int ret = 0;
	int iter;

	dev_info(dev, "in probe()...\n");

	drv_data = devm_kzalloc(dev, sizeof(*drv_data),
				GFP_KERNEL);
	if (!drv_data) {
		dev_err(&pdev->dev, "Failed to allocate driver data");
		ret = -ENOMEM;
		goto out;
	}

	platform_set_drvdata(pdev, drv_data);
	drv_data->pdev = pdev;
	drv_data->chip_data = of_device_get_match_data(dev);

	ret = nvadsp_parse_dt(pdev);
	if (ret)
		goto out;

#ifdef CONFIG_PM
	ret = nvadsp_pm_init(pdev);
	if (ret) {
		dev_err(dev, "Failed in pm init");
		goto out;
	}
#endif

#ifdef CONFIG_DEBUG_FS
	/* debugfs failure is logged but not fatal */
	if (adsp_debug_init(drv_data))
		dev_err(dev,
			"unable to create tegra_ape debug fs directory\n");
#endif

	drv_data->base_regs =
		devm_kzalloc(dev, sizeof(void *) * APE_MAX_REG,
			     GFP_KERNEL);
	if (!drv_data->base_regs) {
		dev_err(dev, "Failed to allocate regs");
		ret = -ENOMEM;
		goto out;
	}

	for (iter = 0; iter < APE_MAX_REG; iter++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, iter);
		if (!res) {
			dev_err(dev,
				"Failed to get resource with ID %d\n",
				iter);
			ret = -EINVAL;
			goto out;
		}
		if (!drv_data->adsp_unit_fpga && iter == UNIT_FPGA_RST)
			continue;
		/*
		 * skip if the particular module is not present in a
		 * generation, for which the register start address
		 * is made 0 from dt.
		 */
		if (res->start == 0)
			continue;
		base = devm_ioremap_resource(dev, res);
		if (IS_ERR(base)) {
			dev_err(dev, "Failed to iomap resource reg[%d]\n",
				iter);
			ret = PTR_ERR(base);
			goto out;
		}
		drv_data->base_regs[iter] = base;
		nvadsp_add_load_mappings(res->start, (void __force *)base,
					 resource_size(res));
	}

	drv_data->base_regs_saved = drv_data->base_regs;

	/* DRAM regions follow the register blocks; iter continues on. */
	for (dram_iter = 0; dram_iter < ADSP_MAX_DRAM_MAP; dram_iter++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, iter++);
		if (!res) {
			dev_err(dev,
				"Failed to get DRAM map with ID %d\n", iter);
			ret = -EINVAL;
			goto out;
		}
		drv_data->dram_region[dram_iter] = res;
	}

	for (irq_iter = 0; irq_iter < NVADSP_VIRQ_MAX; irq_iter++) {
		res = platform_get_resource(pdev, IORESOURCE_IRQ, irq_iter);
		if (!res) {
			dev_err(dev, "Failed to get irq number for index %d\n",
				irq_iter);
			ret = -EINVAL;
			goto out;
		}
		drv_data->agic_irqs[irq_iter] = res->start;
	}

	nvadsp_drv_data = drv_data;

#ifdef CONFIG_PM
#if KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE
	tegra_pd_add_device(dev);
#endif
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto out;
#endif
	ret = nvadsp_hwmbox_init(pdev);
	if (ret)
		goto err;
	ret = nvadsp_mbox_init(pdev);
	if (ret)
		goto err;

#ifdef CONFIG_TEGRA_ADSP_ACTMON
	ret = ape_actmon_probe(pdev);
	if (ret)
		goto err;
#endif
	ret = nvadsp_os_probe(pdev);
	if (ret)
		goto err;

	ret = nvadsp_reset_init(pdev);
	if (ret) {
		dev_err(dev, "Failed initialize resets\n");
		goto err;
	}

	ret = nvadsp_app_module_probe(pdev);
	if (ret)
		goto err;

	aram_addr = drv_data->adsp_mem[ARAM_ALIAS_0_ADDR];
	aram_size = drv_data->adsp_mem[ARAM_ALIAS_0_SIZE];
	ret = nvadsp_aram_init(aram_addr, aram_size);
	if (ret)
		dev_err(dev, "Failed to init aram\n");

	nvadsp_bw_register(drv_data);

	if (!drv_data->adsp_os_secload) {
		ret = nvadsp_acast_init(pdev);
		if (ret)
			goto err;
	}
err:
#ifdef CONFIG_PM
	/*
	 * Do not assign the put_sync() result to ret: the previous code
	 * clobbered the error code of a failed init step above, so probe
	 * could report success on failure (and failure on success).
	 */
	if (pm_runtime_put_sync(dev) < 0)
		dev_err(dev, "pm_runtime_put_sync failed\n");
#endif
out:
	return ret;
}
/*
 * Driver removal: tear down bandwidth clients, release the ARAM pool
 * and balance the runtime-PM enable done in probe.
 */
static int nvadsp_remove(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
	nvadsp_bw_unregister(drv_data);
	nvadsp_aram_exit();
	pm_runtime_disable(&pdev->dev);
#ifdef CONFIG_PM
	/* Force a suspend if runtime PM never suspended the device. */
	if (!pm_runtime_status_suspended(&pdev->dev))
		nvadsp_runtime_suspend(&pdev->dev);
#endif
#if KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE
	tegra_pd_remove_device(&pdev->dev);
#endif
	return 0;
}
#ifdef CONFIG_OF
/*
 * Tegra210: hw mailboxes live in the AMISC block; the dedicated
 * state/thread/irq/shared-mem/os-config mailboxes are unused (0).
 */
static struct nvadsp_chipdata tegra210_adsp_chipdata = {
	.hwmb = {
		.reg_idx = AMISC,
		.hwmbox0_reg = 0x58,
		.hwmbox1_reg = 0X5C,
		.hwmbox2_reg = 0x60,
		.hwmbox3_reg = 0x64,
	},
	.adsp_state_hwmbox = 0,
	.adsp_thread_hwmbox = 0,
	.adsp_irq_hwmbox = 0,
	.adsp_shared_mem_hwmbox = 0,
	.adsp_os_config_hwmbox = 0,
	.reset_init = nvadsp_reset_t21x_init,
	.os_init = nvadsp_os_t21x_init,
#ifdef CONFIG_PM
	.pm_init = nvadsp_pm_t21x_init,
#endif
	.wdt_irq = INT_T210_ADSP_WDT,
	.start_irq = INT_T210_AGIC_START,
	.end_irq = INT_T210_AGIC_END,
	.amc_err_war = true,
};
/*
 * Tegra18x family: hw mailboxes live in the AHSP block, eight boxes at
 * a 0x8000 stride; boxes 3-7 carry dedicated roles (see comments).
 */
static struct nvadsp_chipdata tegrat18x_adsp_chipdata = {
	.hwmb = {
		.reg_idx = AHSP,
		.hwmbox0_reg = 0x00000,
		.hwmbox1_reg = 0X08000,
		.hwmbox2_reg = 0X10000,
		.hwmbox3_reg = 0X18000,
		.hwmbox4_reg = 0X20000,
		.hwmbox5_reg = 0X28000,
		.hwmbox6_reg = 0X30000,
		.hwmbox7_reg = 0X38000,
		.empty_int_ie = 0x8,
	},
	.adsp_shared_mem_hwmbox = 0x18000, /* HWMBOX3 */
	.adsp_thread_hwmbox = 0x20000, /* HWMBOX4 */
	.adsp_os_config_hwmbox = 0X28000, /*HWMBOX5 */
	.adsp_state_hwmbox = 0x30000, /* HWMBOX6 */
	.adsp_irq_hwmbox = 0x38000, /* HWMBOX7 */
	.acast_init = nvadsp_acast_t18x_init,
	.reset_init = nvadsp_reset_t18x_init,
	.os_init = nvadsp_os_t18x_init,
#ifdef CONFIG_PM
	.pm_init = nvadsp_pm_t18x_init,
#endif
	.wdt_irq = INT_T18x_ATKE_WDT_IRQ,
	.start_irq = INT_T18x_AGIC_START,
	.end_irq = INT_T18x_AGIC_END,
	.amc_err_war = true,
};
/*
 * Tegra239: same AHSP mailbox layout and t18x hooks as above, but the
 * AMC error workaround is not needed and the extended chip ID (with
 * major revision) is reported.
 */
static struct nvadsp_chipdata tegra239_adsp_chipdata = {
	.hwmb = {
		.reg_idx = AHSP,
		.hwmbox0_reg = 0x00000,
		.hwmbox1_reg = 0X08000,
		.hwmbox2_reg = 0X10000,
		.hwmbox3_reg = 0X18000,
		.hwmbox4_reg = 0X20000,
		.hwmbox5_reg = 0X28000,
		.hwmbox6_reg = 0X30000,
		.hwmbox7_reg = 0X38000,
		.empty_int_ie = 0x8,
	},
	.adsp_shared_mem_hwmbox = 0x18000, /* HWMBOX3 */
	.adsp_thread_hwmbox = 0x20000, /* HWMBOX4 */
	.adsp_os_config_hwmbox = 0X28000, /* HWMBOX5 */
	.adsp_state_hwmbox = 0x30000, /* HWMBOX6 */
	.adsp_irq_hwmbox = 0x38000, /* HWMBOX7 */
	.acast_init = nvadsp_acast_t18x_init,
	.reset_init = nvadsp_reset_t18x_init,
	.os_init = nvadsp_os_t18x_init,
#ifdef CONFIG_PM
	.pm_init = nvadsp_pm_t18x_init,
#endif
	.wdt_irq = INT_T18x_ATKE_WDT_IRQ,
	.start_irq = INT_T18x_AGIC_START,
	.end_irq = INT_T18x_AGIC_END,
	.amc_err_war = false,
	/* Populate Chip ID Major Revision as well */
	.chipid_ext = true,
};
/* DT match table: per-generation chip data selects hooks and layouts. */
static const struct of_device_id nvadsp_of_match[] = {
	{
		.compatible = "nvidia,tegra210-adsp",
		.data = &tegra210_adsp_chipdata,
	}, {
		.compatible = "nvidia,tegra18x-adsp",
		.data = &tegrat18x_adsp_chipdata,
	}, {
		.compatible = "nvidia,tegra239-adsp",
		.data = &tegra239_adsp_chipdata,
	}, {
		/* sentinel */
	},
};
#endif
/* __refdata: probe/remove are __init-annotated, silence section checks. */
static struct platform_driver nvadsp_driver __refdata = {
	.driver = {
		.name = "nvadsp",
		/* NOTE(review): .owner is also set by the platform core */
		.owner = THIS_MODULE,
		.pm = &nvadsp_pm_ops,
		.of_match_table = of_match_ptr(nvadsp_of_match),
	},
	.probe = nvadsp_probe,
	.remove = nvadsp_remove,
};
/* Module entry: register the platform driver. */
static int __init nvadsp_init(void)
{
	return platform_driver_register(&nvadsp_driver);
}
/* Module exit: unregister the platform driver. */
static void __exit nvadsp_exit(void)
{
	platform_driver_unregister(&nvadsp_driver);
}
module_init(nvadsp_init);
module_exit(nvadsp_exit);
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("Tegra Host ADSP Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("Dual BSD/GPL");

View File

@@ -0,0 +1,341 @@
/*
* dev.h
*
* A header file for Host driver for ADSP and APE
*
* Copyright (C) 2014-2022, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __TEGRA_NVADSP_DEV_H
#define __TEGRA_NVADSP_DEV_H
#include <linux/tegra_nvadsp.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/debugfs.h>
#include <linux/platform/tegra/emc_bwmgr.h>
#if KERNEL_VERSION(5, 9, 0) <= LINUX_VERSION_CODE
#ifdef CONFIG_ARCH_TEGRA_23x_SOC
#include <linux/platform/tegra/mc_utils.h>
#include <dt-bindings/interconnect/tegra_icc_id.h>
#endif
#include <linux/interconnect.h>
#endif
#include "hwmailbox.h"
#include "amc.h"
/*
* Note: These enums should be aligned to the regs mentioned in the
* device tree
*/
enum {
AMC,
AMISC,
ABRIDGE,
UNIT_FPGA_RST,
AHSP,
APE_MAX_REG
};
enum {
ADSP_DRAM1,
ADSP_DRAM2,
ADSP_MAX_DRAM_MAP
};
/*
* Note: These enums should be aligned to the adsp_mem node mentioned in the
* device tree
*/
enum adsp_mem_dt {
ADSP_OS_ADDR,
ADSP_OS_SIZE,
ADSP_APP_ADDR,
ADSP_APP_SIZE,
ARAM_ALIAS_0_ADDR,
ARAM_ALIAS_0_SIZE,
ACSR_ADDR, /* ACSR: ADSP CPU SHARED REGION */
ACSR_SIZE,
ADSP_MEM_END,
};
enum adsp_evp_dt {
ADSP_EVP_BASE,
ADSP_EVP_SIZE,
ADSP_EVP_END,
};
enum adsp_unit_fpga_reset {
ADSP_ASSERT,
ADSP_DEASSERT,
ADSP_UNIT_FPGA_RESET_END,
};
#define AMISC_REGS 0x2000
#define AMISC_ADSP_L2_REGFILEBASE 0x10
#define AMISC_SHRD_SMP_STA 0x14
#define AMISC_SEM_REG_START 0x1c
#define AMISC_SEM_REG_END 0x44
#define AMISC_TSC 0x48
#define AMISC_ACTMON_AVG_CNT 0x81c
#define AMISC_REG_START_OFFSET 0x0
#define AMISC_REG_MBOX_OFFSET 0x64
#define ADSP_ACTMON_REG_START_OFFSET 0x800
#define ADSP_ACTMON_REG_END_OFFSET 0x828
#if KERNEL_VERSION(5, 9, 0) <= LINUX_VERSION_CODE
#ifdef CONFIG_ARCH_TEGRA_23x_SOC
#define FREQ2ICC(x) (Bps_to_icc(emc_freq_to_bw(x)))
#else
#define FREQ2ICC(x) 0UL
#endif
#endif
#define NVADSP_ELF "adsp.elf"
#define MAX_FW_STR 30
enum nvadsp_virqs {
MBOX_SEND_VIRQ,
MBOX_RECV_VIRQ,
WDT_VIRQ,
WFI_VIRQ,
AMC_ERR_VIRQ,
ACTMON_VIRQ,
NVADSP_VIRQ_MAX,
};
struct nvadsp_pm_state {
u32 aram[AMC_ARAM_WSIZE];
uint32_t amc_regs[AMC_REGS];
uint32_t amisc_regs[AMISC_REGS];
u32 *evp;
void *evp_ptr;
};
struct nvadsp_hwmb {
u32 reg_idx;
u32 hwmbox0_reg;
u32 hwmbox1_reg;
u32 hwmbox2_reg;
u32 hwmbox3_reg;
u32 hwmbox4_reg;
u32 hwmbox5_reg;
u32 hwmbox6_reg;
u32 hwmbox7_reg;
u32 empty_int_ie;
};
typedef int (*acast_init) (struct platform_device *pdev);
typedef int (*reset_init) (struct platform_device *pdev);
typedef int (*os_init) (struct platform_device *pdev);
#ifdef CONFIG_PM
typedef int (*pm_init) (struct platform_device *pdev);
#endif
struct nvadsp_chipdata {
struct nvadsp_hwmb hwmb;
u32 adsp_state_hwmbox;
u32 adsp_thread_hwmbox;
u32 adsp_irq_hwmbox;
u32 adsp_shared_mem_hwmbox;
u32 adsp_os_config_hwmbox;
acast_init acast_init;
reset_init reset_init;
os_init os_init;
#ifdef CONFIG_PM
pm_init pm_init;
#endif
int wdt_irq;
int start_irq;
int end_irq;
bool amc_err_war;
bool chipid_ext;
};
struct nvadsp_drv_data {
void __iomem **base_regs;
void __iomem **base_regs_saved;
struct platform_device *pdev;
struct resource *dram_region[ADSP_MAX_DRAM_MAP];
struct hwmbox_queue hwmbox_send_queue;
struct nvadsp_mbox **mboxes;
unsigned long *mbox_ids;
spinlock_t mbox_lock;
#ifdef CONFIG_DEBUG_FS
struct dentry *adsp_debugfs_root;
#endif
struct clk *ape_clk;
struct clk *apb2ape_clk;
struct clk *adsp_clk;
struct clk *aclk_clk;
struct clk *adsp_cpu_abus_clk;
struct clk *adsp_neon_clk;
struct clk *uartape_clk;
struct clk *ahub_clk;
unsigned long adsp_freq; /* in KHz*/
unsigned long adsp_freq_hz; /* in Hz*/
unsigned long ape_freq; /* in KHz*/
unsigned long ape_emc_freq; /* in KHz*/
int (*runtime_suspend)(struct device *dev);
int (*runtime_resume)(struct device *dev);
int (*runtime_idle)(struct device *dev);
int (*assert_adsp)(struct nvadsp_drv_data *drv_data);
int (*deassert_adsp)(struct nvadsp_drv_data *drv_data);
struct reset_control *adspall_rst;
struct reset_control *ape_tke_rst;
struct nvadsp_pm_state state;
bool adsp_os_running;
bool adsp_os_suspended;
bool adsp_os_secload;
void *shared_adsp_os_data;
dma_addr_t shared_adsp_os_data_iova;
#ifdef CONFIG_TEGRA_ADSP_DFS
bool dfs_initialized;
#endif
#ifdef CONFIG_TEGRA_ADSP_ACTMON
bool actmon_initialized;
#endif
#ifdef CONFIG_TEGRA_ADSP_CPUSTAT
bool cpustat_initialized;
#endif
#if defined(CONFIG_TEGRA_ADSP_FILEIO)
bool adspff_init;
#endif
#ifdef CONFIG_TEGRA_ADSP_LPTHREAD
bool lpthread_initialized;
#endif
wait_queue_head_t adsp_health_waitq;
bool adsp_crashed;
u32 adsp_mem[ADSP_MEM_END];
bool adsp_unit_fpga;
u32 unit_fpga_reset[ADSP_UNIT_FPGA_RESET_END];
u32 agic_irqs[NVADSP_VIRQ_MAX];
struct tegra_bwmgr_client *bwmgr;
#if KERNEL_VERSION(5, 9, 0) <= LINUX_VERSION_CODE
struct icc_path *icc_path_handle; /* icc_path handle handle */
#endif
u32 evp_base[ADSP_EVP_END];
const struct nvadsp_chipdata *chip_data;
/* CO mem in backdoor boot */
struct resource co_mem;
/* enum tegra_platform */
u32 tegra_platform;
/* "nvidia,adsp_load_timeout" (in ms) */
u32 adsp_load_timeout;
/* "nvidia,adsp_elf" (FW for backdoor boot) */
char adsp_elf[MAX_FW_STR];
};
#define ADSP_CONFIG 0x04
#define MAXCLKLATENCY (3 << 29)
#define UART_BAUD_RATE 9600
status_t nvadsp_mbox_init(struct platform_device *pdev);
int nvadsp_setup_amc_interrupts(struct platform_device *pdev);
void nvadsp_free_amc_interrupts(struct platform_device *pdev);
int nvadsp_set_bw(struct nvadsp_drv_data *drv, u32 efreq);
#ifdef CONFIG_TEGRA_ADSP_DFS
void adsp_cpu_set_rate(unsigned long freq);
int adsp_dfs_core_init(struct platform_device *pdev);
int adsp_dfs_core_exit(struct platform_device *pdev);
u32 adsp_to_emc_freq(u32 adspfreq);
#endif
#ifdef CONFIG_TEGRA_ADSP_ACTMON
int ape_actmon_probe(struct platform_device *pdev);
#endif
#ifdef CONFIG_TEGRA_ADSP_CPUSTAT
int adsp_cpustat_init(struct platform_device *pdev);
int adsp_cpustat_exit(struct platform_device *pdev);
#endif
#if defined(CONFIG_TEGRA_ADSP_FILEIO)
int adspff_init(struct platform_device *pdev);
void adspff_exit(void);
#endif
#ifdef CONFIG_TEGRA_EMC_APE_DFS
status_t emc_dfs_init(struct platform_device *pdev);
void emc_dfs_exit(void);
#endif
#ifdef CONFIG_PM
/* Run the chip-specific PM init hook; -EINVAL when the chip has none. */
static inline int __init nvadsp_pm_init(struct platform_device *pdev)
{
	struct nvadsp_drv_data *d = platform_get_drvdata(pdev);

	return d->chip_data->pm_init ? d->chip_data->pm_init(pdev) : -EINVAL;
}
#endif
/* Run the chip-specific reset init hook; -EINVAL when the chip has none. */
static inline int __init nvadsp_reset_init(struct platform_device *pdev)
{
	struct nvadsp_drv_data *d = platform_get_drvdata(pdev);

	return d->chip_data->reset_init ?
		d->chip_data->reset_init(pdev) : -EINVAL;
}
/*
 * Run the chip-specific ACAST init hook.  Unlike pm/reset init, the
 * hook is optional: a missing callback is success (0).
 */
static inline int __init nvadsp_acast_init(struct platform_device *pdev)
{
	struct nvadsp_drv_data *d = platform_get_drvdata(pdev);

	return d->chip_data->acast_init ? d->chip_data->acast_init(pdev) : 0;
}
#ifdef CONFIG_TEGRA_ADSP_LPTHREAD
int adsp_lpthread_init(bool is_adsp_suspended);
int adsp_lpthread_resume(void);
int adsp_lpthread_pause(void);
int adsp_lpthread_uninit(void);
int adsp_lpthread_get_state(void);
int adsp_lpthread_entry(struct platform_device *pdev);
int adsp_lpthread_exit(struct platform_device *pdev);
int adsp_lpthread_set_suspend(bool is_suspended);
#endif
#endif /* __TEGRA_NVADSP_DEV_H */

View File

@@ -0,0 +1,100 @@
/*
* dram_app_mem_manager.c
*
* dram app memory manager for allocating memory for text,bss and data
*
* Copyright (C) 2014-2022, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) "%s : %d, " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include "dram_app_mem_manager.h"
#define ALIGN_TO_ADSP_PAGE(x) ALIGN(x, 4096)
static void *dram_app_mem_handle;
static LIST_HEAD(dram_app_mem_alloc_list);
static LIST_HEAD(dram_app_mem_free_list);
/* Dump the DRAM app pool allocator state via mem_print(). */
void dram_app_mem_print(void)
{
	mem_print(dram_app_mem_handle);
}
/* Allocate @size bytes (rounded up to a 4 KiB ADSP page) from the pool. */
void *dram_app_mem_request(const char *name, size_t size)
{
	return mem_request(dram_app_mem_handle, name, ALIGN_TO_ADSP_PAGE(size));
}
/* Return a previously requested chunk to the pool. */
bool dram_app_mem_release(void *handle)
{
	return mem_release(dram_app_mem_handle, handle);
}
/* Base address of an allocated chunk. */
unsigned long dram_app_mem_get_address(void *handle)
{
	return mem_get_address(handle);
}
static struct dentry *dram_app_mem_dump_debugfs_file;
/* debugfs show: dump the allocator state into the seq_file. */
static int dram_app_mem_dump(struct seq_file *s, void *data)
{
	mem_dump(dram_app_mem_handle, s);
	return 0;
}
static int dram_app_mem_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, dram_app_mem_dump, inode->i_private);
}
static const struct file_operations dram_app_mem_dump_fops = {
	.open = dram_app_mem_dump_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
int dram_app_mem_init(unsigned long start, unsigned long size)
{
dram_app_mem_handle =
create_mem_manager("DRAM_APP_MANAGER", start, size);
if (IS_ERR(dram_app_mem_handle)) {
pr_err("ERROR: failed to create aram memory_manager");
return PTR_ERR(dram_app_mem_handle);
}
if (debugfs_initialized()) {
dram_app_mem_dump_debugfs_file =
debugfs_create_file("dram_app_mem_dump",
S_IRUSR, NULL, NULL, &dram_app_mem_dump_fops);
if (!dram_app_mem_dump_debugfs_file) {
pr_err("ERROR: failed to create dram_app_mem_dump debugfs");
destroy_mem_manager(dram_app_mem_handle);
return -ENOMEM;
}
}
return 0;
}
/* Tear down the debugfs dump file and destroy the DRAM app pool. */
void dram_app_mem_exit(void)
{
	debugfs_remove(dram_app_mem_dump_debugfs_file);
	destroy_mem_manager(dram_app_mem_handle);
}

View File

@@ -0,0 +1,30 @@
/*
* Header file for dram app memory manager
*
* Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef __TEGRA_NVADSP_DRAM_APP_MEM_MANAGER_H
#define __TEGRA_NVADSP_DRAM_APP_MEM_MANAGER_H
#include "mem_manager.h"
/* Keep parameter names in prototypes so callers know what to pass. */
int dram_app_mem_init(unsigned long start, unsigned long size);
void dram_app_mem_exit(void);
void *dram_app_mem_request(const char *name, size_t size);
bool dram_app_mem_release(void *handle);
unsigned long dram_app_mem_get_address(void *handle);
void dram_app_mem_print(void);
#endif /* __TEGRA_NVADSP_DRAM_APP_MEM_MANAGER_H */

View File

@@ -0,0 +1,472 @@
/*
* emc_dfs.c
*
* Emc dynamic frequency scaling due to APE
*
* Copyright (C) 2014-2020, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/tegra_nvadsp.h>
#include <linux/tick.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include "dev.h"
/* Register offsets */
#define ABRIDGE_STATS_READ_0 0x04
#define ABRIDGE_STATS_WRITE_0 0x0c
#define ABRIDGE_STATS_CLEAR_0 0x1b
#define ABRIDGE_STATS_HI_0FFSET 0x04
/* Sample Period in usecs */
#define DEFAULT_SAMPLE_PERIOD 500000
#define INT_SHIFT 32
#define make64(hi, low) ((((u64)hi) << INT_SHIFT) | (low))
#define SCALING_DIVIDER 2
#define BOOST_DOWN_COUNT 2
#define DEFAULT_BOOST_UP_THRESHOLD 2000000;
#define DEFAULT_BOOST_STEP 2
struct emc_dfs_info {
void __iomem *abridge_base;
struct timer_list cnt_timer;
u64 rd_cnt;
u64 wr_cnt;
bool enable;
u64 avg_cnt;
unsigned long timer_rate;
ktime_t prev_time;
u32 dn_count;
u32 boost_dn_count;
u64 boost_up_threshold;
u8 boost_step;
struct work_struct clk_set_work;
unsigned long cur_freq;
bool speed_change_flag;
unsigned long max_freq;
struct clk *emcclk;
};
static struct emc_dfs_info global_emc_info;
static struct emc_dfs_info *einfo;
static struct task_struct *speedchange_task;
static spinlock_t speedchange_lock;
/* Read a 64-bit ABRIDGE statistics counter split across two 32-bit regs. */
static u64 read64(u32 offset)
{
	u32 lo = readl(einfo->abridge_base + offset);
	u32 hi = readl(einfo->abridge_base + offset + ABRIDGE_STATS_HI_0FFSET);

	return make64(hi, lo);
}
/*
 * Map the measured average transaction count to a target emc frequency
 * (kHz).  Returns 0 when no frequency change is needed.
 */
static unsigned long count_to_emcfreq(void)
{
	unsigned long target = 0;

	if (einfo->avg_cnt == 0) {
		/* Idle: scale down only after boost_dn_count idle samples. */
		if (einfo->dn_count < einfo->boost_dn_count) {
			einfo->dn_count++;
		} else {
			target = einfo->cur_freq / SCALING_DIVIDER;
			einfo->dn_count = 0;
		}
	} else if (einfo->avg_cnt >= einfo->boost_up_threshold &&
		   einfo->boost_step) {
		target = einfo->cur_freq * einfo->boost_step;
	}

	pr_debug("%s:avg_cnt: %llu current freq(kHz): %lu target freq(kHz): %lu\n",
		__func__, einfo->avg_cnt, einfo->cur_freq, target);

	return target;
}
static int clk_work(void *data)
{
int ret;
if (einfo->emcclk && einfo->speed_change_flag && einfo->cur_freq) {
ret = clk_set_rate(einfo->emcclk, einfo->cur_freq * 1000);
if (ret) {
pr_err("failed to set ape.emc freq:%d\n", ret);
BUG_ON(ret);
}
einfo->cur_freq = clk_get_rate(einfo->emcclk) / 1000;
pr_info("ape.emc: setting emc clk: %lu\n", einfo->cur_freq);
}
mod_timer(&einfo->cnt_timer,
jiffies + usecs_to_jiffies(einfo->timer_rate));
return 0;
}
/*
 * Periodic sampling timer (softirq context).
 *
 * Reads the ABRIDGE read/write transaction counters, computes the
 * average count per elapsed ns and asks count_to_emcfreq() whether the
 * ape.emc clock should move.  The actual clk_set_rate() is deferred to
 * the speedchange_task kthread, which also re-arms this timer.
 */
static void emc_dfs_timer(unsigned long data)
{
	u64 cur_cnt;
	u64 delta_cnt;
	u64 prev_cnt;
	u64 delta_time;
	ktime_t now;
	unsigned long target_freq;
	unsigned long flags;
	spin_lock_irqsave(&speedchange_lock, flags);
	/* Return if emc dfs is disabled */
	if (!einfo->enable) {
		spin_unlock_irqrestore(&speedchange_lock, flags);
		return;
	}
	prev_cnt = einfo->rd_cnt + einfo->wr_cnt;
	einfo->rd_cnt = read64((u32)ABRIDGE_STATS_READ_0);
	einfo->wr_cnt = read64((u32)ABRIDGE_STATS_WRITE_0);
	pr_debug("einfo->rd_cnt: %llu einfo->wr_cnt: %llu\n",
		einfo->rd_cnt, einfo->wr_cnt);
	cur_cnt = einfo->rd_cnt + einfo->wr_cnt;
	delta_cnt = cur_cnt - prev_cnt;
	now = ktime_get();
	delta_time = ktime_to_ns(ktime_sub(now, einfo->prev_time));
	if (!delta_time) {
		pr_err("%s: time interval to calculate emc scaling is zero\n",
			__func__);
		spin_unlock_irqrestore(&speedchange_lock, flags);
		/* still wake the worker so the timer chain is re-armed */
		goto exit;
	}
	einfo->prev_time = now;
	einfo->avg_cnt = delta_cnt / delta_time;
	/* if 0: no scaling is required */
	target_freq = count_to_emcfreq();
	if (!target_freq) {
		einfo->speed_change_flag = false;
	} else {
		einfo->cur_freq = target_freq;
		einfo->speed_change_flag = true;
	}
	spin_unlock_irqrestore(&speedchange_lock, flags);
	pr_info("einfo->avg_cnt: %llu delta_cnt: %llu delta_time %llu emc_freq:%lu\n",
		einfo->avg_cnt, delta_cnt, delta_time, einfo->cur_freq);
exit:
	wake_up_process(speedchange_task);
}
/* Resync the counters/timestamp and kick off sampling in ~2 jiffies. */
static void emc_dfs_enable(void)
{
	einfo->rd_cnt = read64((u32)ABRIDGE_STATS_READ_0);
	einfo->wr_cnt = read64((u32)ABRIDGE_STATS_WRITE_0);
	einfo->prev_time = ktime_get();
	mod_timer(&einfo->cnt_timer, jiffies + 2);
}
/* Snapshot the counters and stop the sampling timer synchronously. */
static void emc_dfs_disable(void)
{
	einfo->rd_cnt = read64((u32)ABRIDGE_STATS_READ_0);
	einfo->wr_cnt = read64((u32)ABRIDGE_STATS_WRITE_0);
	del_timer_sync(&einfo->cnt_timer);
}
#ifdef CONFIG_DEBUG_FS
static struct dentry *emc_dfs_root;
#define RW_MODE (S_IWUSR | S_IRUSR)
#define RO_MODE S_IRUSR
/* Get emc dfs status: 0: disabled 1: enabled */
static int dfs_enable_get(void *data, u64 *val)
{
	*val = einfo->enable;
	return 0;
}
/* Enable/disable emc dfs */
static int dfs_enable_set(void *data, u64 val)
{
	einfo->enable = (bool) val;
	/*
	 * If enabling: activate a timer to execute in next 2 jiffies,
	 * so that emc scaled value takes effect immediately.
	 */
	if (einfo->enable)
		emc_dfs_enable();
	else
		emc_dfs_disable();
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(enable_fops, dfs_enable_get,
	dfs_enable_set, "%llu\n");
/* Get the scale-up activity threshold (avg count per ns) */
static int boost_up_threshold_get(void *data, u64 *val)
{
	*val = einfo->boost_up_threshold;
	return 0;
}
/* Set the scale-up threshold; only allowed while emc dfs is enabled */
static int boost_up_threshold_set(void *data, u64 val)
{
	unsigned long flags;
	int ret = 0;
	spin_lock_irqsave(&speedchange_lock, flags);
	if (!einfo->enable) {
		pr_info("EMC dfs is not enabled\n");
		ret = -EINVAL;
		goto err;
	}
	if (val)
		einfo->boost_up_threshold = val;
err:
	spin_unlock_irqrestore(&speedchange_lock, flags);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(up_threshold_fops,
	boost_up_threshold_get, boost_up_threshold_set, "%llu\n");
/* scaling emc freq in multiple of boost factor */
static int boost_step_get(void *data, u64 *val)
{
	*val = einfo->boost_step;
	return 0;
}
/* Set the boost multiplier; 0 is coerced to 1 (no boost) */
static int boost_step_set(void *data, u64 val)
{
	unsigned long flags;
	int ret = 0;
	spin_lock_irqsave(&speedchange_lock, flags);
	if (!einfo->enable) {
		pr_info("EMC dfs is not enabled\n");
		ret = -EINVAL;
		goto err;
	}
	if (!val)
		einfo->boost_step = 1;
	else
		einfo->boost_step = (u8) val;
err:
	spin_unlock_irqrestore(&speedchange_lock, flags);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(boost_fops, boost_step_get,
	boost_step_set, "%llu\n");
/* Get the number of idle samples required before scaling down */
static int boost_down_count_get(void *data, u64 *val)
{
	*val = einfo->boost_dn_count;
	return 0;
}
/* Set the idle-sample count; only allowed while emc dfs is enabled */
static int boost_down_count_set(void *data, u64 val)
{
	unsigned long flags;
	int ret = 0;
	spin_lock_irqsave(&speedchange_lock, flags);
	if (!einfo->enable) {
		pr_info("EMC dfs is not enabled\n");
		ret = -EINVAL;
		goto err;
	}
	if (val)
		einfo->boost_dn_count = (u32) val;
	ret = 0;
err:
	spin_unlock_irqrestore(&speedchange_lock, flags);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(down_cnt_fops, boost_down_count_get,
	boost_down_count_set, "%llu\n");
/* Get the sampling period in usec */
static int period_get(void *data, u64 *val)
{
	*val = einfo->timer_rate;
	return 0;
}
/* Set period in usec */
static int period_set(void *data, u64 val)
{
	unsigned long flags;
	int ret = 0;
	spin_lock_irqsave(&speedchange_lock, flags);
	if (!einfo->enable) {
		pr_info("EMC dfs is not enabled\n");
		ret = -EINVAL;
		goto err;
	}
	if (val)
		einfo->timer_rate = (unsigned long)val;
err:
	spin_unlock_irqrestore(&speedchange_lock, flags);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(period_fops, period_get, period_set, "%llu\n");
/*
 * Create the "emc_dfs" debugfs directory and its tunables under the
 * adsp debugfs root.  Returns 0 on success, -ENOMEM otherwise.
 *
 * NOTE(review): newer kernels return ERR_PTR (never NULL) from
 * debugfs_create_*(), so these NULL checks may not detect failures on
 * all supported kernel versions — confirm against the kernel range.
 */
static int emc_dfs_debugfs_init(struct nvadsp_drv_data *drv)
{
	int ret = -ENOMEM;
	struct dentry *d;
	if (!drv->adsp_debugfs_root)
		return ret;
	emc_dfs_root = debugfs_create_dir("emc_dfs", drv->adsp_debugfs_root);
	if (!emc_dfs_root)
		goto err_out;
	d = debugfs_create_file("enable", RW_MODE, emc_dfs_root, NULL,
		&enable_fops);
	if (!d)
		goto err_root;
	d = debugfs_create_file("boost_up_threshold", RW_MODE, emc_dfs_root,
		NULL, &up_threshold_fops);
	if (!d)
		goto err_root;
	d = debugfs_create_file("boost_step", RW_MODE, emc_dfs_root, NULL,
		&boost_fops);
	if (!d)
		goto err_root;
	d = debugfs_create_file("boost_down_count", RW_MODE, emc_dfs_root,
		NULL, &down_cnt_fops);
	if (!d)
		goto err_root;
	d = debugfs_create_file("period", RW_MODE, emc_dfs_root, NULL,
		&period_fops);
	if (!d)
		goto err_root;
	return 0;
err_root:
	debugfs_remove_recursive(emc_dfs_root);
err_out:
	return ret;
}
#endif
/*
 * Initialize APE EMC DFS: acquire the ape.emc clock, raise it to its
 * maximum rate, start the speed-change kthread and arm the sampling
 * timer.  Returns 0 on success or a negative errno.
 *
 * Error-code fixes vs. the previous version:
 *  - clk_get_sys() may return NULL under IS_ERR_OR_NULL(); PTR_ERR(NULL)
 *    is 0, which reported *success* — return -ENOENT instead.
 *  - clk_set_rate() failure and a zero current frequency returned
 *    PTR_ERR() of a *valid* clock pointer (garbage) — return the real
 *    errno, and drop the clock reference on all error paths.
 */
status_t __init emc_dfs_init(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
#if KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
#endif
	int ret = 0;

	einfo = &global_emc_info;
	einfo->abridge_base = drv->base_regs[ABRIDGE];

	einfo->emcclk = clk_get_sys("ape", "emc");
	if (IS_ERR_OR_NULL(einfo->emcclk)) {
		dev_info(&pdev->dev, "unable to find ape.emc clock\n");
		return einfo->emcclk ? PTR_ERR(einfo->emcclk) : -ENOENT;
	}

	einfo->timer_rate = DEFAULT_SAMPLE_PERIOD;
	einfo->boost_up_threshold = DEFAULT_BOOST_UP_THRESHOLD;
	einfo->boost_step = DEFAULT_BOOST_STEP;
	einfo->dn_count = 0;
	einfo->boost_dn_count = BOOST_DOWN_COUNT;
	einfo->enable = 1;

	einfo->max_freq = clk_round_rate(einfo->emcclk, ULONG_MAX);
	ret = clk_set_rate(einfo->emcclk, einfo->max_freq);
	if (ret) {
		dev_info(&pdev->dev, "failed to set ape.emc freq:%d\n", ret);
		goto err_put_clk;
	}
	einfo->max_freq /= 1000;	/* Hz -> kHz */

	einfo->cur_freq = clk_get_rate(einfo->emcclk) / 1000;
	if (!einfo->cur_freq) {
		dev_info(&pdev->dev, "ape.emc freq is NULL:\n");
		ret = -EINVAL;
		goto err_put_clk;
	}
	dev_info(&pdev->dev, "einfo->cur_freq %lu\n", einfo->cur_freq);

	spin_lock_init(&speedchange_lock);

	init_timer(&einfo->cnt_timer);
	einfo->cnt_timer.function = emc_dfs_timer;

	speedchange_task = kthread_create(clk_work, NULL, "emc_dfs");
	if (IS_ERR(speedchange_task)) {
		ret = PTR_ERR(speedchange_task);
		goto err_put_clk;
	}
#if KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE
	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
#else
	sched_set_fifo(speedchange_task);
#endif
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	emc_dfs_enable();
	dev_info(&pdev->dev, "APE EMC DFS is initialized\n");

#ifdef CONFIG_DEBUG_FS
	emc_dfs_debugfs_init(drv);
#endif
	return 0;

err_put_clk:
	clk_put(einfo->emcclk);
	einfo->emcclk = NULL;
	return ret;
}
/* Stop the speed-change kthread and drop the reference taken at init. */
void __exit emc_dfs_exit(void)
{
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

View File

@@ -0,0 +1,330 @@
/*
* Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/atomic.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/platform_device.h>
#include <linux/tegra_nvadsp.h>
#include <linux/irqchip/tegra-agic.h>
#include "dev.h"
static struct platform_device *nvadsp_pdev;
static struct nvadsp_drv_data *nvadsp_drv_data;
/* Initialized to false by default */
static bool is_hwmbox_busy;
#ifdef CONFIG_MBOX_ACK_HANDLER
static int hwmbox_last_msg;
#endif
/*
 * Mailbox 0 is for receiving messages
 * from ADSP i.e. CPU <-- ADSP.
 */
#define INT_RECV_HWMBOX INT_AMISC_MBOX_FULL0
/* Chip-specific register offset of the CPU <-- ADSP mailbox. */
static inline u32 recv_hwmbox(void)
{
	return nvadsp_drv_data->chip_data->hwmb.hwmbox0_reg;
}
/*
 * Mailbox 1 is for sending messages
 * to ADSP i.e. CPU --> ADSP
 */
#define INT_SEND_HWMBOX INT_AMISC_MBOX_EMPTY1
/* Chip-specific register offset of the CPU --> ADSP mailbox. */
static inline u32 send_hwmbox(void)
{
	return nvadsp_drv_data->chip_data->hwmb.hwmbox1_reg;
}
u32 hwmb_reg_idx(void)
{
return nvadsp_drv_data->chip_data->hwmb.reg_idx;
}
u32 hwmbox_readl(u32 reg)
{
return readl(nvadsp_drv_data->base_regs[hwmb_reg_idx()] + reg);
}
void hwmbox_writel(u32 val, u32 reg)
{
writel(val, nvadsp_drv_data->base_regs[hwmb_reg_idx()] + reg);
}
#define PRINT_HWMBOX(x) \
dev_info(&nvadsp_pdev->dev, "%s: 0x%x\n", #x, hwmbox_readl(x))
void dump_mailbox_regs(void)
{
dev_info(&nvadsp_pdev->dev, "dumping hwmailbox registers ...\n");
PRINT_HWMBOX(recv_hwmbox());
PRINT_HWMBOX(send_hwmbox());
}
/* Reset a hardware-mailbox software queue to the empty state. */
static void hwmboxq_init(struct hwmbox_queue *queue)
{
	queue->count = 0;
	queue->head = 0;
	queue->tail = 0;
	spin_lock_init(&queue->lock);
	init_completion(&queue->comp);
}
/* Must be called with queue lock held in non-interrupt context */
static inline bool is_hwmboxq_empty(struct hwmbox_queue *queue)
{
	return queue->count == 0;
}

/* Must be called with queue lock held in non-interrupt context */
static inline bool is_hwmboxq_full(struct hwmbox_queue *queue)
{
	return queue->count == HWMBOX_QUEUE_SIZE;
}
/* Must be called with queue lock held in non-interrupt context */
static status_t hwmboxq_enqueue(struct hwmbox_queue *queue,
				uint32_t data)
{
	int ret = 0;

	if (is_hwmboxq_full(queue)) {
		ret = -EBUSY;
		goto comp;
	}
	queue->array[queue->tail] = data;
	/* power-of-2 ring: wrap with a mask instead of a modulo */
	queue->tail = (queue->tail + 1) & HWMBOX_QUEUE_SIZE_MASK;
	queue->count++;
	/* queue just became full: arm the completion so senders can wait */
	if (is_hwmboxq_full(queue))
		goto comp;
	else
		goto out;
comp:
	reinit_completion(&queue->comp);
out:
	return ret;
}
/*
 * nvadsp_hwmbox_send_data - send one word to the ADSP via the hw mailbox.
 * @mid:   destination mailbox id (packed into the word for SMSG)
 * @data:  payload
 * @flags: NVADSP_MBOX_SMSG to wrap @data in the short-message format
 *
 * If the hardware mailbox is idle the word is written directly;
 * otherwise it is queued and drained from the "send empty" interrupt.
 * Returns 0 on success or -EBUSY if the software queue is full.
 */
status_t nvadsp_hwmbox_send_data(uint16_t mid, uint32_t data, uint32_t flags)
{
	spinlock_t *lock = &nvadsp_drv_data->hwmbox_send_queue.lock;
	unsigned long lockflags;
	int ret = 0;

	if (flags & NVADSP_MBOX_SMSG) {
		data = PREPARE_HWMBOX_SMSG(mid, data);
		pr_debug("nvadsp_mbox_send: data: 0x%x\n", data);
	}
	/* TODO handle LMSG */
	spin_lock_irqsave(lock, lockflags);
	if (!is_hwmbox_busy) {
		is_hwmbox_busy = true;
		pr_debug("nvadsp_mbox_send: empty mailbox. write to mailbox.\n");
#ifdef CONFIG_MBOX_ACK_HANDLER
		/* remember the in-flight word so the ack handler can see it */
		hwmbox_last_msg = data;
#endif
		hwmbox_writel(data, send_hwmbox());
	} else {
		pr_debug("nvadsp_mbox_send: enqueue data\n");
		ret = hwmboxq_enqueue(&nvadsp_drv_data->hwmbox_send_queue,
				      data);
	}
	spin_unlock_irqrestore(lock, lockflags);
	return ret;
}
/* Must be called with queue lock held in non-interrupt context */
static status_t hwmboxq_dequeue(struct hwmbox_queue *queue,
				uint32_t *data)
{
	int ret = 0;

	if (is_hwmboxq_empty(queue)) {
		ret = -EBUSY;
		goto out;
	}
	/* a full queue is about to gain space: wake blocked senders */
	if (is_hwmboxq_full(queue))
		complete_all(&nvadsp_drv_data->hwmbox_send_queue.comp);
	*data = queue->array[queue->head];
	queue->head = (queue->head + 1) & HWMBOX_QUEUE_SIZE_MASK;
	queue->count--;
out:
	return ret;
}
/*
 * "Send mailbox empty" interrupt: the ADSP consumed the last word.
 * Report a failed send, run the optional ack handler for the previous
 * message, then push the next queued word (or mark the mailbox idle).
 */
static irqreturn_t hwmbox_send_empty_int_handler(int irq, void *devid)
{
	spinlock_t *lock = &nvadsp_drv_data->hwmbox_send_queue.lock;
	struct device *dev = &nvadsp_pdev->dev;
	unsigned long lockflags;
	uint32_t data;
	int ret;

	/*
	 * NOTE(review): is_hwmbox_busy is read here without the lock; it is
	 * only written under the lock, so this looks like a benign fast-path
	 * check — confirm no send can race this interrupt on another CPU.
	 */
	if (!is_hwmbox_busy)
		return IRQ_HANDLED;
	spin_lock_irqsave(lock, lockflags);
	data = hwmbox_readl(send_hwmbox());
	/* a non-empty pattern here means the ADSP rejected the last word */
	if (data != PREPARE_HWMBOX_EMPTY_MSG())
		dev_err(dev, "last mailbox sent failed with 0x%x\n", data);
#ifdef CONFIG_MBOX_ACK_HANDLER
	{
		uint16_t last_mboxid = HWMBOX_SMSG_MID(hwmbox_last_msg);
		struct nvadsp_mbox *mbox = nvadsp_drv_data->mboxes[last_mboxid];

		if (mbox) {
			nvadsp_mbox_handler_t ack_handler = mbox->ack_handler;

			if (ack_handler) {
				uint32_t msg = HWMBOX_SMSG_MSG(hwmbox_last_msg);

				ack_handler(msg, mbox->hdata);
			}
		}
	}
#endif
	ret = hwmboxq_dequeue(&nvadsp_drv_data->hwmbox_send_queue,
			      &data);
	if (ret == 0) {
#ifdef CONFIG_MBOX_ACK_HANDLER
		hwmbox_last_msg = data;
#endif
		hwmbox_writel(data, send_hwmbox());
		dev_dbg(dev, "Writing 0x%x to SEND_HWMBOX\n", data);
	} else {
		/* queue drained: mailbox goes idle until the next send */
		is_hwmbox_busy = false;
	}
	spin_unlock_irqrestore(lock, lockflags);
	return IRQ_HANDLED;
}
/*
 * "Receive mailbox full" interrupt: the ADSP wrote a word for the CPU.
 * Read it, clear the mailbox, and dispatch to the target software
 * mailbox — either its synchronous handler or its receive queue.
 */
static irqreturn_t hwmbox_recv_full_int_handler(int irq, void *devid)
{
	uint32_t data;
	int ret;

	data = hwmbox_readl(recv_hwmbox());
	/* ack the hardware by restoring the "empty" pattern */
	hwmbox_writel(PREPARE_HWMBOX_EMPTY_MSG(), recv_hwmbox());
	if (IS_HWMBOX_MSG_SMSG(data)) {
		/* mboxid is 10 bits, so it always indexes mboxes[] in range */
		uint16_t mboxid = HWMBOX_SMSG_MID(data);
		struct nvadsp_mbox *mbox = nvadsp_drv_data->mboxes[mboxid];

		if (!mbox) {
			dev_info(&nvadsp_pdev->dev,
				 "Failed to get mbox for mboxid: %u\n",
				 mboxid);
			goto out;
		}
		if (mbox->handler) {
			mbox->handler(HWMBOX_SMSG_MSG(data), mbox->hdata);
		} else {
			ret = nvadsp_mboxq_enqueue(&mbox->recv_queue,
						   HWMBOX_SMSG_MSG(data));
			if (ret) {
				dev_info(&nvadsp_pdev->dev,
					 "Failed to deliver msg 0x%x to"
					 " mbox id %u\n",
					 HWMBOX_SMSG_MSG(data), mboxid);
				goto out;
			}
		}
	} else if (IS_HWMBOX_MSG_LMSG(data)) {
		/* TODO */
	}
out:
	return IRQ_HANDLED;
}
/* Release both hardware-mailbox IRQs requested at setup time. */
void nvadsp_free_hwmbox_interrupts(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;

	devm_free_irq(dev, drv->agic_irqs[MBOX_RECV_VIRQ], pdev);
	devm_free_irq(dev, drv->agic_irqs[MBOX_SEND_VIRQ], pdev);
}
/*
 * nvadsp_setup_hwmbox_interrupts - request the two hw mailbox IRQs.
 *
 * Requests the receive-full and send-empty interrupts, keeping the
 * send-empty interrupt masked (when the chip exposes an enable
 * register) until its handler is installed.
 *
 * Returns 0 on success or a negative errno from devm_request_irq().
 *
 * Fixes over the previous version: on send-IRQ request failure the old
 * code still unmasked the empty interrupt and then called
 * nvadsp_free_hwmbox_interrupts(), freeing an IRQ that was never
 * requested (devm warning). Now the unmask happens only on success and
 * the error path frees only the recv IRQ.
 */
int nvadsp_setup_hwmbox_interrupts(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	u32 empty_int_ie = drv->chip_data->hwmb.empty_int_ie;
	int recv_virq, send_virq;
	int ret;

	recv_virq = drv->agic_irqs[MBOX_RECV_VIRQ];
	send_virq = drv->agic_irqs[MBOX_SEND_VIRQ];

	ret = devm_request_irq(dev, recv_virq, hwmbox_recv_full_int_handler,
			       IRQF_TRIGGER_RISING, "hwmbox0_recv_full", pdev);
	if (ret)
		goto err;

	/* mask the "send empty" interrupt while its handler is installed */
	if (empty_int_ie)
		hwmbox_writel(0x0, send_hwmbox() + empty_int_ie);

	ret = devm_request_irq(dev, send_virq, hwmbox_send_empty_int_handler,
			       IRQF_TRIGGER_RISING,
			       "hwmbox1_send_empty", pdev);
	if (ret)
		goto free_recv_irq;

	/* handler in place: unmask the "send empty" interrupt */
	if (empty_int_ie)
		hwmbox_writel(0x1, send_hwmbox() + empty_int_ie);

	return 0;

free_recv_irq:
	/* only the recv IRQ was successfully requested at this point */
	devm_free_irq(dev, recv_virq, pdev);
err:
	return ret;
}
/*
 * Record the platform device and driver data for the module-level
 * helpers and reset the software send queue. Always succeeds.
 */
int __init nvadsp_hwmbox_init(struct platform_device *pdev)
{
	nvadsp_pdev = pdev;
	nvadsp_drv_data = platform_get_drvdata(pdev);
	hwmboxq_init(&nvadsp_drv_data->hwmbox_send_queue);

	return 0;
}

View File

@@ -0,0 +1,116 @@
/*
* Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef __HWMAILBOX_H
#define __HWMAILBOX_H
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
/*
 * The interpretation of hwmailbox content is:
 * 31 30 29 0
 * [TAG|TYPE|MESSAGE]
 *
 * Constants are unsigned so that shifting the valid tag into bit 31
 * is well defined (1 << 31 overflows a signed int).
 */
#define HWMBOX_TAG_SHIFT 31
#define HWMBOX_TAG_MASK 0x1U
#define HWMBOX_TAG_INVALID 0U
#define HWMBOX_TAG_VALID 1U
/* Set Invalid TAG */
#define SET_HWMBOX_TAG_INVALID (HWMBOX_TAG_INVALID << HWMBOX_TAG_SHIFT)
/* Set Valid TAG */
#define SET_HWMBOX_TAG_VALID (HWMBOX_TAG_VALID << HWMBOX_TAG_SHIFT)
/*
 * Get current TAG.
 * Fix: the previous definition ((val & MASK) << SHIFT) moved bit 0 up
 * to bit 31 instead of extracting the tag bit; a "get" must shift the
 * value down and mask.
 */
#define HWMBOX_TAG(val) (((val) >> HWMBOX_TAG_SHIFT) & HWMBOX_TAG_MASK)
/*
 * Mailbox can be used for sending short messages and long messages.
 * Macro arguments are fully parenthesized so expressions such as
 * HWMBOX_MSG_TYPE(a | b) expand with the intended precedence.
 */
#define HWMBOX_MSG_TYPE_SHIFT 30
#define HWMBOX_MSG_TYPE_MASK 0x1
#define HWMBOX_MSG_SMSG 0
#define HWMBOX_MSG_LMSG 1
/* Set SMSG type */
#define SET_HWMBOX_MSG_SMSG (HWMBOX_MSG_SMSG << HWMBOX_MSG_TYPE_SHIFT)
/* Set LMSG type */
#define SET_HWMBOX_MSG_LMSG (HWMBOX_MSG_LMSG << HWMBOX_MSG_TYPE_SHIFT)
/* Get MSG type */
#define HWMBOX_MSG_TYPE(val) \
	(((val) >> HWMBOX_MSG_TYPE_SHIFT) & HWMBOX_MSG_TYPE_MASK)
/* Check if SMSG */
#define IS_HWMBOX_MSG_SMSG(val) \
	(!(((val) >> HWMBOX_MSG_TYPE_SHIFT) & HWMBOX_MSG_TYPE_MASK))
/* Check if LMSG */
#define IS_HWMBOX_MSG_LMSG(val) \
	(((val) >> HWMBOX_MSG_TYPE_SHIFT) & HWMBOX_MSG_TYPE_MASK)
/*
 * The format for a short message is:
 * 31 30 29 20 19 0
 * [TAG|TYPE|MBOX ID|SHORT MESSAGE]
 * 1b 1b 10bits 20bits
 *
 * Macro arguments are fully parenthesized so expressions expand with
 * the intended precedence.
 */
#define HWMBOX_SMSG_SHIFT 0
#define HWMBOX_SMSG_MASK 0x3FFFFFFF
#define HWMBOX_SMSG(val) (((val) >> HWMBOX_SMSG_SHIFT) & HWMBOX_SMSG_MASK)
#define HWMBOX_SMSG_MID_SHIFT 20
#define HWMBOX_SMSG_MID_MASK 0x3FF
#define HWMBOX_SMSG_MID(val) \
	(((val) >> HWMBOX_SMSG_MID_SHIFT) & HWMBOX_SMSG_MID_MASK)
#define HWMBOX_SMSG_MSG_SHIFT 0
#define HWMBOX_SMSG_MSG_MASK 0xFFFFF
#define HWMBOX_SMSG_MSG(val) \
	(((val) >> HWMBOX_SMSG_MSG_SHIFT) & HWMBOX_SMSG_MSG_MASK)
/* Set mailbox id for a short message */
#define SET_HWMBOX_SMSG_MID(val) \
	(((val) & HWMBOX_SMSG_MID_MASK) << HWMBOX_SMSG_MID_SHIFT)
/* Set msg value in a short message */
#define SET_HWMBOX_SMSG_MSG(val) \
	(((val) & HWMBOX_SMSG_MSG_MASK) << HWMBOX_SMSG_MSG_SHIFT)
/* Prepare a small message with mailbox id and data */
#define PREPARE_HWMBOX_SMSG(mid, data) (SET_HWMBOX_TAG_VALID | \
					SET_HWMBOX_MSG_SMSG | \
					SET_HWMBOX_SMSG_MID(mid) | \
					SET_HWMBOX_SMSG_MSG(data))
/* Prepare empty mailbox value */
#define PREPARE_HWMBOX_EMPTY_MSG() (HWMBOX_TAG_INVALID | 0x0)
/*
 * Queue size must be power of 2 as '&' op
 * is being used to manage circular queues
 */
#define HWMBOX_QUEUE_SIZE 1024
#define HWMBOX_QUEUE_SIZE_MASK (HWMBOX_QUEUE_SIZE - 1)
/*
 * Software ring buffer backing one hardware mailbox direction.
 * head/tail index into array[]; count tracks occupancy; comp is
 * completed when the queue transitions out of the full state so
 * blocked senders can retry. lock guards all fields.
 */
struct hwmbox_queue {
	uint32_t array[HWMBOX_QUEUE_SIZE];
	uint16_t head;		/* next slot to dequeue */
	uint16_t tail;		/* next slot to enqueue */
	uint16_t count;		/* current number of queued words */
	struct completion comp;
	spinlock_t lock;
};
u32 hwmb_reg_idx(void);
u32 hwmbox_readl(u32 reg);
void hwmbox_writel(u32 val, u32 reg);
int nvadsp_hwmbox_init(struct platform_device *);
status_t nvadsp_hwmbox_send_data(uint16_t, uint32_t, uint32_t);
void dump_mailbox_regs(void);
int nvadsp_setup_hwmbox_interrupts(struct platform_device *pdev);
void nvadsp_free_hwmbox_interrupts(struct platform_device *pdev);
#endif /* __HWMAILBOX_H */

View File

@@ -0,0 +1,84 @@
/*
* Copyright (C) 2017, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __LOG_STATE_H
#define __LOG_STATE_H
/*
 * ADSP boot/suspend progress codes, shared between C and assembly.
 * ENUM_* expands to a C typedef'd enum or to .equ directives depending
 * on the translation unit.
 */
#ifdef __ASSEMBLER__
#define ENUM_START
#define ENUM_VALUE(key, value) .equ key, value
#define ENUM_END(typename)
#else
#define ENUM_START typedef enum {
#define ENUM_VALUE(key, value) key = value,
#define ENUM_END(typename) } typename;
#endif
/* mask applied when the state is logged (top bit reserved) */
#define STATE_LOG_MASK 0x7FFFFFFF
ENUM_START
/* 0x0xx: first-stage loader */
ENUM_VALUE(ADSP_LOADER_MAIN_ENTRY, 0x1)
ENUM_VALUE(ADSP_LOADER_MAIN_CACHE_DISABLE_COMPLETE, 0x2)
ENUM_VALUE(ADSP_LOADER_MAIN_CONFIGURE_MMU_COMPLETE, 0x3)
ENUM_VALUE(ADSP_LOADER_MAIN_CACHE_ENABLE_COMPLETE, 0x4)
ENUM_VALUE(ADSP_LOADER_MAIN_FPU_ENABLE_COMPLETE, 0x5)
ENUM_VALUE(ADSP_LOADER_MAIN_DECOMPRESSION_COMPLETE, 0x6)
ENUM_VALUE(ADSP_LOADER_MAIN_EXIT, 0x7)
/* 0x1xx: reset vector / early start */
ENUM_VALUE(ADSP_START_ENTRY_AT_RESET, 0x101)
ENUM_VALUE(ADSP_START_CPU_EARLY_INIT, 0x102)
ENUM_VALUE(ADSP_START_FIRST_BOOT, 0x103)
ENUM_VALUE(ADSP_START_LK_MAIN_ENTRY, 0x104)
/* 0x2xx: LK kernel bring-up */
ENUM_VALUE(ADSP_LK_MAIN_ENTRY, 0x201)
ENUM_VALUE(ADSP_LK_MAIN_EARLY_THREAD_INIT_COMPLETE, 0x202)
ENUM_VALUE(ADSP_LK_MAIN_EARLY_ARCH_INIT_COMPLETE, 0x203)
ENUM_VALUE(ADSP_LK_MAIN_EARLY_PLATFORM_INIT_COMPLETE, 0x204)
ENUM_VALUE(ADSP_LK_MAIN_EARLY_TARGET_INIT_COMPLETE, 0x205)
ENUM_VALUE(ADSP_LK_MAIN_CONSTRUCTOR_INIT_COMPLETE, 0x206)
ENUM_VALUE(ADSP_LK_MAIN_HEAP_INIT_COMPLETE, 0x207)
ENUM_VALUE(ADSP_LK_MAIN_KERNEL_INIT_COMPLETE, 0x208)
ENUM_VALUE(ADSP_LK_MAIN_CPU_RESUME_ENTRY, 0x209)
/* 0x3xx: second bootstrap stage, then suspend/resume progress */
ENUM_VALUE(ADSP_BOOTSTRAP2_ARCH_INIT_COMPLETE, 0x301)
ENUM_VALUE(ADSP_BOOTSTRAP2_PLATFORM_INIT_COMPLETE, 0x302)
ENUM_VALUE(ADSP_BOOTSTRAP2_TARGET_INIT_COMPLETE, 0x303)
ENUM_VALUE(ADSP_BOOTSTRAP2_APP_MODULE_INIT_COMPLETE, 0x304)
ENUM_VALUE(ADSP_BOOTSTRAP2_APP_INIT_COMPLETE, 0x305)
ENUM_VALUE(ADSP_BOOTSTRAP2_STATIC_APP_INIT_COMPLETE, 0x306)
ENUM_VALUE(ADSP_BOOTSTRAP2_OS_LOAD_COMPLETE, 0x307)
ENUM_VALUE(ADSP_SUSPEND_BEGINS, 0x320)
ENUM_VALUE(ADSP_SUSPEND_MBX_SEND_COMPLETE, 0x321)
ENUM_VALUE(ADSP_SUSPEND_DISABLED_TIMERS, 0x322)
ENUM_VALUE(ADSP_SUSPEND_DISABLED_INTS, 0x323)
ENUM_VALUE(ADSP_SUSPEND_ARAM_SAVED, 0x324)
ENUM_VALUE(ADSP_SUSPEND_AMC_SAVED, 0x325)
ENUM_VALUE(ADSP_SUSPEND_AMISC_SAVED, 0x326)
ENUM_VALUE(ADSP_SUSPEND_L1_CACHE_DISABLED, 0x327)
ENUM_VALUE(ADSP_SUSPEND_L2_CACHE_DISABLED, 0x328)
ENUM_VALUE(ADSP_RESUME_ADSP, 0x330)
ENUM_VALUE(ADSP_RESUME_AMISC_RESTORED, 0x331)
ENUM_VALUE(ADSP_RESUME_AMC_RESTORED, 0x332)
ENUM_VALUE(ADSP_RESUME_ARAM_RESTORED, 0x333)
ENUM_VALUE(ADSP_RESUME_COMPLETE, 0x334)
ENUM_VALUE(ADSP_WFI_ENTER, 0x335)
ENUM_VALUE(ADSP_WFI_EXIT, 0x336)
ENUM_VALUE(ADSP_DFS_MBOX_RECV, 0x337)
ENUM_VALUE(ADSP_DFS_MBOX_SENT, 0x338)
ENUM_END(adsp_state)
#endif

View File

@@ -0,0 +1,353 @@
/*
* ADSP mailbox manager
*
* Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "dev.h"
#include <linux/nospec.h>
#include <asm/barrier.h>
#define NVADSP_MAILBOX_START 512
#define NVADSP_MAILBOX_MAX 1024
#define NVADSP_MAILBOX_OS_MAX 16
static struct nvadsp_mbox *nvadsp_mboxes[NVADSP_MAILBOX_MAX];
static DECLARE_BITMAP(nvadsp_mbox_ids, NVADSP_MAILBOX_MAX);
static struct nvadsp_drv_data *nvadsp_drv_data;
/* True when no message is queued. Caller must hold queue->lock. */
static inline bool is_mboxq_empty(struct nvadsp_mbox_queue *queue)
{
	return queue->count == 0;
}

/* True when the ring has no free slot. Caller must hold queue->lock. */
static inline bool is_mboxq_full(struct nvadsp_mbox_queue *queue)
{
	return queue->count == NVADSP_MBOX_QUEUE_SIZE;
}
/* Reset a software mailbox queue to the empty state. */
static void mboxq_init(struct nvadsp_mbox_queue *queue)
{
	queue->head = 0;
	queue->tail = 0;
	queue->count = 0;
	init_completion(&queue->comp);
	spin_lock_init(&queue->lock);
}
/*
 * Drop any remaining contents of a queue being closed.
 * Pending messages are discarded with only an informational log.
 */
static void mboxq_destroy(struct nvadsp_mbox_queue *queue)
{
	if (!is_mboxq_empty(queue))
		pr_info("Mbox queue %p is not empty.\n", queue);
	queue->head = 0;
	queue->tail = 0;
	queue->count = 0;
}
/*
 * mboxq_enqueue - append one word to a mailbox receive queue.
 *
 * Completes the queue's completion when the queue transitions from
 * empty so blocked readers wake up.
 *
 * Returns 0 on success or -EINVAL if the queue is full.
 *
 * Fix: the full-queue check previously ran before taking queue->lock,
 * so two concurrent enqueuers could both pass the check and overflow
 * the ring; the check now runs under the lock.
 */
static status_t mboxq_enqueue(struct nvadsp_mbox_queue *queue,
			      uint32_t data)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&queue->lock, flags);
	if (is_mboxq_full(queue)) {
		ret = -EINVAL;
		goto out;
	}
	if (is_mboxq_empty(queue))
		complete_all(&queue->comp);
	queue->array[queue->tail] = data;
	queue->tail = (queue->tail + 1) & NVADSP_MBOX_QUEUE_SIZE_MASK;
	queue->count++;
out:
	spin_unlock_irqrestore(&queue->lock, flags);
	return ret;
}
/* Public wrapper so other compilation units can enqueue into a queue. */
status_t nvadsp_mboxq_enqueue(struct nvadsp_mbox_queue *queue,
			      uint32_t data)
{
	return mboxq_enqueue(queue, data);
}
/*
 * Pop the oldest word from the queue into *data.
 * Returns -EBUSY (and re-arms the completion) when the queue is empty,
 * so readers can block on queue->comp until data arrives.
 */
static status_t mboxq_dequeue(struct nvadsp_mbox_queue *queue,
			      uint32_t *data)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&queue->lock, flags);
	if (is_mboxq_empty(queue)) {
		ret = -EBUSY;
		goto comp;
	}
	*data = queue->array[queue->head];
	queue->head = (queue->head + 1) & NVADSP_MBOX_QUEUE_SIZE_MASK;
	queue->count--;
	/* queue drained: reset completion so the next reader blocks */
	if (is_mboxq_empty(queue))
		goto comp;
	else
		goto out;
comp:
	reinit_completion(&queue->comp);
out:
	spin_unlock_irqrestore(&queue->lock, flags);
	return ret;
}
/* Log every queued word without consuming it (diagnostics only). */
static void mboxq_dump(struct nvadsp_mbox_queue *queue)
{
	unsigned long flags;
	uint16_t head, count;
	uint32_t data;

	spin_lock_irqsave(&queue->lock, flags);
	count = queue->count;
	pr_info("nvadsp: queue %p count:%d\n", queue, count);
	pr_info("nvadsp: queue data: ");
	head = queue->head;
	while (count) {
		data = queue->array[head];
		head = (head + 1) & NVADSP_MBOX_QUEUE_SIZE_MASK;
		count--;
		pr_info("0x%x ", data);
	}
	pr_info(" dumped\n");
	spin_unlock_irqrestore(&queue->lock, flags);
}
/*
 * Allocate a dynamic mailbox id from the shared bitmap.
 * Ids below NVADSP_MAILBOX_START are reserved for static assignment.
 * On exhaustion the returned id is >= the searched size; the caller
 * (nvadsp_mbox_open) checks for that. Caller holds mbox_lock.
 *
 * NOTE(review): the search size is NVADSP_MAILBOX_MAX - 1, which
 * excludes the last valid id (1023) — confirm whether that is
 * intentional or an off-by-one.
 */
static uint16_t nvadsp_mbox_alloc_mboxid(void)
{
	unsigned long start = NVADSP_MAILBOX_START;
	unsigned int nr = 1;
	unsigned long align = 0;
	uint16_t mid;

	mid = bitmap_find_next_zero_area(nvadsp_drv_data->mbox_ids,
					 NVADSP_MAILBOX_MAX - 1,
					 start, nr, align);
	bitmap_set(nvadsp_drv_data->mbox_ids, mid, 1);
	return mid;
}
/* Return a mailbox id to the shared bitmap. Caller holds mbox_lock. */
static status_t nvadsp_mbox_free_mboxid(uint16_t mid)
{
	bitmap_clear(nvadsp_drv_data->mbox_ids, mid, 1);
	return 0;
}
/*
 * nvadsp_mbox_open - register a software mailbox.
 * @mbox:    caller-provided mailbox object to initialize
 * @mid:     in/out mailbox id; 0 requests dynamic allocation and the
 *           allocated id is written back
 * @name:    human-readable name (truncated to NVADSP_MBOX_NAME_MAX - 1)
 * @handler: optional synchronous receive callback; when NULL, incoming
 *           messages are queued on mbox->recv_queue instead
 * @hdata:   opaque cookie passed to @handler
 *
 * Returns 0 on success, -ENOSYS before driver init, -EINVAL for a NULL
 * mbox, -ENOMEM when ids are exhausted, -ERANGE for an out-of-range id,
 * or -EADDRINUSE when the requested id is taken.
 *
 * Fix: strncpy() does not NUL-terminate when strlen(name) >=
 * NVADSP_MBOX_NAME_MAX; the name is now explicitly terminated
 * (assumes mbox->name holds NVADSP_MBOX_NAME_MAX bytes, matching the
 * strncpy bound used here).
 */
status_t nvadsp_mbox_open(struct nvadsp_mbox *mbox, uint16_t *mid,
			  const char *name, nvadsp_mbox_handler_t handler,
			  void *hdata)
{
	unsigned long flags;
	int ret = 0;

	if (!nvadsp_drv_data) {
		ret = -ENOSYS;
		goto err;
	}
	spin_lock_irqsave(&nvadsp_drv_data->mbox_lock, flags);
	if (!mbox) {
		ret = -EINVAL;
		goto out;
	}
	if (*mid == 0) {
		mbox->id = nvadsp_mbox_alloc_mboxid();
		if (mbox->id >= NVADSP_MAILBOX_MAX) {
			ret = -ENOMEM;
			mbox->id = 0;
			goto out;
		}
		*mid = mbox->id;
	} else {
		if (*mid >= NVADSP_MAILBOX_MAX) {
			pr_debug("%s: Invalid mailbox %d.\n",
				 __func__, *mid);
			ret = -ERANGE;
			goto out;
		}
		/* clamp under speculation before indexing mboxes[] */
		*mid = array_index_nospec(*mid, NVADSP_MAILBOX_MAX);
		if (nvadsp_drv_data->mboxes[*mid]) {
			pr_debug("%s: mailbox %d already opened.\n",
				 __func__, *mid);
			ret = -EADDRINUSE;
			goto out;
		}
		mbox->id = *mid;
	}
	strncpy(mbox->name, name, NVADSP_MBOX_NAME_MAX);
	/* strncpy may not terminate a too-long name; force it */
	mbox->name[NVADSP_MBOX_NAME_MAX - 1] = '\0';
	mboxq_init(&mbox->recv_queue);
	mbox->handler = handler;
	mbox->hdata = hdata;
	nvadsp_drv_data->mboxes[mbox->id] = mbox;
out:
	spin_unlock_irqrestore(&nvadsp_drv_data->mbox_lock, flags);
err:
	return ret;
}
EXPORT_SYMBOL(nvadsp_mbox_open);
/*
 * nvadsp_mbox_send - send one word to the ADSP through @mbox.
 * @mbox:    mailbox previously opened with nvadsp_mbox_open()
 * @data:    payload word
 * @flags:   passed through to nvadsp_hwmbox_send_data()
 * @block:   when true and the hw send queue is full, wait up to
 *           @timeout ms for space, then retry exactly once
 * @timeout: wait bound in milliseconds (only used when @block)
 *
 * Returns 0 on success, -ETIME when the blocking wait expires, -EBUSY
 * for a non-blocking send into a full queue, or other negative errors
 * from the lower layer.
 */
status_t nvadsp_mbox_send(struct nvadsp_mbox *mbox, uint32_t data,
			  uint32_t flags, bool block, unsigned int timeout)
{
	int ret = 0;

	if (!nvadsp_drv_data) {
		pr_err("ADSP drv_data is NULL\n");
		ret = -ENOSYS;
		goto out;
	}
	if (!mbox) {
		pr_err("ADSP MBOX is NULL\n");
		ret = -EINVAL;
		goto out;
	}
retry:
	ret = nvadsp_hwmbox_send_data(mbox->id, data, flags);
	if (!ret)
		goto out;
	if (ret == -EBUSY) {
		if (block) {
			/* positive return = woke before timeout: retry once */
			ret = wait_for_completion_timeout(
				&nvadsp_drv_data->hwmbox_send_queue.comp,
				msecs_to_jiffies(timeout));
			if (ret) {
				pr_warn("ADSP HWMBOX send retry\n");
				block = false;
				goto retry;
			} else {
				pr_err("ADSP wait for completion timed out\n");
				ret = -ETIME;
				goto out;
			}
		} else {
			pr_debug("Failed to enqueue data 0x%x. ret: %d\n",
				 data, ret);
		}
	} else if (ret) {
		pr_warn("Failed to enqueue data 0x%x. ret: %d\n", data, ret);
		goto out;
	}
out:
	return ret;
}
EXPORT_SYMBOL(nvadsp_mbox_send);
/*
 * nvadsp_mbox_recv - fetch one word from @mbox's receive queue.
 * @mbox:    mailbox previously opened with nvadsp_mbox_open()
 * @data:    out parameter for the received word
 * @block:   when true and the queue is empty, wait up to @timeout ms
 *           for a message, then retry exactly once
 * @timeout: wait bound in milliseconds (only used when @block)
 *
 * Returns 0 on success, -ETIME when the blocking wait expires, or
 * -EBUSY for a non-blocking read of an empty queue.
 */
status_t nvadsp_mbox_recv(struct nvadsp_mbox *mbox, uint32_t *data, bool block,
			  unsigned int timeout)
{
	int ret = 0;

	if (!nvadsp_drv_data) {
		ret = -ENOSYS;
		goto out;
	}
	if (!mbox) {
		ret = -EINVAL;
		goto out;
	}
retry:
	ret = mboxq_dequeue(&mbox->recv_queue, data);
	if (!ret)
		goto out;
	if (ret == -EBUSY) {
		if (block) {
			ret = wait_for_completion_timeout(
				&mbox->recv_queue.comp,
				msecs_to_jiffies(timeout));
			if (ret) {
				block = false;
				goto retry;
			} else {
				ret = -ETIME;
				goto out;
			}
		} else {
			pr_debug("Failed to receive data. ret: %d\n", ret);
		}
	} else if (ret) {
		pr_debug("Failed to receive data. ret: %d\n", ret);
		goto out;
	}
out:
	return ret;
}
EXPORT_SYMBOL(nvadsp_mbox_recv);
/*
 * nvadsp_mbox_close - unregister a software mailbox.
 *
 * Refuses to close (and dumps the contents) while messages remain in
 * the receive queue. On success the id is returned to the bitmap and
 * the global slot is cleared.
 *
 * Returns 0 on success, -ENOSYS before driver init, -EINVAL for a NULL
 * mbox, or -ENOTEMPTY when the receive queue still has data.
 */
status_t nvadsp_mbox_close(struct nvadsp_mbox *mbox)
{
	unsigned long flags;
	int ret = 0;

	if (!nvadsp_drv_data) {
		ret = -ENOSYS;
		goto err;
	}
	spin_lock_irqsave(&nvadsp_drv_data->mbox_lock, flags);
	if (!mbox) {
		ret = -EINVAL;
		goto out;
	}
	if (!is_mboxq_empty(&mbox->recv_queue)) {
		ret = -ENOTEMPTY;
		mboxq_dump(&mbox->recv_queue);
		goto out;
	}
	nvadsp_mbox_free_mboxid(mbox->id);
	mboxq_destroy(&mbox->recv_queue);
	nvadsp_drv_data->mboxes[mbox->id] = NULL;
out:
	spin_unlock_irqrestore(&nvadsp_drv_data->mbox_lock, flags);
err:
	return ret;
}
EXPORT_SYMBOL(nvadsp_mbox_close);
/*
 * Wire the static mailbox table and id bitmap into the driver data and
 * publish it for the module-level helpers. Always succeeds.
 */
status_t __init nvadsp_mbox_init(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);

	spin_lock_init(&drv->mbox_lock);
	drv->mboxes = nvadsp_mboxes;
	drv->mbox_ids = nvadsp_mbox_ids;
	nvadsp_drv_data = drv;

	return 0;
}

View File

@@ -0,0 +1,316 @@
/*
* mem_manager.c
*
* memory manager
*
* Copyright (C) 2014-2018 NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) "%s : %d, " fmt, __func__, __LINE__
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include "mem_manager.h"
static void clear_alloc_list(struct mem_manager_info *mm_info);
/*
 * mem_request - carve a chunk out of the managed region (best-fit).
 * @mem_handle: manager returned by create_mem_manager()
 * @name:       label recorded on the allocation (truncated to NAME_SIZE)
 * @size:       requested size in the manager's units
 *
 * Scans the free list for the smallest chunk that fits. An exact match
 * is moved wholesale to the alloc list; otherwise the free chunk is
 * split and a new mem_chunk describes the allocation. The alloc list is
 * kept sorted by address.
 *
 * Returns an opaque chunk handle, or ERR_PTR(-ENOMEM) when nothing fits.
 */
void *mem_request(void *mem_handle, const char *name, size_t size)
{
	unsigned long flags;
	struct mem_manager_info *mm_info =
		(struct mem_manager_info *)mem_handle;
	struct mem_chunk *mc_iterator = NULL, *best_match_chunk = NULL;
	struct mem_chunk *new_mc = NULL;

	spin_lock_irqsave(&mm_info->lock, flags);
	/* Is mem full? */
	if (list_empty(mm_info->free_list)) {
		pr_err("%s : memory full\n", mm_info->name);
		spin_unlock_irqrestore(&mm_info->lock, flags);
		return ERR_PTR(-ENOMEM);
	}
	/* Find the best size match */
	list_for_each_entry(mc_iterator, mm_info->free_list, node) {
		if (mc_iterator->size >= size) {
			if (best_match_chunk == NULL)
				best_match_chunk = mc_iterator;
			else if (mc_iterator->size < best_match_chunk->size)
				best_match_chunk = mc_iterator;
		}
	}
	/* Is free node found? */
	if (best_match_chunk == NULL) {
		pr_err("%s : no enough memory available\n", mm_info->name);
		spin_unlock_irqrestore(&mm_info->lock, flags);
		return ERR_PTR(-ENOMEM);
	}
	/* Is it exact match? */
	if (best_match_chunk->size == size) {
		/* move the whole free chunk to the sorted alloc list */
		list_del(&best_match_chunk->node);
		list_for_each_entry(mc_iterator, mm_info->alloc_list, node) {
			if (best_match_chunk->address < mc_iterator->address) {
				list_add_tail(&best_match_chunk->node,
					      &mc_iterator->node);
				strlcpy(best_match_chunk->name, name,
					NAME_SIZE);
				spin_unlock_irqrestore(&mm_info->lock, flags);
				return best_match_chunk;
			}
		}
		list_add(&best_match_chunk->node, mm_info->alloc_list);
		strlcpy(best_match_chunk->name, name, NAME_SIZE);
		spin_unlock_irqrestore(&mm_info->lock, flags);
		return best_match_chunk;
	} else {
		/* split: new chunk takes the low end of the free chunk */
		new_mc = kzalloc(sizeof(struct mem_chunk), GFP_ATOMIC);
		if (unlikely(!new_mc)) {
			pr_err("failed to allocate memory for mem_chunk\n");
			spin_unlock_irqrestore(&mm_info->lock, flags);
			return ERR_PTR(-ENOMEM);
		}
		new_mc->address = best_match_chunk->address;
		new_mc->size = size;
		strlcpy(new_mc->name, name, NAME_SIZE);
		best_match_chunk->address += size;
		best_match_chunk->size -= size;
		list_for_each_entry(mc_iterator, mm_info->alloc_list, node) {
			if (new_mc->address < mc_iterator->address) {
				list_add_tail(&new_mc->node,
					      &mc_iterator->node);
				spin_unlock_irqrestore(&mm_info->lock, flags);
				return new_mc;
			}
		}
		list_add_tail(&new_mc->node, mm_info->alloc_list);
		spin_unlock_irqrestore(&mm_info->lock, flags);
		return new_mc;
	}
}
/*
* Find the node with sepcified address and remove it from list
*/
bool mem_release(void *mem_handle, void *handle)
{
unsigned long flags;
struct mem_manager_info *mm_info =
(struct mem_manager_info *)mem_handle;
struct mem_chunk *mc_curr = NULL, *mc_prev = NULL;
struct mem_chunk *mc_free = (struct mem_chunk *)handle;
pr_debug(" addr = %lu, size = %lu, name = %s\n",
mc_free->address, mc_free->size, mc_free->name);
spin_lock_irqsave(&mm_info->lock, flags);
list_for_each_entry(mc_curr, mm_info->free_list, node) {
if (mc_free->address < mc_curr->address) {
strlcpy(mc_free->name, "FREE", NAME_SIZE);
/* adjacent next free node */
if (mc_curr->address ==
(mc_free->address + mc_free->size)) {
mc_curr->address = mc_free->address;
mc_curr->size += mc_free->size;
list_del(&mc_free->node);
kfree(mc_free);
/* and adjacent prev free node */
if ((mc_prev != NULL) &&
((mc_prev->address + mc_prev->size) ==
mc_curr->address)) {
mc_prev->size += mc_curr->size;
list_del(&mc_curr->node);
kfree(mc_curr);
}
}
/* adjacent prev free node */
else if ((mc_prev != NULL) &&
((mc_prev->address + mc_prev->size) ==
mc_free->address)) {
mc_prev->size += mc_free->size;
list_del(&mc_free->node);
kfree(mc_free);
} else {
list_del(&mc_free->node);
list_add_tail(&mc_free->node,
&mc_curr->node);
}
spin_unlock_irqrestore(&mm_info->lock, flags);
return true;
}
mc_prev = mc_curr;
}
spin_unlock_irqrestore(&mm_info->lock, flags);
return false;
}
/* Base address of the region described by a mem_request() handle. */
inline unsigned long mem_get_address(void *handle)
{
	return ((struct mem_chunk *)handle)->address;
}
/* Log both chunk lists of a manager to the kernel log (diagnostics). */
void mem_print(void *mem_handle)
{
	struct mem_manager_info *mm_info =
		(struct mem_manager_info *)mem_handle;
	struct mem_chunk *mc_iterator = NULL;

	pr_info("------------------------------------\n");
	pr_info("%s ALLOCATED\n", mm_info->name);
	list_for_each_entry(mc_iterator, mm_info->alloc_list, node) {
		pr_info(" addr = %lu, size = %lu, name = %s\n",
			mc_iterator->address, mc_iterator->size,
			mc_iterator->name);
	}
	pr_info("%s FREE\n", mm_info->name);
	list_for_each_entry(mc_iterator, mm_info->free_list, node) {
		pr_info(" addr = %lu, size = %lu, name = %s\n",
			mc_iterator->address, mc_iterator->size,
			mc_iterator->name);
	}
	pr_info("------------------------------------\n");
}
/* Same dump as mem_print() but into a seq_file (debugfs). */
void mem_dump(void *mem_handle, struct seq_file *s)
{
	struct mem_manager_info *mm_info =
		(struct mem_manager_info *)mem_handle;
	struct mem_chunk *mc_iterator = NULL;

	seq_puts(s, "---------------------------------------\n");
	seq_printf(s, "%s ALLOCATED\n", mm_info->name);
	list_for_each_entry(mc_iterator, mm_info->alloc_list, node) {
		seq_printf(s, " addr = %lu, size = %lu, name = %s\n",
			   mc_iterator->address, mc_iterator->size,
			   mc_iterator->name);
	}
	seq_printf(s, "%s FREE\n", mm_info->name);
	list_for_each_entry(mc_iterator, mm_info->free_list, node) {
		seq_printf(s, " addr = %lu, size = %lu, name = %s\n",
			   mc_iterator->address, mc_iterator->size,
			   mc_iterator->name);
	}
	seq_puts(s, "---------------------------------------\n");
}
/*
 * Release every outstanding allocation back to the free list.
 * Uses the _safe iterator because mem_release() unlinks (and may free)
 * the current node.
 */
static void clear_alloc_list(struct mem_manager_info *mm_info)
{
	struct list_head *curr, *next;
	struct mem_chunk *mc = NULL;

	list_for_each_safe(curr, next, mm_info->alloc_list) {
		mc = list_entry(curr, struct mem_chunk, node);
		pr_debug(" addr = %lu, size = %lu, name = %s\n",
			 mc->address, mc->size,
			 mc->name);
		mem_release(mm_info, mc);
	}
}
/*
 * create_mem_manager - build a manager for [start_address, +size).
 * @name:          label used in logs (truncated to NAME_SIZE)
 * @start_address: base of the managed region
 * @size:          length of the managed region
 *
 * The whole region starts as a single chunk on the free list.
 * Returns an opaque handle for the other mem_* calls, or
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
void *create_mem_manager(const char *name, unsigned long start_address,
			 unsigned long size)
{
	void *ret = NULL;
	struct mem_chunk *mc;
	struct mem_manager_info *mm_info =
		kzalloc(sizeof(struct mem_manager_info), GFP_KERNEL);

	if (unlikely(!mm_info)) {
		pr_err("failed to allocate memory for mem_manager_info\n");
		return ERR_PTR(-ENOMEM);
	}
	strlcpy(mm_info->name, name, NAME_SIZE);
	mm_info->alloc_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
	if (unlikely(!mm_info->alloc_list)) {
		pr_err("failed to allocate memory for alloc_list\n");
		ret = ERR_PTR(-ENOMEM);
		goto free_mm_info;
	}
	mm_info->free_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
	if (unlikely(!mm_info->free_list)) {
		pr_err("failed to allocate memory for free_list\n");
		ret = ERR_PTR(-ENOMEM);
		goto free_alloc_list;
	}
	INIT_LIST_HEAD(mm_info->alloc_list);
	INIT_LIST_HEAD(mm_info->free_list);
	mm_info->start_address = start_address;
	mm_info->size = size;
	/* Add whole memory to free list */
	mc = kzalloc(sizeof(struct mem_chunk), GFP_KERNEL);
	if (unlikely(!mc)) {
		pr_err("failed to allocate memory for mem_chunk\n");
		ret = ERR_PTR(-ENOMEM);
		goto free_free_list;
	}
	mc->address = mm_info->start_address;
	mc->size = mm_info->size;
	strlcpy(mc->name, "FREE", NAME_SIZE);
	list_add(&mc->node, mm_info->free_list);
	spin_lock_init(&mm_info->lock);
	return (void *)mm_info;
free_free_list:
	kfree(mm_info->free_list);
free_alloc_list:
	kfree(mm_info->alloc_list);
free_mm_info:
	kfree(mm_info);
	return ret;
}
/*
 * destroy_mem_manager - release every allocation and the manager itself.
 *
 * NOTE(review): after clear_alloc_list() this frees only the first
 * free-list node — it assumes all chunks coalesce back into a single
 * free chunk. Any chunk mem_release() failed to re-insert would leak;
 * verify against mem_release()'s behavior.
 */
void destroy_mem_manager(void *mem_handle)
{
	struct mem_manager_info *mm_info =
		(struct mem_manager_info *)mem_handle;
	struct mem_chunk *mc_last = NULL;

	/* Clear all allocated memory */
	clear_alloc_list(mm_info);
	mc_last = list_entry((mm_info->free_list)->next,
			     struct mem_chunk, node);
	list_del(&mc_last->node);
	kfree(mc_last);
	kfree(mm_info->alloc_list);
	kfree(mm_info->free_list);
	kfree(mm_info);
}

View File

@@ -0,0 +1,51 @@
/*
* Header file for memory manager
*
* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef __TEGRA_NVADSP_MEM_MANAGER_H
#define __TEGRA_NVADSP_MEM_MANAGER_H
#include <linux/sizes.h>
/* max bytes (incl. NUL) for chunk and manager labels */
#define NAME_SIZE SZ_16
/* One contiguous span of the managed region, on either list. */
struct mem_chunk {
	struct list_head node;		/* link in alloc_list or free_list */
	char name[NAME_SIZE];		/* owner label, or "FREE" */
	unsigned long address;		/* span base */
	unsigned long size;		/* span length */
};
/* State of one memory manager instance (see create_mem_manager()). */
struct mem_manager_info {
	struct list_head *alloc_list;	/* chunks handed out, address-sorted */
	struct list_head *free_list;	/* available chunks, address-sorted */
	char name[NAME_SIZE];
	unsigned long start_address;	/* managed region base */
	unsigned long size;		/* managed region length */
	spinlock_t lock;		/* guards both lists */
};
void *create_mem_manager(const char *name, unsigned long start_address,
unsigned long size);
void destroy_mem_manager(void *mem_handle);
void *mem_request(void *mem_handle, const char *name, size_t size);
bool mem_release(void *mem_handle, void *handle);
unsigned long mem_get_address(void *handle);
void mem_print(void *mem_handle);
void mem_dump(void *mem_handle, struct seq_file *s);
#endif /* __TEGRA_NVADSP_MEM_MANAGER_H */

View File

@@ -0,0 +1,178 @@
/*
* ADSP circular message queue
*
* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/tegra_nvadsp.h>
#define msgq_wmemcpy(dest, src, words) \
memcpy(dest, src, (words) * sizeof(int32_t))
/**
 * msgq_init - Initialize message queue
 * @msgq: pointer to the client message queue
 * @size: queue size in words; values above MSGQ_MAX_QUEUE_WSIZE are
 *        capped (with an informational log)
 *
 * The queue storage must be preallocated immediately after the header
 * (msgq_t::queue) and a queue must only be initialized once. Messages
 * are stored in place, with no pointer translation.
 */
void msgq_init(msgq_t *msgq, int32_t size)
{
	if (size > MSGQ_MAX_QUEUE_WSIZE) {
		pr_info("msgq_init: %d size capped to MSGQ_MAX_QUEUE_WSIZE\n",
			size);
		size = MSGQ_MAX_QUEUE_WSIZE;
	}

	msgq->size = size;
	msgq->read_index = 0;
	msgq->write_index = 0;
}
EXPORT_SYMBOL(msgq_init);
/**
 * msgq_queue_message - Queues a message in the queue
 * @msgq: pointer to the client message queue
 * @message: Message buffer to copy from
 *
 * Returns 0 on success, -ENOSPC when the queue lacks room for the
 * whole message (note the queue may have been capped at init time to
 * MSGQ_MAX_QUEUE_WSIZE), or -EFAULT for NULL arguments.
 *
 * The message (header + payload) is copied into the ring in word
 * units; a message that reaches the end of the backing array is split
 * and wraps to the start.
 */
int32_t msgq_queue_message(msgq_t *msgq, const msgq_message_t *message)
{
	int32_t ret = 0;

	if (msgq && message) {
		int32_t ri = msgq->read_index;
		int32_t wi = msgq->write_index;
		/* ri <= wi: used region does not wrap the array end */
		bool wrap = ri <= wi;
		int32_t *start = msgq->queue;
		int32_t *end = &msgq->queue[msgq->size];
		int32_t *first = &msgq->queue[wi];
		int32_t *last = &msgq->queue[ri];
		/* words available before hitting the array end / reader */
		int32_t qremainder = wrap ? end - first : last - first;
		/* total free words in the ring */
		int32_t qsize = wrap ? qremainder + (last - start) : qremainder;
		/* message length in words, header included */
		int32_t msize = &message->payload[message->size] -
			(int32_t *)message;

		if (qsize <= msize) {
			/* don't allow read == write */
			pr_err("%s failed: msgq ri: %d, wi %d, msg size %d\n",
			       __func__, msgq->read_index,
			       msgq->write_index, message->size);
			ret = -ENOSPC;
		} else if (msize < qremainder) {
			msgq_wmemcpy(first, message, msize);
			msgq->write_index = wi + MSGQ_MESSAGE_HEADER_WSIZE +
				message->size;
		} else {
			/* message wrapped */
			msgq_wmemcpy(first, message, qremainder);
			msgq_wmemcpy(msgq->queue, (int32_t *)message +
				     qremainder, msize - qremainder);
			msgq->write_index = wi + MSGQ_MESSAGE_HEADER_WSIZE +
				message->size - msgq->size;
		}
	} else {
		pr_err("NULL: msgq %p message %p\n", msgq, message);
		ret = -EFAULT; /* Bad Address */
	}
	return ret;
}
EXPORT_SYMBOL(msgq_queue_message);
/**
 * msgq_dequeue_message - Dequeues a message from the queue
 * @msgq: pointer to the client message queue
 * @message: Message buffer to copy to or
 *           NULL to discard the current message
 *
 * This function returns 0 if no error has occurred, with
 * msgq_message_t::size set to the size of the message in words.
 * -ENOSPC is returned if the buffer is too small for the queued
 * message (the required size is stored in @message->size).
 * -ENOMSG is returned if there is no message in the queue, and
 * -EFAULT if @msgq is NULL.
 */
int32_t msgq_dequeue_message(msgq_t *msgq, msgq_message_t *message)
{
	int32_t ret = 0;
	int32_t ri;
	int32_t wi;
	msgq_message_t *msg;

	if (!msgq) {
		pr_err("NULL: msgq %p\n", msgq);
		return -EFAULT; /* Bad Address */
	}

	ri = msgq->read_index;
	wi = msgq->write_index;
	/* the head message header starts at the current read index */
	msg = (msgq_message_t *)&msgq->queue[msgq->read_index];

	if (ri == wi) {
		/* empty queue */
		if (message)
			message->size = 0;
		pr_err("%s failed: msgq ri: %d, wi %d; NO MSG\n",
			__func__, msgq->read_index, msgq->write_index);
		ret = -ENOMSG;
	} else if (!message) {
		/* no input buffer, discard top message */
		ri += MSGQ_MESSAGE_HEADER_WSIZE + msg->size;
		/* wrap the read index back into [0, size) */
		msgq->read_index = ri < msgq->size ? ri : ri - msgq->size;
	} else if (message->size < msg->size) {
		/* return buffer too small */
		pr_err("%s failed: msgq ri: %d, wi %d, NO SPACE\n",
			__func__, msgq->read_index, msgq->write_index);
		message->size = msg->size;
		ret = -ENOSPC;
	} else {
		/* copy message to the output buffer */
		int32_t msize = MSGQ_MESSAGE_HEADER_WSIZE + msg->size;
		int32_t *first = &msgq->queue[msgq->read_index];
		int32_t *end = &msgq->queue[msgq->size];
		/* words available before the storage wraps */
		int32_t qremainder = end - first;

		if (msize < qremainder) {
			/* message stored contiguously */
			msgq_wmemcpy(message, first, msize);
			msgq->read_index = ri + MSGQ_MESSAGE_HEADER_WSIZE +
				msg->size;
		} else {
			/* message wrapped */
			msgq_wmemcpy(message, first, qremainder);
			msgq_wmemcpy((int32_t *)message + qremainder,
				msgq->queue, msize - qremainder);
			/* read index wraps back into [0, size) */
			msgq->read_index = ri + MSGQ_MESSAGE_HEADER_WSIZE +
				msg->size - msgq->size;
		}
	}

	return ret;
}
EXPORT_SYMBOL(msgq_dequeue_message);

View File

@@ -0,0 +1,41 @@
/*
* nvadsp_shared_sema.c
*
* ADSP Shared Semaphores
*
* Copyright (C) 2014 NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/tegra_nvadsp.h>
/* Shared semaphore support is not implemented; always returns NULL. */
nvadsp_shared_sema_t *
nvadsp_shared_sema_init(uint8_t nvadsp_shared_sema_id)
{
	return NULL;
}
/* Stub: shared semaphores are unsupported; always returns -ENOENT. */
status_t nvadsp_shared_sema_destroy(nvadsp_shared_sema_t *sema)
{
	return -ENOENT;
}
/* Stub: shared semaphores are unsupported; always returns -ENOENT. */
status_t nvadsp_shared_sema_acquire(nvadsp_shared_sema_t *sema)
{
	return -ENOENT;
}
/* Stub: shared semaphores are unsupported; always returns -ENOENT. */
status_t nvadsp_shared_sema_release(nvadsp_shared_sema_t *sema)
{
	return -ENOENT;
}

View File

@@ -0,0 +1,111 @@
/*
* Copyright (C) 2015-2022, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/version.h>
#if KERNEL_VERSION(4, 15, 0) > LINUX_VERSION_CODE
#include <soc/tegra/chip-id.h>
#else
#include <soc/tegra/fuse.h>
#endif
#include <linux/platform_device.h>
#include <linux/tegra_nvadsp.h>
#include <linux/tegra-hsp.h>
#include <linux/irqchip/tegra-agic.h>
#include "dev.h"
#include "os.h"
#include "dev-t18x.h"
#if IS_ENABLED(CONFIG_TEGRA_HSP)
/*
 * Doorbell callback registered with the HSP driver; @data is the
 * nvadsp platform device. Logs each APE doorbell notification.
 */
static void nvadsp_dbell_handler(void *data)
{
	struct platform_device *pdev = data;

	dev_info(&pdev->dev, "APE DBELL handler\n");
}
#endif
/*
 * tegra_adma_query_dma_page - Return the ADMA page number (0 indexed)
 * used by the guest.
 *
 * Scans the device tree for the first node matching a known ADMA
 * compatible string and reads its optional "adma-page" property.
 * Returns page 0 when no node or no property is found.
 *
 * Fix: of_find_compatible_node() returns a device node with an
 * elevated refcount; the original code never dropped it, leaking a
 * reference on every call. Release it with of_node_put().
 */
static int tegra_adma_query_dma_page(void)
{
	struct device_node *np = NULL;
	int adma_page = 0, ret = 0, i = 0;
	static const char *compatible[] = {
		"nvidia,tegra210-adma",
		"nvidia,tegra210-adma-hv",
		"nvidia,tegra186-adma",
		"nvidia,tegra194-adma-hv",
	};

	for (i = 0; i < ARRAY_SIZE(compatible); i++) {
		np = of_find_compatible_node(NULL, NULL, compatible[i]);
		if (np == NULL)
			continue;

		/*
		 * In DT, "adma-page" property is 1 indexed
		 * If property is present, update return value to be 0 indexed
		 * If property is absent, return default value as page 0
		 */
		ret = of_property_read_u32(np, "adma-page", &adma_page);
		if (ret == 0)
			adma_page = adma_page - 1;

		/* drop the reference taken by of_find_compatible_node() */
		of_node_put(np);
		break;
	}

	pr_info("%s: adma-page %d\n", __func__, adma_page);

	return adma_page;
}
/*
 * nvadsp_os_t18x_init - T18x-specific ADSP OS setup.
 *
 * In hypervisor (virtualized) mode, encodes the OS configuration word
 * (decompression enable, virtualization flag, ADMA page) and writes it
 * to the OS-config hardware mailbox, then clears HWMBOX0 for guest
 * reset handling. In native mode, registers an APE doorbell handler
 * with the HSP driver when CONFIG_TEGRA_HSP is enabled.
 *
 * Returns 0 on success or the tegra_hsp_db_add_handler() error code.
 */
int nvadsp_os_t18x_init(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
	int ret = 0, adma_ch_page, val = 0;

	if (is_tegra_hypervisor_mode()) {
		adma_ch_page = tegra_adma_query_dma_page();

		/* Set ADSP to do decompression again */
		val = ADSP_CONFIG_DECOMPRESS_EN << ADSP_CONFIG_DECOMPRESS_SHIFT;
		/* Set ADSP to know its virtualized configuration */
		val = val | (ADSP_CONFIG_VIRT_EN << ADSP_CONFIG_VIRT_SHIFT);
		/* Encode DMA Page Bits with DMA page information */
		val = val | (adma_ch_page << ADSP_CONFIG_DMA_PAGE_SHIFT);
		/* Write to HWMBOX5 */
		hwmbox_writel(val, drv_data->chip_data->adsp_os_config_hwmbox);

		/* Clear HWMBOX0 for ADSP Guest reset handling */
		hwmbox_writel(0, drv_data->chip_data->hwmb.hwmbox0_reg);

		/* virtualized path does not register a doorbell handler */
		return 0;
	}

#if IS_ENABLED(CONFIG_TEGRA_HSP)
	ret = tegra_hsp_db_add_handler(HSP_MASTER_APE,
			nvadsp_dbell_handler, pdev);
	if (ret)
		dev_err(&pdev->dev,
			"failed to add HSP_MASTER_APE DB handler\n");
#endif

	return ret;
}

View File

@@ -0,0 +1,21 @@
/*
* Copyright (C) 2016-2017, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "dev.h"
#include "dev-t21x.h"
/* T21x needs no chip-specific OS setup; stub always succeeds. */
int nvadsp_os_t21x_init(struct platform_device *pdev)
{
	return 0;
}

View File

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,198 @@
/*
* os.h
*
* A header file containing data structures shared with ADSP OS
*
* Copyright (C) 2014-2022 NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __TEGRA_NVADSP_OS_H
#define __TEGRA_NVADSP_OS_H
#include <linux/firmware.h>
#include "adsp_shared_struct.h"
#include "dev.h"
#define CONFIG_ADSP_DRAM_LOG_WITH_TAG 1
/* enable profiling of load init start */
#define RECORD_STATS 0
#define SYM_NAME_SZ 128
#define AMC_EVP_RESET_VEC_0 0x700
#define AMC_EVP_UNDEF_VEC_0 0x704
#define AMC_EVP_SWI_VEC_0 0x708
#define AMC_EVP_PREFETCH_ABORT_VEC_0 0x70c
#define AMC_EVP_DATA_ABORT_VEC_0 0x710
#define AMC_EVP_RSVD_VEC_0 0x714
#define AMC_EVP_IRQ_VEC_0 0x718
#define AMC_EVP_FIQ_VEC_0 0x71c
#define AMC_EVP_RESET_ADDR_0 0x720
#define AMC_EVP_UNDEF_ADDR_0 0x724
#define AMC_EVP_SWI_ADDR_0 0x728
#define AMC_EVP_PREFETCH_ABORT_ADDR_0 0x72c
#define AMC_EVP_DATA_ABORT_ADDR_0 0x730
#define AMC_EVP_RSVD_ADDR_0 0x734
#define AMC_EVP_IRQ_ADDR_0 0x738
#define AMC_EVP_FIQ_ADDR_0 0x73c
#define AMC_EVP_SIZE (AMC_EVP_FIQ_ADDR_0 - AMC_EVP_RESET_VEC_0 + 4)
#define AMC_EVP_WSIZE (AMC_EVP_SIZE >> 2)
#define OS_LOAD_TIMEOUT 5000 /* ms */
#define ADSP_COM_MBOX_ID 2
#define MIN_ADSP_FREQ 38400000lu /* in Hz */
/* macros used to find the current mode of ADSP */
#define MODE_MASK 0x1f
#define MODE_USR 0x10
#define MODE_FIQ 0x11
#define MODE_IRQ 0x12
#define MODE_SVC 0x13
#define MODE_MON 0x16
#define MODE_ABT 0x17
#define MODE_UND 0x1b
#define MODE_SYS 0x1f
/*
* ADSP OS Config
*
* DECOMPRESS (Bit 0) : Set if ADSP FW needs to be decompressed
* VIRT CONFIG (Bit 1) : Set if virtualized configuration
* DMA PAGE (Bits 7:4) : Contains DMA page information
*/
#define ADSP_CONFIG_DECOMPRESS_SHIFT 0
#define ADSP_CONFIG_DECOMPRESS_EN 1
#define ADSP_CONFIG_DECOMPRESS_MASK (1 << ADSP_CONFIG_DECOMPRESS_SHIFT)
#define ADSP_CONFIG_VIRT_SHIFT 1
#define ADSP_CONFIG_VIRT_EN 1
#define ADSP_CONFIG_VIRT_MASK (1 << ADSP_CONFIG_VIRT_SHIFT)
#define ADSP_CONFIG_DMA_PAGE_SHIFT 4
#define ADSP_CONFIG_DMA_PAGE_MASK (0xF << ADSP_CONFIG_DMA_PAGE_SHIFT)
/* OS lifecycle commands exchanged with the ADSP OS. */
enum adsp_os_cmd {
	ADSP_OS_BOOT_COMPLETE,
	ADSP_OS_SUSPEND,
	ADSP_OS_RESUME,
	ADSP_OS_STOP,
};
#if RECORD_STATS
/* Profiling helpers, active only when RECORD_STATS is enabled:
 * RECORD_STAT turns a start timestamp into an elapsed ns value. */
#define RECORD_STAT(x) \
	(x = ktime_to_ns(ktime_get()) - x)
#define EQUATE_STAT(x, y) \
	(x = y)
#define RECORD_TIMESTAMP(x) \
	(x = nvadsp_get_timestamp_counter())
#else
/* Compiled out when stats recording is disabled. */
#define RECORD_STAT(x)
#define EQUATE_STAT(x, y)
#define RECORD_TIMESTAMP(x)
#endif
/**
 * struct global_sym_info - Global Symbol information required by app loader.
 * @name: Name of the symbol
 * @addr: Address of the symbol
 * @info: Type and binding attributes
 */
struct global_sym_info {
	char name[SYM_NAME_SZ];
	uint32_t addr;
	unsigned char info;
};

/**
 * struct adsp_module - State of one loaded ADSP module.
 * @name:            module name
 * @handle:          opaque loader handle
 * @module_ptr:      host-side mapping of the module image
 * @adsp_module_ptr: module address in the ADSP address space
 * @size:            module image size in bytes
 * @mem_size:        per-app memory sizes (see struct app_mem_size)
 * @dynamic:         true when loaded as a dynamic module
 * @version:         module version string
 *
 * NOTE(review): field semantics inferred from the loader API below
 * (load_adsp_dynamic_module/load_adsp_static_module) — confirm against
 * the loader implementation.
 */
struct adsp_module {
	const char *name;
	void *handle;
	void *module_ptr;
	uint32_t adsp_module_ptr;
	size_t size;
	const struct app_mem_size mem_size;
	bool dynamic;
	char version[16];
};
/* Timing breakdown for app load (ns fields filled via RECORD_STAT;
 * timestamp fields via RECORD_TIMESTAMP). */
struct app_load_stats {
	s64 ns_time_load;
	s64 ns_time_service_parse;
	s64 ns_time_module_load;
	s64 ns_time_req_firmware;
	s64 ns_time_layout;
	s64 ns_time_native_load;
	s64 ns_time_load_mbox_send_time;
	s64 ns_time_load_wait_time;
	s64 ns_time_native_load_complete;
	u64 ns_time_adsp_map;
	u64 ns_time_adsp_app_load;
	u64 ns_time_adsp_send_status;
	u64 adsp_receive_timestamp;
	u64 host_send_timestamp;
	u64 host_receive_timestamp;
};

/* Timing breakdown for app instance initialization. */
struct app_init_stats {
	s64 ns_time_app_init;
	s64 ns_time_app_alloc;
	s64 ns_time_instance_memory;
	s64 ns_time_native_call;
	u64 ns_time_adsp_app_init;
	u64 ns_time_adsp_mem_instance_map;
	u64 ns_time_adsp_init_call;
	u64 ns_time_adsp_send_status;
	u64 adsp_receive_timestamp;
};

/* Timing breakdown for app start (thread creation/detach/resume). */
struct app_start_stats {
	s64 ns_time_app_start;
	s64 ns_time_native_call;
	s64 ns_time_adsp_app_start;
	u64 ns_time_app_thread_creation;
	u64 ns_time_app_thread_detach;
	u64 ns_time_app_thread_resume;
	u64 ns_time_adsp_send_status;
	u64 adsp_receive_timestamp;
};
/* Dispatch OS init to the chip-specific hook; -EINVAL when none is set. */
static inline int nvadsp_os_init(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);

	if (drv_data->chip_data->os_init)
		return drv_data->chip_data->os_init(pdev);

	return -EINVAL;
}
int nvadsp_os_probe(struct platform_device *);
int nvadsp_app_module_probe(struct platform_device *);
void *nvadsp_da_to_va_mappings(u64 da, int len);
int nvadsp_add_load_mappings(phys_addr_t pa, void *mapping, int len);
struct elf32_shdr *nvadsp_get_section(const struct firmware *, char *);
struct global_sym_info *find_global_symbol(const char *);
void update_nvadsp_app_shared_ptr(void *);
struct adsp_module *load_adsp_dynamic_module(const char *, const char *,
struct device *);
struct adsp_module *load_adsp_static_module(const char *,
struct adsp_shared_app *, struct device *);
void unload_adsp_module(struct adsp_module *);
int allocate_memory_from_adsp(void **, unsigned int);
bool is_adsp_dram_addr(u64);
int load_adsp_static_apps(void);
#endif /* __TEGRA_NVADSP_OS_H */