platform: tegra: Import from linux-4.4

Import tegra platform drivers from linux-4.4. These files
are introduced or touched by the following commits in
linux-4.4:

5798930 arm: mach-tegra: Get rid of apbio.{c,h}
618d424 platform: tegra: enable denver_serr only for t18x
c9f681e arm: tegra: get rid of nct header
14cb7cf platform: tegra: nvdumper: Remove code which need NCT
46adb21 platform: tegra: mc: make mc-sid driver scalable and upstreamable
b0ea9ac usb: phy: Move header files to include/linux
7d63191 platform: tegra: mc: allow modules to access mc registers
1d5ac46 soc/tegra: Add GPU EDP Management
09166fd soc/tegra: Add CPU EDP Management
7118c16 platform: powergate: tegra: update ISPA name
59cebf1 bwmgr: Add tegra21 specific bwmgr functionality
e91ada2 bwmgr: merge dram specific functionality to common code
19fb2fe drivers: platform: tegra: remove TEGRA_21x defconfig
b87f6739 platform: nvadsp: fix is_mboxq_full check
b11cdba platform: tegra: powergate: do not handle SATA clk
8324487 platform: tegra: mc: remove cl34000094 hacks
a77d415 platform: tegra: mc: rename mc-sid.c to tegra-mc-sid.c
df6d5a8 platform: tegra: remove support for soctherm
e96ddb9 Merge "Merge branch 'dev/praithatha/k4.4_mc_sid' into dev-kernel-4.4" into dev-kernel-4.4
2f69aa5 Merge branch 'dev/praithatha/k4.4_mc_sid' into dev-kernel-4.4
e171bb1 adsp: dfs: override adsp dfs and reinit actmon
b7ca294 platform: tegra: mc: Add streamID configuration
457e747 nvdumper: fixed Coverity defects
e12c0c0 platform: tegra: Use ARCH_TEGRA_APE for APE PD code
40eba0d platform: tegra: drop flowctrl code
568ad6e central actmon: fix extra interrupt
944bdbf clk: tegra: Correct some nvenc clock names
5c957fe driver: platform: tegra: GR support
5af5163 coverity: Fix the error reported by Coverity
576ea23 tegra-virt-alt: Add adsp virtualization support
7407552 platform: tegra: nvadsp: Fix coverity defect
d11b6a0 platform: tegra: nvadsp: Fix coverity defect
6c41ef0 tegra: nvadsp: fix compilation when CONFIG_PM=n
4262e32 adsp: Add virtualization check
85384e2 Fix an uninitialized local value error
44a8d9c tegra: mc: export symbol tegra_set_latency_allowance
f699ce7 platform: nvadsp: disable app loading for secload
5ed4b72 drivers: tegra: allow to get "version" from dt
9b6fe36 tegra: add path to allow include
60d96e4 platform: tegra: nvadsp: nvadsp_os_get_version fix
79622a2 drivers: tegra: ape firmware version
27fb719 platform: tegra: iommu: enable linear map setup
b77bade platform: nvadsp: fix adsp clock handle
110b085 platform: tegra: Remove code for shared pad control
b770e0e bwmgr: pmqos: fix sparse warning
dd0400a platform: tegra: remove unused tegra_map_common_io
b343db9 platform: tegra: remove Tegra core voltage cap support
df9df6b platform/tegra: remove PMC wakeup support codes
c669b9a platform/tegra: pmc-iopower: switch to upstream PMC driver
688b6a6 platform/tegra: bootrom-pmc: switch to upstream PMC driver
d7d34e6 platform/tegra: remove reboot.c
60b4a47 soc/tegra: pmc: switch to upstream PMC driver
2c9409f bootprof: Separate discontinuous regions
e0cb4d2 arm64: tegra186: remove unused register nodes
12c2ba4 fiq reverts
65d8ccb platform: nvadsp: fix clk checks
b2eb017 platform: tegra: remove support for auto power detection
c403625 tegra: powergate: cleanup code for unsupported platform
589fecb platform: tegra: Removing unsupported platform sources
5f578a8 irqchip: gic: use CPU_PM only when registering
d45c1ea platform: tegra: remove nvdump support for t12x/t13x
9b0b6de Revert "Revert "android: fiq_debugger: FIQ glue for ARM64 CPUs""
d5351a1 Revert "Revert "drivers: platform: enumerate fiq_debugger device""
27af58f Revert "Revert "platform: tegra: clean up fiq_enable() implementation""
688e514 platform: tegra: Remove support for Tegra clock framework
c15896d tegra: denver-knobs: Remove nvmstat_pg stats
019f065 platform: tegra: Remove support for TEGRA_13x_SOC
207db5f drivers: platform: iommu: add linear mapping on lut
1bc0602 denver: hardwood: use device tree to find IRQ number
b11f182 isomgr: Apply ISO client's BW limit
132843c platform: tegra: Remove the API tegra_gpio_to_wake()
8a2892d platform: tegra: deprecate APIs to change waketable runtime
e2f5924 prod: Add support for masked write in partially prod config
e94ac08 platform: tegra: powergate: Remove support for TEGRA_12x_SOC
6b4c4cb platform: tegra: mc: Remove support for TEGRA_12x_SOC
39977fb platform: tegra: Remove drivers for TEGRA_12x_SOC
e17670c drivers: platform: fix denver_mca driver
61c6f5e tegra: powergate: add clocks for XUSBB
bb10919 tegra: powergate: cleanup clock and reset handling
73b944e tegra: powergate: correct xusbb partition reset ID
b3dc4f4 iommu: arm-smmu: add linear mapping support
6c9bcbb platform: tegra: support disabling CONFIG_PM
98d04a5 platform: tegra: remove legacy speedo files
c6b9acf platform: tegra: APIs to set prod based on name/index/offset/mask
a04d242 platform: tegra: mc: fix build error
1d8c939 platform: tegra: Remove miscellaneous platform specific drivers
daab9eb tegra: powergate: shorten some con ids
229a25f platform: tegra: remove tegra common support
3e71442 bwmgr: Remove checks to limit emc_freq
9f3f176 tegra: powergate: use new reset API and use ioremap
11cd9c8 platform: tegra: Disable T210 TCF cpufreq driver
224ecab platform: tegra: Remove the common board support
1813dd1 ivc: fix missing peer notification
a56ac67 ivc: fix incorrect counter reset
844c7a1 ivc: Remove nframes power of two check
5a3ec3a bwmgr: Add more information to clients info sysfs
522777c platform: tegra: remove raydium touch support
251660a platform: tegra: remove unneccessary panel file
0915b9a bwmgr: Add API to get core emc rate
c66f6bc platform: tegra: Add pmqos emc floor handling
a7b51df Add CONFIG_TEGRA_BOND_OUT and disable by default
29cd4ee Stubbed out tegra_periph_reset
3c07fd4 iommu: smmu: Changed the chip ids for 4.4 naming
3d780a1 platform: nvadsp: fix MAXCLKLATENCY offset
e470cdd platform: tegra: bwmgr: add a disable knob
1d8e851 tegra: denver-knobs: Use correct CPU id for bgallowed
db9711f bwmgr: Add errno to debug print
96ed52e drivers: usb: pmc: rebase pmc driver for kernel-4.4
6510703 tegra: denver-knobs: add tracer_control support
3a285a9 tegra: actmon: missing sysfs_attr_init()
0c83659 tegra: actmon: add sysfs node for emc load
2477286 platform: tegra: pmc: add usb2 sleepwalk APIs
8bcf839 pinctrl: add Tegra21x PADCTL/UPHY driver
acaa486 platform: nvadsp: make version func static
6028232 Revert "platform: tegra: clean up fiq_enable() implementation"
cc80660 Revert "drivers: platform: enumerate fiq_debugger device"
ab2cc4c Revert "android: fiq_debugger: FIQ glue for ARM64 CPUs"
38ff9fd drivers: platform: enumerate fiq_debugger device
1dd509c platform: tegra: clean up fiq_enable() implementation
0a87d11 android: fiq_debugger: FIQ glue for ARM64 CPUs
74ec787 platform: nvadsp: add adsp os version
9bd6a7f platform: tegra: Remove use of is_partition_clk_disabled
a56b821 drivers: class 'tegra-firmwares'
c859a13 Kconfig: Rename included Kconfigs
804f706 platform: tegra: add kfuse sensing for hdcp 1.x
9b3510f drivers: platform: tegra: only compile tegra_irq_to_wake for Tegra186
fb5394c drivers: platform: tegra: switch powergating driver to CCF
66d0faf tegra: mc: declare tegra_get_chip_id and use in mc
eb90d56 drivers: platform: Move DPD IO control into pmc driver
3816cef arm64: enable CCF
a196e10 soc/tegra: Add TEGRA148 and TEGRA186 chip id
8354256 drivers: platform: tegra: switch powergate driver to tegra_get_chip_id()
473ce73 platform: tegra: mc: adapt la driver to upstream tegra_get_chip_id()
0f2668b Kconfig: replace source with trysource for external projects
266255a platform: tegra: Add support to find prod setting
ac65ac1 platfor: tegra: prod: Do not use kmemleak_not_leak() for managed allocation
96358ea platform: tegra: prod: use devm_ for allocation
53933ed platform: tegra: Add sysfs entry for suspend/resume time
bfef5bc drivers: platform: tegra: add tegra_wake_to_gpio() interface
4409ca6 tegra: central_actmon: fix DEBUG_FS=n build
80aa543 platform:tegra:bwmgr: fix build issue.
f97d139 tegra: denver-knobs: fix build issue.
bcfc89c platform: tegra: prod: Add managed version of tegra_prod_get_from_node
85288f1 platform: tegra: prod: Add support for nested prod nodes.
0be37d5 platform: tegra: prod: Get rid of tegra_prod_release()
753f71d platform: tegra: prod: Remove unused APIs from public header
5b92965 platform: tegra: pmc: Use devm for tegra_prod allocation
21af9cb platform: tegra: prod: Add APIs to managed allocation of prod_list
2d9312b platform: tegra: move definition of tegra_prod_list structure to private
0d1efe1 platform: tegra: prod: Use for_each_available_child_of_node()
3014a93 tegra:nvadsp:fix issue with CONFIG_DEBUG_FS=n
edd37fd platform: tegra: prod: Use proper variable name
168ec7b platform: tegra: prod: Fix parsing of prod setting for package/board
bc8cd66 platform: tegra: prod: Make allocated memory as kmemleak_not_leak()
380f89f PM/Domains: Remove use of 'need_save' and 'need_restore'
fdf13ea platform: nvadsp: change perms of debugfs files
bc34a73 nvadsp: console: keep track of app context
62d0040 nvadsp: console: fix app name handling
f113a66 platform: nvadsp: export adsp dump sys
e0e907b platform: tegra: nvadsp: fix setting boot freqs
c7fb6ee Revert "drivers: platform: tegra: add proper config check"
c5b1e8b platform:tegra:bwmgr: Fix bwmgr floor aggregation
584b06e platform: tegra: bwmgr: Add Security Engine Client
f63d36d tegra: gpo: move gpio-tegra
8171ecb security: Use a common secure monitor interface
1359955 platform: nvadsp: fix unused function build issue
aa55e67 platform: tegra: Remove unused functions
6f2d8d8 platform: tegra: Change value of need_save to 'true'
0c28fab Merge "Merge agic changes" into dev-kernel
0174199 platform: nvadsp: Correct AGIC irqs state dumps
0d7fec4 irqchip: Move AGIC Kconfig from nvadsp
b7ce47f kernel: change kernel path
7627eeb platform: tegra: mc: fix coverity defects
d4afd62 platform: tegra: remove platform emc driver for Tegra210
93e504f platform: tegra: mc: include module.h
4a3d8fa platform: tegra: add thermal throttling config
b99e994 platform: tegra: move tegra_fiq_debugger from mach-tegra
b89bbf4b tegra: t21x: restore irqs-t21x.h file
3ec5fa9 arm64: Copy over more T210 files
0c3285d platform: tegra: mc: enable latency allowance code compile
80b090f platform: tegra: mc: add T18x PTSA vars
345b7ee tegra: mc: add set LA funcion pointers
26c3314 platform: tegra: Add support for mask with 1s
62e11eb platform: tegra: mc: Don't compile latency_allowance for k4.4
dc9cafc platform: tegra: Add protection to code
55dabeb drivers: platform: tegra: add proper config check
99a65ef platform: tegra: tegra_usb_pmc depends on T210
eae2a6d tegra: denver-knobs: fix seq_printf return value
74467a6 TEMP: drivers: Kconfig: Use source instead of trysource
000acb1 platform: tegra: add missing headers and build fixes
e228595 kconfig: add trysource to kernel-t18x
94daaaa drivers: platform: tegra: Initialize drivers/platform/tegra inside kernel-next
c5c90b8 drivers: platform: tegra: Add miscellaneous platform specific drivers
3faa2fe drivers: platform: tegra: PTM driver for t12x and t21x
8953022 drivers: platform: tegra: Add kfuse driver
19a844c drivers: platform: tegra: Tegra USB Padctrl driver
4596579 drivers: platform: tegra: Add nvdumper source for platforms
a03e4b0 drivers: platform: tegra: Add wakeup related source
363b7ee drivers: platform: tegra: central_actmon: Add common and support for T21x
9fe72d5 drivers: platform: tegra: mc: Add platform specific MC source
6c2b078 drivers: platform: tegra: Adding denver specific drivers
976c8b9 drivers: platform: tegra: Add bootloader drivers
a97be5b drivers: platform: tegra: nvadsp: Add platform specific nvadsp drivers
b29af75 drivers: platform: tegra: Add powergating drivers
899dddd platform: tegra: Add Tegra Clocks drivers for various platforms

Signed-off-by: Mikko Perttunen <mperttunen@nvidia.com>
Change-Id: Ic232ac71a09fe5176247692630db5bc6107573fa
Signed-off-by: Nitin Kumbhar <nkumbhar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1537316
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit

Commit:    a37d2fe7e3
Parent:    bf7d14249c
Author:    Mikko Perttunen
Date:      2016-12-09 15:06:19 +02:00
Committer: Laxman Dewangan

39 changed files with 10745 additions and 0 deletions

@@ -0,0 +1,80 @@
config TEGRA_NVADSP
	bool "Enable Host ADSP driver"
	depends on ARCH_TEGRA_APE
	default n
	help
	  Enables support for Host ADSP driver.
	  If unsure, say N.

config TEGRA_NVADSP_ON_SMMU
	bool "Use SMMU to relocate ADSP"
	depends on (TEGRA_IOMMU_SMMU || OF_TEGRA_IOMMU_SMMU) && TEGRA_NVADSP
	default n
	help
	  Use SMMU to relocate ADSP OS.

config TEGRA_ADSP_DFS
	bool "Enable ADSP DFS"
	depends on TEGRA_NVADSP
	default n
	help
	  Enable ADSP dynamic frequency scaling. Use this config
	  to scale adsp frequency via actmon or set a fixed value.
	  If unsure, say N.

config TEGRA_ADSP_ACTMON
	bool "Enable ADSP ACTMON"
	depends on TEGRA_ADSP_DFS
	default n
	help
	  Enable ADSP actmon. It converts adsp activity to frequency and
	  asks adsp dfs to set the adsp frequency. Use it if the adsp
	  frequency is to be scaled dynamically by actmon.
	  If unsure, say N.

config TEGRA_ADSP_CPUSTAT
	bool "Enable ADSP CPUSTAT"
	depends on DEBUG_FS && TEGRA_NVADSP && !TEGRA_ADSP_ACTMON
	default n
	help
	  Enable ADSP cpu usage measurement using actmon.
	  If unsure, say N.

config TEGRA_ADSP_FILEIO
	bool "Enable ADSP file io"
	default n
	help
	  Enable dumping to and reading from a file on the host from ADSP.
	  If unsure, say N.

config TEGRA_EMC_APE_DFS
	bool "Enable emc dfs due to APE"
	depends on ARCH_TEGRA_APE
	default n
	help
	  Enable emc dfs due to APE DRAM access.
	  If unsure, say N.

config TEGRA_ADSP_CONSOLE
	bool "Enable ADSP console"
	depends on TEGRA_NVADSP
	default y
	help
	  Enable ADSP console access.
	  If unsure, say N.

config MBOX_ACK_HANDLER
	bool "Enable mailbox acknowledge handler"
	depends on TEGRA_NVADSP
	default n
	help
	  Enable mailbox acknowledge handler.
	  If unsure, say N.

@@ -0,0 +1,41 @@
GCOV_PROFILE := y
ccflags-y += -Werror
ifeq ($(CONFIG_ARCH_TEGRA_18x_SOC),y)
ccflags-y += -I$(srctree)/../t18x/drivers/platform/tegra/nvadsp
endif
obj-y := nvadsp.o
nvadsp-objs += dev.o os.o app.o app_loader_linker.o\
amc.o nvadsp_dram.o \
nvadsp_shared_sema.o nvadsp_arb_sema.o \
hwmailbox.o mailbox.o msgq.o \
mem_manager.o aram_manager.o dram_app_mem_manager.o
ifeq ($(CONFIG_TEGRA_ADSP_DFS),y)
nvadsp-objs += adsp_dfs.o
endif
ifeq ($(CONFIG_TEGRA_ADSP_ACTMON),y)
nvadsp-objs += ape_actmon.o
endif
ifeq ($(CONFIG_TEGRA_EMC_APE_DFS),y)
nvadsp-objs += emc_dfs.o
endif
ifeq ($(CONFIG_TEGRA_ADSP_CONSOLE),y)
nvadsp-objs += adsp_console_dbfs.o
endif
ifeq ($(CONFIG_TEGRA_ADSP_CPUSTAT),y)
nvadsp-objs += adsp_cpustat.o
endif
ifeq ($(CONFIG_ARCH_TEGRA_21x_SOC),y)
nvadsp-objs += dev-t21x.o
nvadsp-objs += os-t21x.o
endif
ifeq ($(CONFIG_TEGRA_ADSP_FILEIO),y)
nvadsp-objs += adspff.o
endif

@@ -0,0 +1,428 @@
/*
* adsp_console_dbfs.c
*
* adsp mailbox console driver
*
* Copyright (C) 2014-2016, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tegra_nvadsp.h>
#include <linux/platform_device.h>
#include <asm/uaccess.h>
#include "dev.h"
#include "adsp_console_ioctl.h"
#include "adsp_console_dbfs.h"
#define USE_RUN_APP_API
static int open_cnt;
#define ADSP_APP_CTX_MAX 32
static uint64_t adsp_app_ctx_vals[ADSP_APP_CTX_MAX];
static int adsp_app_ctx_add(uint64_t ctx)
{
int i;
if (ctx == 0)
return -EINVAL;
for (i = 0; i < ADSP_APP_CTX_MAX; i++) {
if (adsp_app_ctx_vals[i] == 0) {
adsp_app_ctx_vals[i] = ctx;
return 0;
}
}
return -EINVAL;
}
static int adsp_app_ctx_check(uint64_t ctx)
{
int i;
if (ctx == 0)
return -EINVAL;
for (i = 0; i < ADSP_APP_CTX_MAX; i++) {
if (adsp_app_ctx_vals[i] == ctx)
return 0;
}
return -EINVAL;
}
static void adsp_app_ctx_remove(uint64_t ctx)
{
int i;
for (i = 0; i < ADSP_APP_CTX_MAX; i++) {
if (adsp_app_ctx_vals[i] == ctx) {
adsp_app_ctx_vals[i] = 0;
return;
}
}
}
static int adsp_consol_open(struct inode *i, struct file *f)
{
int ret;
uint16_t snd_mbox_id = 30;
struct nvadsp_cnsl *console = i->i_private;
struct device *dev = console->dev;
struct platform_device *pdev = to_platform_device(dev);
struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
if (open_cnt)
return -EBUSY;
open_cnt++;
ret = 0;
f->private_data = console;
if (!drv_data->adsp_os_running)
goto exit_open;
ret = nvadsp_mbox_open(&console->shl_snd_mbox, &snd_mbox_id,
"adsp_send_cnsl", NULL, NULL);
if (!ret)
goto exit_open;
pr_err("adsp_consol: Failed to init adsp_consol send mailbox");
memset(&console->shl_snd_mbox, 0, sizeof(struct nvadsp_mbox));
open_cnt--;
exit_open:
return ret;
}
static int adsp_consol_close(struct inode *i, struct file *f)
{
int ret = 0;
struct nvadsp_cnsl *console = i->i_private;
struct nvadsp_mbox *mbox = &console->shl_snd_mbox;
struct device *dev = console->dev;
struct platform_device *pdev = to_platform_device(dev);
struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
open_cnt--;
if (!drv_data->adsp_os_running || (0 == mbox->id))
goto exit_close;
ret = nvadsp_mbox_close(mbox);
if (ret)
pr_err("adsp_consol: Failed to close adsp_consol send mailbox)");
memset(mbox, 0, sizeof(struct nvadsp_mbox));
exit_close:
return ret;
}
static long
adsp_consol_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
int ret = 0;
uint16_t *mid;
uint16_t mbxid = 0;
uint32_t data;
uint64_t ctx2;
nvadsp_app_info_t *app_info;
struct adsp_consol_run_app_arg_t app_args;
struct nvadsp_cnsl *console = f->private_data;
struct nvadsp_mbox *mbox;
struct device *dev = console->dev;
struct platform_device *pdev = to_platform_device(dev);
struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
void __user *uarg = (void __user *)arg;
if (_IOC_TYPE(cmd) != NV_ADSP_CONSOLE_MAGIC)
return -EFAULT;
if ((_IOC_NR(cmd) != _IOC_NR(ADSP_CNSL_LOAD)) &&
(!drv_data->adsp_os_running)) {
dev_info(dev, "adsp_consol: os not running.");
return -EPERM;
}
if ((_IOC_NR(cmd) != _IOC_NR(ADSP_CNSL_LOAD)) &&
(0 == console->shl_snd_mbox.id)) {
dev_info(dev, "adsp_consol: Mailboxes not open.");
return -EPERM;
}
switch (_IOC_NR(cmd)) {
case _IOC_NR(ADSP_CNSL_LOAD):
ret = 0;
if (drv_data->adsp_os_running)
break;
mbxid = 30;
mbox = &console->shl_snd_mbox;
ret = nvadsp_os_load();
if (ret) {
dev_info(dev, "adsp_consol: Load OS Failed.");
break;
}
ret = nvadsp_os_start();
if (ret) {
dev_info(dev, "adsp_consol: Start OS Failed.");
break;
}
ret = nvadsp_mbox_open(mbox, &mbxid,
"adsp_send_cnsl", NULL, NULL);
if (!ret)
break;
pr_err("adsp_consol: Failed to init adsp_consol send mailbox");
memset(mbox, 0, sizeof(struct nvadsp_mbox));
break;
case _IOC_NR(ADSP_CNSL_RUN_APP):
if (!access_ok(0, uarg,
sizeof(struct adsp_consol_run_app_arg_t)))
return -EACCES;
ret = copy_from_user(&app_args, uarg,
sizeof(app_args));
if (ret) {
ret = -EACCES;
break;
}
app_args.app_name[NVADSP_NAME_SZ_MAX] = '\0';
#ifdef USE_RUN_APP_API
app_args.ctx2 = (uint64_t)nvadsp_run_app(NULL,
app_args.app_name,
(nvadsp_app_args_t *)&app_args.args[0],
NULL, 0, true);
if (!app_args.ctx2) {
dev_info(dev, "adsp_consol: unable to run %s\n",
app_args.app_name);
return -EINVAL;
}
if (adsp_app_ctx_add(app_args.ctx2)) {
dev_info(dev, "adsp_consol: unable to add %s ctx\n",
app_args.app_name);
return -EINVAL;
}
#else
app_args.ctx1 = (uint64_t)nvadsp_app_load(app_args.app_path,
app_args.app_name);
if (!app_args.ctx1) {
dev_info(dev,
"adsp_consol: dynamic app load failed %s\n",
app_args.app_name);
return -EINVAL;
}
if (adsp_app_ctx_add(app_args.ctx1)) {
dev_info(dev, "adsp_consol: unable to add %s ctx\n",
app_args.app_name);
return -EINVAL;
}
dev_info(dev, "adsp_consol: calling nvadsp_app_init\n");
app_args.ctx2 =
(uint64_t)nvadsp_app_init((void *)app_args.ctx1, NULL);
if (!app_args.ctx2) {
dev_info(dev,
"adsp_consol: unable to initilize the app\n");
return -EINVAL;
}
if (adsp_app_ctx_add(app_args.ctx2)) {
dev_info(dev, "adsp_consol: unable to add %s ctx\n",
app_args.app_name);
return -EINVAL;
}
dev_info(dev, "adsp_consol: calling nvadsp_app_start\n");
ret = nvadsp_app_start((void *)app_args.ctx2);
if (ret) {
dev_info(dev, "adsp_consol: unable to start the app\n");
break;
}
#endif
ret = copy_to_user((void __user *) arg, &app_args,
sizeof(struct adsp_consol_run_app_arg_t));
if (ret)
ret = -EACCES;
break;
case _IOC_NR(ADSP_CNSL_STOP_APP):
if (!access_ok(0, uarg,
sizeof(struct adsp_consol_run_app_arg_t)))
return -EACCES;
ret = copy_from_user(&app_args, uarg,
sizeof(app_args));
if (ret) {
ret = -EACCES;
break;
}
#ifdef USE_RUN_APP_API
if (!app_args.ctx2) {
ret = -EACCES;
break;
}
if (adsp_app_ctx_check(app_args.ctx2)) {
dev_info(dev, "adsp_consol: unable to check %s ctx\n",
app_args.app_name);
return -EINVAL;
}
app_args.ctx1 = (uint64_t)
((nvadsp_app_info_t *)app_args.ctx2)->handle;
nvadsp_exit_app((nvadsp_app_info_t *)app_args.ctx2, false);
nvadsp_app_unload((const void *)app_args.ctx1);
adsp_app_ctx_remove(app_args.ctx2);
#else
if ((!app_args.ctx2) || (!app_args.ctx1)) {
ret = -EACCES;
break;
}
if (adsp_app_ctx_check(app_args.ctx2) ||
adsp_app_ctx_check(app_args.ctx1)) {
dev_info(dev, "adsp_consol: unable to check %s ctx\n",
app_args.app_name);
return -EINVAL;
}
nvadsp_app_deinit((void *)app_args.ctx2);
nvadsp_app_unload((void *)app_args.ctx1);
adsp_app_ctx_remove(app_args.ctx2);
adsp_app_ctx_remove(app_args.ctx1);
#endif
break;
case _IOC_NR(ADSP_CNSL_CLR_BUFFER):
break;
case _IOC_NR(ADSP_CNSL_OPN_MBX):
if (!access_ok(0, uarg, sizeof(ctx2)))
return -EACCES;
ret = copy_from_user(&ctx2, uarg, sizeof(ctx2));
if (ret) {
ret = -EACCES;
break;
}
if (adsp_app_ctx_check(ctx2)) {
dev_info(dev, "adsp_consol: unable to check ctx\n");
return -EINVAL;
}
app_info = (nvadsp_app_info_t *)ctx2;
if (app_info && app_info->mem.shared) {
mid = (short *)(app_info->mem.shared);
dev_info(dev, "adsp_consol: open %x\n", *mid);
mbxid = *mid;
}
ret = nvadsp_mbox_open(&console->app_mbox, &mbxid,
"app_mbox", NULL, NULL);
if (ret) {
pr_err("adsp_consol: Failed to open app mailbox");
ret = -EACCES;
}
break;
case _IOC_NR(ADSP_CNSL_CLOSE_MBX):
mbox = &console->app_mbox;
while (!nvadsp_mbox_recv(mbox, &data, 0, 0))
;
ret = nvadsp_mbox_close(mbox);
if (ret)
break;
memset(mbox, 0, sizeof(struct nvadsp_mbox));
break;
case _IOC_NR(ADSP_CNSL_PUT_MBX):
if (!access_ok(0, uarg,
sizeof(uint32_t)))
return -EACCES;
ret = copy_from_user(&data, uarg,
sizeof(uint32_t));
if (ret) {
ret = -EACCES;
break;
}
ret = nvadsp_mbox_send(&console->app_mbox, data,
NVADSP_MBOX_SMSG, 0, 0);
break;
case _IOC_NR(ADSP_CNSL_GET_MBX):
if (!access_ok(0, uarg,
sizeof(uint32_t)))
return -EACCES;
ret = nvadsp_mbox_recv(&console->app_mbox, &data, 0, 0);
if (ret)
break;
ret = copy_to_user(uarg, &data,
sizeof(uint32_t));
if (ret)
ret = -EACCES;
break;
case _IOC_NR(ADSP_CNSL_PUT_DATA):
if (!access_ok(0, uarg,
sizeof(struct adsp_consol_run_app_arg_t)))
return -EACCES;
ret = copy_from_user(&data, uarg, sizeof(uint32_t));
if (ret) {
ret = -EACCES;
break;
}
return nvadsp_mbox_send(&console->shl_snd_mbox, data,
NVADSP_MBOX_SMSG, 0, 0);
break;
default:
dev_info(dev, "adsp_consol: invalid command\n");
return -EINVAL;
}
return ret;
}
static const struct file_operations adsp_console_operations = {
.open = adsp_consol_open,
.release = adsp_consol_close,
#ifdef CONFIG_COMPAT
.compat_ioctl = adsp_consol_ioctl,
#endif
.unlocked_ioctl = adsp_consol_ioctl
};
int
adsp_create_cnsl(struct dentry *adsp_debugfs_root, struct nvadsp_cnsl *cnsl)
{
int ret = 0;
struct device *dev = cnsl->dev;
if (IS_ERR_OR_NULL(adsp_debugfs_root)) {
ret = -ENOENT;
goto err_out;
}
if (!debugfs_create_file("adsp_console", S_IRUSR,
adsp_debugfs_root, cnsl,
&adsp_console_operations)) {
dev_err(dev,
"unable to create adsp console debug fs file\n");
ret = -ENOENT;
goto err_out;
}
memset(&cnsl->app_mbox, 0, sizeof(cnsl->app_mbox));
err_out:
return ret;
}

@@ -0,0 +1,31 @@
/*
* adsp_console_dbfs.h
*
* A header file for adsp console driver
*
* Copyright (C) 2014 NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef ADSP_CNSL_DBFS_H
#define ADSP_CNSL_DBFS_H
struct nvadsp_cnsl {
struct device *dev;
struct nvadsp_mbox shl_snd_mbox;
struct nvadsp_mbox app_mbox;
};
int
adsp_create_cnsl(struct dentry *adsp_debugfs_root, struct nvadsp_cnsl *cnsl);
#endif /* ADSP_CNSL_DBFS_H */

@@ -0,0 +1,55 @@
/*
* adsp_console_ioctl.h
*
* A header file for adsp console driver
*
* Copyright (C) 2014-2016, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef ADSP_CNSL_IOCTL_H
#define ADSP_CNSL_IOCTL_H
#include <linux/ioctl.h>
#if !defined(NVADSP_NAME_SZ)
#define NVADSP_NAME_SZ 128
#endif
#define NVADSP_NAME_SZ_MAX (NVADSP_NAME_SZ - 1)
#if !defined(ARGV_SIZE_IN_WORDS)
#define ARGV_SIZE_IN_WORDS 128
#endif
#define NV_ADSP_CONSOLE_MAGIC 'q'
struct adsp_consol_run_app_arg_t {
char app_name[NVADSP_NAME_SZ];
char app_path[NVADSP_NAME_SZ];
uint32_t args[ARGV_SIZE_IN_WORDS + 1];
uint64_t ctx1;
uint64_t ctx2;
};
#define ADSP_CNSL_LOAD _IO(NV_ADSP_CONSOLE_MAGIC, 0x01)
#define ADSP_CNSL_CLR_BUFFER _IO(NV_ADSP_CONSOLE_MAGIC, 0x02)
#define ADSP_CNSL_PUT_DATA _IOW(NV_ADSP_CONSOLE_MAGIC, 0x03, uint32_t *)
#define ADSP_CNSL_RUN_APP _IOWR(NV_ADSP_CONSOLE_MAGIC, 0x04,\
struct adsp_consol_run_app_arg_t *)
#define ADSP_CNSL_STOP_APP _IOWR(NV_ADSP_CONSOLE_MAGIC, 0x05,\
struct adsp_consol_run_app_arg_t *)
#define ADSP_CNSL_OPN_MBX _IOW(NV_ADSP_CONSOLE_MAGIC, 0x06, void *)
#define ADSP_CNSL_CLOSE_MBX _IO(NV_ADSP_CONSOLE_MAGIC, 0x07)
#define ADSP_CNSL_PUT_MBX _IOW(NV_ADSP_CONSOLE_MAGIC, 0x08, uint32_t *)
#define ADSP_CNSL_GET_MBX _IOR(NV_ADSP_CONSOLE_MAGIC, 0x09, uint32_t *)
#endif
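
For reference, a minimal user-space sketch of exercising the console ioctls defined above (not part of this import): the debugfs path and the app name are assumptions for illustration, and the node is created read-only (S_IRUSR) by the driver, so it is opened O_RDONLY by root.

/* Hypothetical user-space sketch of the ADSP console ioctl interface.
 * The debugfs path and "example_app" below are assumptions; adjust them
 * to the actual mount point of adsp_debugfs_root and a real app name.
 */
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "adsp_console_ioctl.h"

int main(void)
{
	struct adsp_consol_run_app_arg_t app = { 0 };
	int fd = open("/sys/kernel/debug/tegra_ape/adsp_console", O_RDONLY);

	if (fd < 0)
		return 1;

	/* Load and start the ADSP OS if it is not already running. */
	if (ioctl(fd, ADSP_CNSL_LOAD))
		goto out;

	/* Run an app by name; the driver returns its context in app.ctx2. */
	strncpy(app.app_name, "example_app", NVADSP_NAME_SZ_MAX);
	if (ioctl(fd, ADSP_CNSL_RUN_APP, &app))
		goto out;

	/* ... exchange words with the app via ADSP_CNSL_OPN_MBX,
	 * ADSP_CNSL_PUT_MBX and ADSP_CNSL_GET_MBX ...
	 */

	/* Stop the app again using the contexts returned above. */
	ioctl(fd, ADSP_CNSL_STOP_APP, &app);
out:
	close(fd);
	return 0;
}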

@@ -0,0 +1,330 @@
/*
* Copyright (C) 2015-2016, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
#include <linux/platform/tegra/clock.h>
#include <linux/irqchip/tegra-agic.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include "dev.h"
#define ACTMON_DEV_CTRL 0x00
#define ACTMON_DEV_CTRL_ENB (0x1 << 31)
#define ACTMON_DEV_CTRL_AT_END_ENB (0x1 << 15)
#define ACTMON_DEV_CTRL_PERIODIC_ENB (0x1 << 13)
#define ACTMON_DEV_CTRL_SAMPLE_PERIOD_VAL_SHIFT (0)
#define ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK (0xff << 0)
#define ACTMON_DEV_COUNT 0x18
#define ACTMON_DEV_INTR_STATUS 0x20
#define ACTMON_DEV_INTR_AT_END (0x1 << 27)
#define ACTMON_DEV_COUNT_WEGHT 0x24
#define ACTMON_DEV_SAMPLE_CTRL 0x28
#define ACTMON_DEV_SAMPLE_CTRL_TICK_65536 (0x1 << 2)
#define ACTMON_DEV_SAMPLE_CTRL_TICK_256 (0x0 << 1)
#define AMISC_ACTMON_0 0x54
#define AMISC_ACTMON_CNT_TARGET_ENABLE (0x1 << 31)
#define ACTMON_REG_OFFSET 0x800
/* millisecond divider as SAMPLE_TICK */
#define SAMPLE_MS_DIVIDER 65536
struct adsp_cpustat {
int irq;
struct device *device;
const char *dev_id;
spinlock_t lock;
struct clk *ape_clk;
struct clk *adsp_clk;
unsigned long ape_freq;
unsigned long adsp_freq;
u64 cur_usage;
bool enable;
u64 max_usage;
void __iomem *base;
};
static struct adsp_cpustat cpustat;
static struct adsp_cpustat *cpumon;
static inline u32 actmon_readl(u32 offset)
{
return __raw_readl(cpumon->base + offset);
}
static inline void actmon_writel(u32 val, u32 offset)
{
__raw_writel(val, cpumon->base + offset);
}
static inline void actmon_wmb(void)
{
wmb();
}
static irqreturn_t adsp_cpustat_isr(int irq, void *dev_id)
{
u32 val;
unsigned long period, flags;
spin_lock_irqsave(&cpumon->lock, flags);
val = actmon_readl(ACTMON_DEV_INTR_STATUS);
actmon_writel(val, ACTMON_DEV_INTR_STATUS);
if (val & ACTMON_DEV_INTR_AT_END) {
period = (255 * SAMPLE_MS_DIVIDER) / cpumon->ape_freq;
cpumon->cur_usage =
((u64)actmon_readl(ACTMON_DEV_COUNT) * 100) / (period * cpumon->adsp_freq);
if (cpumon->cur_usage > cpumon->max_usage)
cpumon->max_usage = cpumon->cur_usage;
}
spin_unlock_irqrestore(&cpumon->lock, flags);
return IRQ_HANDLED;
}
static void configure_actmon(void)
{
u32 val;
/* Set count weight to 256 */
actmon_writel(0x100, ACTMON_DEV_COUNT_WEGHT);
/* Enable periodic sampling */
val = actmon_readl(ACTMON_DEV_CTRL);
val |= ACTMON_DEV_CTRL_PERIODIC_ENB;
/* Set sampling period to max, i.e. 255 ape clks */
val &= ~ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK;
val |= (0xFF <<
ACTMON_DEV_CTRL_SAMPLE_PERIOD_VAL_SHIFT)
& ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK;
/* Enable the AT_END interrupt */
val |= ACTMON_DEV_CTRL_AT_END_ENB;
actmon_writel(val, ACTMON_DEV_CTRL);
actmon_writel(ACTMON_DEV_SAMPLE_CTRL_TICK_65536,
ACTMON_DEV_SAMPLE_CTRL);
actmon_wmb();
}
static void adsp_cpustat_enable(void)
{
u32 val;
unsigned long flags;
spin_lock_irqsave(&cpumon->lock, flags);
val = actmon_readl(ACTMON_DEV_CTRL);
val |= ACTMON_DEV_CTRL_ENB;
actmon_writel(val, ACTMON_DEV_CTRL);
actmon_wmb();
enable_irq(cpumon->irq);
spin_unlock_irqrestore(&cpumon->lock, flags);
}
static void adsp_cpustat_disable(void)
{
u32 val;
unsigned long flags;
spin_lock_irqsave(&cpumon->lock, flags);
disable_irq(cpumon->irq);
val = actmon_readl(ACTMON_DEV_CTRL);
val &= ~ACTMON_DEV_CTRL_ENB;
actmon_writel(val, ACTMON_DEV_CTRL);
actmon_writel(0xffffffff, ACTMON_DEV_INTR_STATUS);
actmon_wmb();
spin_unlock_irqrestore(&cpumon->lock, flags);
}
#define RW_MODE (S_IWUSR | S_IRUSR)
#define RO_MODE S_IRUSR
static int cur_usage_get(void *data, u64 *val)
{
*val = cpumon->cur_usage;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cur_usage_fops, cur_usage_get, NULL, "%llu\n");
static int max_usage_get(void *data, u64 *val)
{
*val = cpumon->max_usage;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(max_usage_fops, max_usage_get, NULL, "%llu\n");
static int enable_set(void *data, u64 val)
{
if (cpumon->enable == (bool)val)
return 0;
cpumon->enable = (bool)val;
if (cpumon->enable)
adsp_cpustat_enable();
else
adsp_cpustat_disable();
return 0;
}
static int enable_get(void *data, u64 *val)
{
*val = cpumon->enable;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(enable_fops, enable_get, enable_set, "%llu\n");
static int cpustat_debugfs_init(struct nvadsp_drv_data *drv)
{
int ret = -ENOMEM;
struct dentry *d, *dir;
if (!drv->adsp_debugfs_root)
return ret;
dir = debugfs_create_dir("adsp_cpustat", drv->adsp_debugfs_root);
if (!dir)
return ret;
d = debugfs_create_file(
"cur_usage", RO_MODE, dir, cpumon, &cur_usage_fops);
if (!d)
return ret;
d = debugfs_create_file(
"max_usage", RO_MODE, dir, cpumon, &max_usage_fops);
if (!d)
return ret;
d = debugfs_create_file(
"enable", RW_MODE, dir, cpumon, &enable_fops);
if (!d)
return ret;
return 0;
}
int adsp_cpustat_init(struct platform_device *pdev)
{
struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
static void __iomem *amisc_base;
u32 val;
int ret = -EINVAL;
if (drv->cpustat_initialized)
return 0;
cpumon = &cpustat;
spin_lock_init(&cpumon->lock);
cpumon->base = drv->base_regs[AMISC] + ACTMON_REG_OFFSET;
amisc_base = drv->base_regs[AMISC];
cpumon->ape_clk = clk_get_sys(NULL, "adsp.ape");
if (IS_ERR_OR_NULL(cpumon->ape_clk)) {
dev_err(cpumon->device, "Failed to find adsp.ape clk\n");
ret = -EINVAL;
goto err_ape_clk;
}
ret = clk_prepare_enable(cpumon->ape_clk);
if (ret) {
dev_err(cpumon->device, "Failed to enable ape clock\n");
goto err_ape_enable;
}
cpumon->ape_freq = clk_get_rate(cpumon->ape_clk) / 1000;
cpumon->adsp_clk = clk_get_sys(NULL, "adsp_cpu");
if (IS_ERR_OR_NULL(cpumon->adsp_clk)) {
dev_err(cpumon->device, "Failed to find adsp cpu clock\n");
ret = -EINVAL;
goto err_adsp_clk;
}
ret = clk_prepare_enable(cpumon->adsp_clk);
if (ret) {
dev_err(cpumon->device, "Failed to enable adsp cpu clock\n");
goto err_adsp_enable;
}
cpumon->adsp_freq = clk_get_rate(cpumon->adsp_clk) / 1000;
/* Enable AMISC_ACTMON */
val = __raw_readl(amisc_base + AMISC_ACTMON_0);
val |= AMISC_ACTMON_CNT_TARGET_ENABLE;
__raw_writel(val, amisc_base + AMISC_ACTMON_0);
/* Clear all interrupts */
actmon_writel(0xffffffff, ACTMON_DEV_INTR_STATUS);
/* One time configuration of actmon regs */
configure_actmon();
cpumon->irq = drv->agic_irqs[ACTMON_VIRQ];
ret = request_irq(cpumon->irq, adsp_cpustat_isr,
IRQ_TYPE_LEVEL_HIGH, "adsp_actmon", cpumon);
if (ret) {
dev_err(cpumon->device, "Failed irq %d request\n", cpumon->irq);
goto err_irq;
}
cpustat_debugfs_init(drv);
drv->cpustat_initialized = true;
return 0;
err_irq:
clk_disable_unprepare(cpumon->adsp_clk);
err_adsp_enable:
clk_put(cpumon->adsp_clk);
err_adsp_clk:
clk_disable_unprepare(cpumon->ape_clk);
err_ape_enable:
clk_put(cpumon->ape_clk);
err_ape_clk:
return ret;
}
int adsp_cpustat_exit(struct platform_device *pdev)
{
status_t ret = 0;
struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
if (!drv->cpustat_initialized) {
ret = -EINVAL;
goto end;
}
free_irq(cpumon->irq, cpumon);
clk_disable_unprepare(cpumon->adsp_clk);
clk_put(cpumon->adsp_clk);
clk_put(cpumon->ape_clk);
drv->cpustat_initialized = false;
end:
return ret;
}
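
As a side note on the arithmetic in adsp_cpustat_isr() above: the sample window is programmed as 255 sample periods with the tick set to 65536 APE clocks, so the handler derives the window length in milliseconds from the APE rate (in kHz) and scales the raw activity count against the ADSP cycles available in that window. A stand-alone restatement of that computation, for illustration only and using the same constants as the driver:

/* Illustrative restatement of the usage calculation in adsp_cpustat_isr();
 * frequencies are in kHz, matching cpumon->ape_freq and cpumon->adsp_freq.
 */
#include <stdint.h>

static uint64_t adsp_usage_percent(uint32_t count, unsigned long ape_khz,
				   unsigned long adsp_khz)
{
	/* 255 sample periods of 65536 APE clocks each, expressed in ms */
	unsigned long period_ms = (255UL * 65536UL) / ape_khz;

	/* activity count scaled against the ADSP cycles in that window */
	return ((uint64_t)count * 100) / (period_ms * adsp_khz);
}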

@@ -0,0 +1,847 @@
/*
* adsp_dfs.c
*
* adsp dynamic frequency scaling
*
* Copyright (C) 2014-2016, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/tegra_nvadsp.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/clk/tegra.h>
#include <linux/seq_file.h>
#include <asm/cputime.h>
#include <linux/slab.h>
#include "dev.h"
#include "ape_actmon.h"
#include "os.h"
#ifndef CONFIG_TEGRA_ADSP_ACTMON
void actmon_rate_change(unsigned long freq, bool override)
{
}
#endif
#define MBOX_TIMEOUT 5000 /* in ms */
#define HOST_ADSP_DFS_MBOX_ID 3
enum adsp_dfs_reply {
ACK,
NACK,
};
/*
* Frequency in Hz. The frequency always needs to be a multiple of 12.8 MHz and
* should be extended with a slab of 38.4 MHz.
*/
static unsigned long adsp_cpu_freq_table[] = {
MIN_ADSP_FREQ,
MIN_ADSP_FREQ * 2,
MIN_ADSP_FREQ * 3,
MIN_ADSP_FREQ * 4,
MIN_ADSP_FREQ * 5,
MIN_ADSP_FREQ * 6,
MIN_ADSP_FREQ * 7,
MIN_ADSP_FREQ * 8,
MIN_ADSP_FREQ * 9,
MIN_ADSP_FREQ * 10,
MIN_ADSP_FREQ * 11,
MIN_ADSP_FREQ * 12,
MIN_ADSP_FREQ * 13,
MIN_ADSP_FREQ * 14,
MIN_ADSP_FREQ * 15,
MIN_ADSP_FREQ * 16,
MIN_ADSP_FREQ * 17,
MIN_ADSP_FREQ * 18,
MIN_ADSP_FREQ * 19,
MIN_ADSP_FREQ * 20,
MIN_ADSP_FREQ * 21,
};
struct adsp_dfs_policy {
bool enable;
/* update_freq_flag = TRUE, ADSP ACKed the new freq
* = FALSE, ADSP NACKed the new freq
*/
bool update_freq_flag;
const char *clk_name;
unsigned long min; /* in kHz */
unsigned long max; /* in kHz */
unsigned long cur; /* in kHz */
unsigned long cpu_min; /* ADSP min freq(KHz). Remain unchanged */
unsigned long cpu_max; /* ADSP max freq(KHz). Remain unchanged */
struct clk *adsp_clk;
struct notifier_block rate_change_nb;
struct nvadsp_mbox mbox;
#ifdef CONFIG_DEBUG_FS
struct dentry *root;
#endif
unsigned long ovr_freq;
};
struct adsp_freq_stats {
struct device *dev;
unsigned long long last_time;
int last_index;
u64 time_in_state[sizeof(adsp_cpu_freq_table) \
/ sizeof(adsp_cpu_freq_table[0])];
int state_num;
};
static struct adsp_dfs_policy *policy;
static struct adsp_freq_stats freq_stats;
static struct device *device;
static struct clk *ape_emc_clk;
static DEFINE_MUTEX(policy_mutex);
static bool is_os_running(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
if (!drv_data->adsp_os_running) {
dev_dbg(&pdev->dev, "%s: adsp os is not loaded\n", __func__);
return false;
}
return true;
}
/* Expects and returns freq in Hz as the table is formed in terms of Hz */
static unsigned long adsp_get_target_freq(unsigned long tfreq, int *index)
{
int i;
int size = sizeof(adsp_cpu_freq_table) / sizeof(adsp_cpu_freq_table[0]);
if (tfreq <= adsp_cpu_freq_table[0]) {
*index = 0;
return adsp_cpu_freq_table[0];
}
if (tfreq >= adsp_cpu_freq_table[size - 1]) {
*index = size - 1;
return adsp_cpu_freq_table[size - 1];
}
for (i = 1; i < size; i++) {
if ((tfreq <= adsp_cpu_freq_table[i]) &&
(tfreq > adsp_cpu_freq_table[i - 1])) {
*index = i;
return adsp_cpu_freq_table[i];
}
}
return 0;
}
static void adspfreq_stats_update(void)
{
unsigned long long cur_time;
cur_time = get_jiffies_64();
freq_stats.time_in_state[freq_stats.last_index] += cur_time -
freq_stats.last_time;
freq_stats.last_time = cur_time;
}
/* adsp clock rate change notifier callback */
static int adsp_dfs_rc_callback(
struct notifier_block *nb, unsigned long rate, void *v)
{
unsigned long freq = rate / 1000;
int old_index, new_index = 0;
/* update states */
adspfreq_stats_update();
old_index = freq_stats.last_index;
adsp_get_target_freq(rate, &new_index);
if (old_index != new_index)
freq_stats.last_index = new_index;
if (policy->ovr_freq && freq == policy->ovr_freq) {
/* Re-init ACTMON when user requested override freq is met */
actmon_rate_change(freq, true);
policy->ovr_freq = 0;
} else
actmon_rate_change(freq, false);
return NOTIFY_OK;
};
static struct adsp_dfs_policy dfs_policy = {
.enable = 1,
.clk_name = "adsp_cpu",
.rate_change_nb = {
.notifier_call = adsp_dfs_rc_callback,
},
};
/*
* update_freq - update the adsp freq and ask the adsp to adjust its timer
* for the change in adsp freq.
* tfreq - target frequency in KHz
* return - final freq got set.
* - 0, in case of error.
*
* Note - Policy->cur would be updated via rate
* change notifier, when freq is changed in hw
*
*/
static unsigned long update_freq(unsigned long tfreq)
{
u32 efreq;
int index;
int ret;
unsigned long old_freq;
enum adsp_dfs_reply reply;
struct nvadsp_mbox *mbx = &policy->mbox;
struct nvadsp_drv_data *drv = dev_get_drvdata(device);
tfreq = adsp_get_target_freq(tfreq * 1000, &index);
if (!tfreq) {
dev_info(device, "unable get the target freq\n");
return 0;
}
old_freq = policy->cur;
if ((tfreq / 1000) == old_freq) {
dev_dbg(device, "old and new target_freq is same\n");
return 0;
}
ret = clk_set_rate(policy->adsp_clk, tfreq);
if (ret) {
dev_err(device, "failed to set adsp freq:%d\n", ret);
policy->update_freq_flag = false;
return 0;
}
efreq = adsp_to_emc_freq(tfreq / 1000);
if (IS_ENABLED(CONFIG_COMMON_CLK)) {
tegra_bwmgr_set_emc(drv->bwmgr, efreq * 1000,
TEGRA_BWMGR_SET_EMC_FLOOR);
} else {
ret = clk_set_rate(ape_emc_clk, efreq * 1000);
if (ret) {
dev_err(device, "failed to set ape.emc clk:%d\n", ret);
policy->update_freq_flag = false;
goto err_out;
}
}
dev_dbg(device, "sending change in freq:%lu\n", tfreq);
/*
* Ask adsp to do action upon change in freq. ADSP and Host need to
* maintain the same freq table.
*/
ret = nvadsp_mbox_send(mbx, index,
NVADSP_MBOX_SMSG, true, 100);
if (ret) {
dev_err(device, "%s:host to adsp, mbox_send failure. ret:%d\n",
__func__, ret);
policy->update_freq_flag = false;
goto err_out;
}
ret = nvadsp_mbox_recv(&policy->mbox, &reply, true, MBOX_TIMEOUT);
if (ret) {
dev_err(device, "%s:host to adsp, mbox_receive failure. ret:%d\n",
__func__, ret);
policy->update_freq_flag = false;
goto err_out;
}
switch (reply) {
case ACK:
/* Set Update freq flag */
dev_dbg(device, "adsp freq change status:ACK\n");
policy->update_freq_flag = true;
break;
case NACK:
/* Set Update freq flag */
dev_dbg(device, "adsp freq change status:NACK\n");
policy->update_freq_flag = false;
break;
default:
dev_err(device, "Error: adsp freq change status\n");
}
dev_dbg(device, "%s:status received from adsp: %s, tfreq:%lu\n", __func__,
(policy->update_freq_flag == true ? "ACK" : "NACK"), tfreq);
err_out:
if (!policy->update_freq_flag) {
ret = clk_set_rate(policy->adsp_clk, old_freq * 1000);
if (ret) {
dev_err(device, "failed to resume adsp freq:%lu\n", old_freq);
policy->update_freq_flag = false;
}
efreq = adsp_to_emc_freq(old_freq / 1000);
if (IS_ENABLED(CONFIG_COMMON_CLK)) {
tegra_bwmgr_set_emc(drv->bwmgr, efreq * 1000,
TEGRA_BWMGR_SET_EMC_FLOOR);
} else {
ret = clk_set_rate(ape_emc_clk, efreq * 1000);
if (ret) {
dev_err(device,
"failed to set ape.emc clk:%d\n", ret);
policy->update_freq_flag = false;
}
}
tfreq = old_freq;
}
return tfreq / 1000;
}
/* Set adsp dfs policy min freq(Khz) */
static int policy_min_set(void *data, u64 val)
{
int ret = -EINVAL;
unsigned long min = (unsigned long)val;
if (!is_os_running(device))
return ret;
mutex_lock(&policy_mutex);
if (!policy->enable) {
dev_err(device, "adsp dfs policy is not enabled\n");
goto exit_out;
}
if (min == policy->min)
goto exit_out;
else if (min < policy->cpu_min)
min = policy->cpu_min;
else if (min >= policy->cpu_max)
min = policy->cpu_max;
if (min > policy->cur) {
min = update_freq(min);
if (min)
policy->cur = min;
}
if (min)
policy->min = min;
ret = 0;
exit_out:
mutex_unlock(&policy_mutex);
return ret;
}
#ifdef CONFIG_DEBUG_FS
#define RW_MODE (S_IWUSR | S_IRUSR)
#define RO_MODE S_IRUSR
/* Get adsp dfs staus: 0: disabled, 1: enabled */
static int dfs_enable_get(void *data, u64 *val)
{
mutex_lock(&policy_mutex);
*val = policy->enable;
mutex_unlock(&policy_mutex);
return 0;
}
/* Enable/disable adsp dfs */
static int dfs_enable_set(void *data, u64 val)
{
mutex_lock(&policy_mutex);
policy->enable = (bool) val;
mutex_unlock(&policy_mutex);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(enable_fops, dfs_enable_get,
dfs_enable_set, "%llu\n");
/* Get adsp dfs policy min freq(KHz) */
static int policy_min_get(void *data, u64 *val)
{
if (!is_os_running(device))
return -EINVAL;
mutex_lock(&policy_mutex);
*val = policy->min;
mutex_unlock(&policy_mutex);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(min_fops, policy_min_get,
policy_min_set, "%llu\n");
/* Get adsp dfs policy max freq(KHz) */
static int policy_max_get(void *data, u64 *val)
{
if (!is_os_running(device))
return -EINVAL;
mutex_lock(&policy_mutex);
*val = policy->max;
mutex_unlock(&policy_mutex);
return 0;
}
/* Set adsp dfs policy max freq(KHz) */
static int policy_max_set(void *data, u64 val)
{
int ret = -EINVAL;
unsigned long max = (unsigned long)val;
if (!is_os_running(device))
return ret;
mutex_lock(&policy_mutex);
if (!policy->enable) {
dev_err(device, "adsp dfs policy is not enabled\n");
goto exit_out;
}
if (!max || ((max > policy->cpu_max) || (max == policy->max)))
goto exit_out;
else if (max <= policy->cpu_min)
max = policy->cpu_min;
if (max < policy->cur)
max = update_freq(max);
if (max)
policy->cur = policy->max = max;
ret = 0;
exit_out:
mutex_unlock(&policy_mutex);
return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(max_fops, policy_max_get,
policy_max_set, "%llu\n");
/* Get adsp dfs policy's current freq */
static int policy_cur_get(void *data, u64 *val)
{
if (!is_os_running(device))
return -EINVAL;
mutex_lock(&policy_mutex);
*val = policy->cur;
mutex_unlock(&policy_mutex);
return 0;
}
/* Set adsp dfs policy cur freq(Khz) */
static int policy_cur_set(void *data, u64 val)
{
int ret = -EINVAL;
unsigned long cur = (unsigned long)val;
if (!is_os_running(device))
return ret;
mutex_lock(&policy_mutex);
if (policy->enable) {
dev_err(device, "adsp dfs is enabled, should be disabled first\n");
goto exit_out;
}
if (!cur || cur == policy->cur)
goto exit_out;
/* Check tfreq policy sanity */
if (cur < policy->min)
cur = policy->min;
else if (cur > policy->max)
cur = policy->max;
cur = update_freq(cur);
if (cur)
policy->cur = cur;
ret = 0;
exit_out:
mutex_unlock(&policy_mutex);
return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(cur_fops, policy_cur_get,
policy_cur_set, "%llu\n");
/*
* Print residency in each freq levels
*/
static void dump_stats_table(struct seq_file *s, struct adsp_freq_stats *fstats)
{
int i;
mutex_lock(&policy_mutex);
if (is_os_running(device))
adspfreq_stats_update();
for (i = 0; i < fstats->state_num; i++) {
seq_printf(s, "%lu %llu\n",
(long unsigned int)(adsp_cpu_freq_table[i] / 1000),
cputime64_to_clock_t(fstats->time_in_state[i]));
}
mutex_unlock(&policy_mutex);
}
static int show_time_in_state(struct seq_file *s, void *data)
{
struct adsp_freq_stats *fstats =
(struct adsp_freq_stats *) (s->private);
dump_stats_table(s, fstats);
return 0;
}
static int stats_open(struct inode *inode, struct file *file)
{
return single_open(file, show_time_in_state, inode->i_private);
}
static const struct file_operations time_in_state_fops = {
.open = stats_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int adsp_dfs_debugfs_init(struct platform_device *pdev)
{
int ret = -ENOMEM;
struct dentry *d, *root;
struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
if (!drv->adsp_debugfs_root)
return ret;
root = debugfs_create_dir("adsp_dfs", drv->adsp_debugfs_root);
if (!root)
return ret;
policy->root = root;
d = debugfs_create_file("enable", RW_MODE, root, NULL,
&enable_fops);
if (!d)
goto err_out;
d = debugfs_create_file("min_freq", RW_MODE, root, NULL,
&min_fops);
if (!d)
goto err_out;
d = debugfs_create_file("max_freq", RW_MODE, root,
NULL, &max_fops);
if (!d)
goto err_out;
d = debugfs_create_file("cur_freq", RW_MODE, root, NULL,
&cur_fops);
if (!d)
goto err_out;
d = debugfs_create_file("time_in_state", RO_MODE,
root, &freq_stats,
&time_in_state_fops);
if (!d)
goto err_out;
return 0;
err_out:
debugfs_remove_recursive(root);
policy->root = NULL;
dev_err(&pdev->dev,
"unable to create adsp logger debug fs file\n");
return ret;
}
#endif
/*
* Set target freq.
* @params:
* freq: adsp freq in KHz
*/
void adsp_cpu_set_rate(unsigned long freq)
{
mutex_lock(&policy_mutex);
if (!policy->enable) {
dev_dbg(device, "adsp dfs policy is not enabled\n");
goto exit_out;
}
if (freq < policy->min)
freq = policy->min;
else if (freq > policy->max)
freq = policy->max;
freq = update_freq(freq);
if (freq)
policy->cur = freq;
exit_out:
mutex_unlock(&policy_mutex);
}
/*
* Override adsp freq and reinit actmon counters
*
* @params:
* freq: adsp freq in KHz
* return - final freq got set.
* - 0, in case of error.
*
*/
unsigned long adsp_override_freq(unsigned long freq)
{
int index;
unsigned long ret_freq = 0;
mutex_lock(&policy_mutex);
if (freq < policy->min)
freq = policy->min;
else if (freq > policy->max)
freq = policy->max;
freq = adsp_get_target_freq(freq * 1000, &index);
if (!freq) {
dev_warn(device, "unable get the target freq\n");
goto exit_out;
}
freq = freq / 1000; /* In KHz */
if (freq == policy->cur) {
ret_freq = freq;
goto exit_out;
}
policy->ovr_freq = freq;
ret_freq = update_freq(freq);
if (ret_freq)
policy->cur = ret_freq;
if (ret_freq != freq) {
dev_warn(device, "freq override to %lu rejected\n", freq);
policy->ovr_freq = 0;
goto exit_out;
}
exit_out:
mutex_unlock(&policy_mutex);
return ret_freq;
}
/*
* Set min ADSP freq.
*
* @params:
* freq: adsp freq in KHz
*/
void adsp_update_dfs_min_rate(unsigned long freq)
{
policy_min_set(NULL, freq);
}
/* Enable / disable dynamic freq scaling */
void adsp_update_dfs(bool val)
{
mutex_lock(&policy_mutex);
policy->enable = val;
mutex_unlock(&policy_mutex);
}
/* Should be called after ADSP os is loaded */
int adsp_dfs_core_init(struct platform_device *pdev)
{
int size = sizeof(adsp_cpu_freq_table) / sizeof(adsp_cpu_freq_table[0]);
struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
uint16_t mid = HOST_ADSP_DFS_MBOX_ID;
int ret = 0;
u32 efreq;
if (drv->dfs_initialized)
return 0;
device = &pdev->dev;
policy = &dfs_policy;
if (IS_ENABLED(CONFIG_COMMON_CLK))
policy->adsp_clk = devm_clk_get(device, "adsp");
else
policy->adsp_clk = clk_get_sys(NULL, policy->clk_name);
if (IS_ERR_OR_NULL(policy->adsp_clk)) {
dev_err(&pdev->dev, "unable to find adsp clock\n");
ret = PTR_ERR(policy->adsp_clk);
goto end;
}
if (IS_ENABLED(CONFIG_COMMON_CLK)) {
drv->bwmgr = tegra_bwmgr_register(TEGRA_BWMGR_CLIENT_APE_ADSP);
if (IS_ERR_OR_NULL(drv->bwmgr)) {
dev_err(&pdev->dev, "unable to register bwmgr\n");
ret = PTR_ERR(drv->bwmgr);
goto end;
}
} else {
/* Change emc freq as per the adsp to emc lookup table */
ape_emc_clk = clk_get_sys("ape", "emc");
if (IS_ERR_OR_NULL(ape_emc_clk)) {
dev_err(device, "unable to find ape.emc clock\n");
ret = PTR_ERR(ape_emc_clk);
goto end;
}
ret = clk_prepare_enable(ape_emc_clk);
if (ret) {
dev_err(device, "unable to enable ape.emc clock\n");
goto end;
}
}
policy->max = policy->cpu_max = drv->adsp_freq; /* adsp_freq in KHz */
policy->min = policy->cpu_min = adsp_cpu_freq_table[0] / 1000;
policy->cur = clk_get_rate(policy->adsp_clk) / 1000;
efreq = adsp_to_emc_freq(policy->cur);
if (IS_ENABLED(CONFIG_COMMON_CLK)) {
tegra_bwmgr_set_emc(drv->bwmgr, efreq * 1000,
TEGRA_BWMGR_SET_EMC_FLOOR);
} else {
ret = clk_set_rate(ape_emc_clk, efreq * 1000);
if (ret) {
dev_err(device, "failed to set ape.emc clk:%d\n", ret);
goto end;
}
}
adsp_get_target_freq(policy->cur * 1000, &freq_stats.last_index);
freq_stats.last_time = get_jiffies_64();
freq_stats.state_num = size;
freq_stats.dev = &pdev->dev;
memset(&freq_stats.time_in_state, 0, sizeof(freq_stats.time_in_state));
ret = nvadsp_mbox_open(&policy->mbox, &mid, "dfs_comm", NULL, NULL);
if (ret) {
dev_info(&pdev->dev, "unable to open mailbox. ret:%d\n", ret);
goto end;
}
#if !defined(CONFIG_COMMON_CLK)
if (policy->rate_change_nb.notifier_call) {
/*
* "adsp_cpu" clk is a shared user of parent adsp_cpu_bus clk;
* rate change notification should come from bus clock itself.
*/
struct clk *p = clk_get_parent(policy->adsp_clk);
if (!p) {
dev_err(&pdev->dev, "Failed to find adsp cpu parent clock\n");
ret = -EINVAL;
goto end;
}
ret = tegra_register_clk_rate_notifier(p,
&policy->rate_change_nb);
if (ret) {
dev_err(&pdev->dev, "rate change notifier err: %s\n",
policy->clk_name);
nvadsp_mbox_close(&policy->mbox);
goto end;
}
}
#endif
#ifdef CONFIG_DEBUG_FS
adsp_dfs_debugfs_init(pdev);
#endif
drv->dfs_initialized = true;
dev_dbg(&pdev->dev, "adsp dfs initialized ....\n");
return ret;
end:
if (policy->adsp_clk) {
if (IS_ENABLED(CONFIG_COMMON_CLK))
devm_clk_put(&pdev->dev, policy->adsp_clk);
else
clk_put(policy->adsp_clk);
}
if (IS_ENABLED(CONFIG_COMMON_CLK) && drv->bwmgr) {
tegra_bwmgr_set_emc(drv->bwmgr, 0,
TEGRA_BWMGR_SET_EMC_FLOOR);
tegra_bwmgr_unregister(drv->bwmgr);
} else if (ape_emc_clk) {
clk_disable_unprepare(ape_emc_clk);
clk_put(ape_emc_clk);
}
return ret;
}
int adsp_dfs_core_exit(struct platform_device *pdev)
{
status_t ret = 0;
struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
/* return if dfs is not initialized */
if (!drv->dfs_initialized)
return -ENODEV;
ret = nvadsp_mbox_close(&policy->mbox);
if (ret)
dev_info(&pdev->dev,
"adsp dfs exit failed: mbox close error. ret:%d\n", ret);
#if !defined(CONFIG_COMMON_CLK)
tegra_unregister_clk_rate_notifier(clk_get_parent(policy->adsp_clk),
&policy->rate_change_nb);
#endif
if (policy->adsp_clk) {
if (IS_ENABLED(CONFIG_COMMON_CLK))
devm_clk_put(&pdev->dev, policy->adsp_clk);
else
clk_put(policy->adsp_clk);
}
if (IS_ENABLED(CONFIG_COMMON_CLK) && drv->bwmgr) {
tegra_bwmgr_set_emc(drv->bwmgr, 0,
TEGRA_BWMGR_SET_EMC_FLOOR);
tegra_bwmgr_unregister(drv->bwmgr);
} else if (ape_emc_clk) {
clk_disable_unprepare(ape_emc_clk);
clk_put(ape_emc_clk);
}
drv->dfs_initialized = false;
dev_dbg(&pdev->dev, "adsp dfs has exited ....\n");
return ret;
}
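
For in-kernel users of this file, a minimal sketch of driving the DFS hooks defined above (adsp_update_dfs(), adsp_update_dfs_min_rate(), adsp_override_freq()); the caller, its decision logic and the frequencies are illustrative only, and the prototypes are assumed to be visible via the nvadsp headers already included in this file.

/* Illustrative caller of the ADSP DFS hooks defined in adsp_dfs.c; the
 * function and its policy decisions are made up for this sketch.
 */
static void example_adsp_perf_hint(unsigned long target_khz, bool pin_freq)
{
	if (pin_freq) {
		/* Stop the governor from changing the rate, then force it. */
		adsp_update_dfs(false);
		adsp_override_freq(target_khz);
	} else {
		/* Keep DFS active but raise its floor to target_khz. */
		adsp_update_dfs_min_rate(target_khz);
		adsp_update_dfs(true);
	}
}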

@@ -0,0 +1,142 @@
/*
* adsp_shared_struct.h
*
* A header file containing shared data structures shared with ADSP OS
*
* Copyright (C) 2015-2016 NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __ADSP_SHARED_STRUCT
#define __ADSP_SHARED_STRUCT
#include <linux/tegra_nvadsp.h>
#define APP_LOADER_MBOX_ID 1
#define ADSP_APP_FLAG_START_ON_BOOT 0x1
#define ADSP_OS_LOAD_TIMEOUT 5000 /* 5000 ms */
#define DRAM_DEBUG_LOG_SIZE 0x100000
#define NVADSP_NAME_SZ 128
struct app_mem_size {
uint64_t dram;
uint64_t dram_shared;
uint64_t dram_shared_wc;
uint64_t aram;
uint64_t aram_x;
} __packed;
struct adsp_shared_app {
char name[NVADSP_NAME_SZ];
struct app_mem_size mem_size;
int32_t mod_ptr;
int32_t flags;
int32_t dram_data_ptr;
int32_t shared_data_ptr;
int32_t shared_wc_data_ptr;
} __packed;
/* ADSP app loader message queue */
struct run_app_instance_data {
uint32_t adsp_mod_ptr;
uint64_t host_ref;
uint32_t adsp_ref;
uint32_t dram_data_ptr;
uint32_t dram_shared_ptr;
uint32_t dram_shared_wc_ptr;
uint32_t aram_ptr;
uint32_t aram_flag;
uint32_t aram_x_ptr;
uint32_t aram_x_flag;
struct app_mem_size mem_size;
nvadsp_app_args_t app_args;
uint32_t stack_size;
uint32_t message;
} __packed;
struct app_loader_data {
int32_t header[MSGQ_MESSAGE_HEADER_WSIZE];
struct run_app_instance_data app_init;
} __packed;
union app_loader_message {
msgq_message_t msgq_msg;
struct app_loader_data data;
} __aligned(4);
struct adsp_os_message_header {
int32_t header[MSGQ_MESSAGE_HEADER_WSIZE];
uint32_t message;
} __packed;
/* ADSP app complete message queue */
struct app_complete_status_data {
struct adsp_os_message_header header;
uint64_t host_ref;
uint32_t adsp_ref;
int32_t status;
} __packed;
struct adsp_static_app_data {
struct adsp_os_message_header header;
struct adsp_shared_app shared_app;
} __packed;
union app_complete_status_message {
msgq_message_t msgq_msg;
struct app_complete_status_data complete_status_data;
struct adsp_static_app_data static_app_data;
} __aligned(4);
/* ADSP message pool structure */
union app_loader_msgq {
msgq_t msgq;
struct {
int32_t header[MSGQ_HEADER_WSIZE];
int32_t queue[MSGQ_MAX_QUEUE_WSIZE];
};
};
/* ADSP APP shared message pool */
struct nvadsp_app_shared_msg_pool {
union app_loader_msgq app_loader_send_message;
union app_loader_msgq app_loader_recv_message;
} __packed;
/* ADSP shared OS args */
struct nvadsp_os_args {
int32_t timer_prescalar;
char logger[DRAM_DEBUG_LOG_SIZE];
uint64_t adsp_freq_hz;
char reserved[128];
} __packed;
/* ADSP OS info/status. Keep in sync with firmware. */
#define MAX_OS_VERSION_BUF 32
struct nvadsp_os_info {
char version[MAX_OS_VERSION_BUF];
char reserved[128];
} __packed;
/* ADSP OS shared memory */
struct nvadsp_shared_mem {
struct nvadsp_app_shared_msg_pool app_shared_msg_pool;
struct nvadsp_os_args os_args;
struct nvadsp_os_info os_info;
} __packed;
#endif /* __ADSP_SHARED_STRUCT */

@@ -0,0 +1,422 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/fs.h>
#include <asm/segment.h>
#include <asm/uaccess.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/tegra_nvadsp.h>
#include "adspff.h"
struct file_struct {
struct file *fp;
unsigned long long wr_offset;
unsigned long long rd_offset;
};
static spinlock_t adspff_lock;
/******************************************************************************
* Kernel file functions
******************************************************************************/
struct file *file_open(const char *path, int flags, int rights)
{
struct file *filp = NULL;
mm_segment_t oldfs;
int err = 0;
oldfs = get_fs();
set_fs(get_ds());
filp = filp_open(path, flags, rights);
set_fs(oldfs);
if (IS_ERR(filp)) {
err = PTR_ERR(filp);
return NULL;
}
return filp;
}
void file_close(struct file *file)
{
filp_close(file, NULL);
}
int file_write(struct file *file, unsigned long long *offset,
unsigned char *data, unsigned int size)
{
mm_segment_t oldfs;
int ret = 0;
oldfs = get_fs();
set_fs(get_ds());
ret = vfs_write(file, data, size, offset);
set_fs(oldfs);
return ret;
}
uint32_t file_read(struct file *file, unsigned long long *offset,
unsigned char *data, unsigned int size)
{
mm_segment_t oldfs;
uint32_t ret = 0;
oldfs = get_fs();
set_fs(get_ds());
ret = vfs_read(file, data, size, offset);
set_fs(oldfs);
return ret;
}
/******************************************************************************
* ADSPFF file functions
******************************************************************************/
static struct adspff_shared_state_t *adspff;
static struct nvadsp_mbox rx_mbox;
/*
 * fopen() mode string to open() flag mapping:
 * r  - open for reading
 * w  - open for writing (file need not exist)
 * a  - open for appending (file need not exist)
 * r+ - open for reading and writing, start at beginning
 * w+ - open for reading and writing (overwrite file)
 * a+ - open for reading and writing (append if file exists)
 */
void set_flags(union adspff_message_t *m, int *flags)
{
if (0 == strcmp(m->msg.payload.fopen_msg.modes, "r+"))
*flags = O_RDWR;
else if (0 == strcmp(m->msg.payload.fopen_msg.modes, "w+"))
*flags = O_CREAT | O_RDWR | O_TRUNC;
else if (0 == strcmp(m->msg.payload.fopen_msg.modes, "a+"))
*flags = O_APPEND | O_RDWR;
else if (0 == strcmp(m->msg.payload.fopen_msg.modes, "r"))
*flags = O_RDONLY;
else if (0 == strcmp(m->msg.payload.fopen_msg.modes, "w"))
*flags = O_CREAT | O_WRONLY | O_TRUNC;
else if (0 == strcmp(m->msg.payload.fopen_msg.modes, "a"))
*flags = O_CREAT | O_APPEND | O_WRONLY;
else
*flags = O_CREAT | O_RDWR;
}
void adspff_fopen(struct work_struct *work)
{
union adspff_message_t *message;
union adspff_message_t *msg_recv;
int flags = 0, ret = 0;
struct file_struct *file = kzalloc(sizeof(struct file_struct), GFP_KERNEL);
message = kzalloc(sizeof(union adspff_message_t), GFP_KERNEL);
msg_recv = kzalloc(sizeof(union adspff_message_t), GFP_KERNEL);
message->msgq_msg.size = MSGQ_MSG_SIZE(struct fopen_msg_t);
ret = msgq_dequeue_message(&adspff->msgq_send.msgq,
(msgq_message_t *)message);
if (ret < 0) {
pr_err("fopen Dequeue failed %d.", ret);
kfree(message);
kfree(msg_recv);
return;
}
set_flags(message, &flags);
file->fp = file_open(
(const char *)message->msg.payload.fopen_msg.fname,
flags, S_IRWXU|S_IRWXG|S_IRWXO);
file->wr_offset = 0;
file->rd_offset = 0;
msg_recv->msgq_msg.size = MSGQ_MSG_SIZE(struct fopen_recv_msg_t);
msg_recv->msg.payload.fopen_recv_msg.file = (int64_t)file;
ret = msgq_queue_message(&adspff->msgq_recv.msgq,
(msgq_message_t *)msg_recv);
if (ret < 0) {
pr_err("fopen Enqueue failed %d.", ret);
kfree(message);
kfree(msg_recv);
return;
}
nvadsp_mbox_send(&rx_mbox, adspff_cmd_fopen_recv,
NVADSP_MBOX_SMSG, 0, 0);
kfree(message);
kfree(msg_recv);
}
void adspff_fclose(struct work_struct *work)
{
union adspff_message_t *message;
struct file_struct *file = NULL;
int32_t ret = 0;
message = kzalloc(sizeof(union adspff_message_t), GFP_KERNEL);
message->msgq_msg.size = MSGQ_MSG_SIZE(struct fclose_msg_t);
ret = msgq_dequeue_message(&adspff->msgq_send.msgq,
(msgq_message_t *)message);
if (ret < 0) {
pr_err("fclose Dequeue failed %d.", ret);
kfree(message);
return;
}
file = (struct file_struct *)message->msg.payload.fclose_msg.file;
if (file) {
file_close(file->fp);
kfree(file);
file = NULL;
}
kfree(message);
}
void adspff_fwrite(struct work_struct *work)
{
union adspff_message_t message;
union adspff_message_t *msg_recv;
struct file_struct *file = NULL;
int ret = 0;
uint32_t size = 0;
uint32_t bytes_to_write = 0;
uint32_t bytes_written = 0;
msg_recv = kzalloc(sizeof(union adspff_message_t), GFP_KERNEL);
msg_recv->msgq_msg.size = MSGQ_MSG_SIZE(struct ack_msg_t);
message.msgq_msg.size = MSGQ_MSG_SIZE(struct fwrite_msg_t);
ret = msgq_dequeue_message(&adspff->msgq_send.msgq,
(msgq_message_t *)&message);
if (ret < 0) {
pr_err("fwrite Dequeue failed %d.", ret);
return;
}
file = (struct file_struct *)message.msg.payload.fwrite_msg.file;
size = message.msg.payload.fwrite_msg.size;
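/*
 * write_buf is a shared circular buffer filled by the ADSP: copy up to
 * the end of the buffer first, then wrap around to the start for any
 * remaining bytes before advancing read_index.
 */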
bytes_to_write = ((adspff->write_buf.read_index + size) < ADSPFF_SHARED_BUFFER_SIZE) ?
size : (ADSPFF_SHARED_BUFFER_SIZE - adspff->write_buf.read_index);
ret = file_write(file->fp, &file->wr_offset,
adspff->write_buf.data + adspff->write_buf.read_index, bytes_to_write);
bytes_written += ret;
if ((size - bytes_to_write) > 0) {
ret = file_write(file->fp, &file->wr_offset,
adspff->write_buf.data, size - bytes_to_write);
bytes_written += ret;
}
adspff->write_buf.read_index =
(adspff->write_buf.read_index + size) % ADSPFF_SHARED_BUFFER_SIZE;
/* send ack */
msg_recv->msg.payload.ack_msg.size = bytes_written;
ret = msgq_queue_message(&adspff->msgq_recv.msgq,
(msgq_message_t *)msg_recv);
if (ret < 0) {
pr_err("fread Enqueue failed %d.", ret);
kfree(msg_recv);
return;
}
nvadsp_mbox_send(&rx_mbox, adspff_cmd_ack,
NVADSP_MBOX_SMSG, 0, 0);
kfree(msg_recv);
}
void adspff_fread(struct work_struct *work)
{
union adspff_message_t *message;
union adspff_message_t *msg_recv;
struct file_struct *file = NULL;
uint32_t bytes_free;
uint32_t wi = adspff->read_buf.write_index;
uint32_t ri = adspff->read_buf.read_index;
uint8_t can_wrap = 0;
uint32_t size = 0, size_read = 0;
int32_t ret = 0;
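/*
 * read_buf is a shared circular buffer drained by the ADSP; one slot is
 * always kept empty so that write_index == read_index means "empty"
 * rather than "full".
 */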
if (ri <= wi) {
bytes_free = ADSPFF_SHARED_BUFFER_SIZE - wi + ri - 1;
can_wrap = 1;
} else {
bytes_free = ri - wi - 1;
can_wrap = 0;
}
message = kzalloc(sizeof(union adspff_message_t), GFP_KERNEL);
msg_recv = kzalloc(sizeof(union adspff_message_t), GFP_KERNEL);
msg_recv->msgq_msg.size = MSGQ_MSG_SIZE(struct ack_msg_t);
message->msgq_msg.size = MSGQ_MSG_SIZE(struct fread_msg_t);
ret = msgq_dequeue_message(&adspff->msgq_send.msgq,
(msgq_message_t *)message);
if (ret < 0) {
pr_err("fread Dequeue failed %d.", ret);
kfree(message);
kfree(msg_recv);
return;
}
file = (struct file_struct *)message->msg.payload.fread_msg.file;
size = message->msg.payload.fread_msg.size;
if (bytes_free < size) {
size_read = 0;
goto send_ack;
}
if (can_wrap) {
uint32_t bytes_to_read = (size < (ADSPFF_SHARED_BUFFER_SIZE - wi)) ?
size : (ADSPFF_SHARED_BUFFER_SIZE - wi);
ret = file_read(file->fp, &file->rd_offset,
adspff->read_buf.data + wi, bytes_to_read);
size_read = ret;
if (ret < bytes_to_read)
goto send_ack;
if ((size - bytes_to_read) > 0) {
ret = file_read(file->fp, &file->rd_offset,
adspff->read_buf.data, size - bytes_to_read);
size_read += ret;
goto send_ack;
}
} else {
ret = file_read(file->fp, &file->rd_offset,
adspff->read_buf.data + wi, size);
size_read = ret;
goto send_ack;
}
send_ack:
msg_recv->msg.payload.ack_msg.size = size_read;
ret = msgq_queue_message(&adspff->msgq_recv.msgq,
(msgq_message_t *)msg_recv);
if (ret < 0) {
pr_err("fread Enqueue failed %d.", ret);
kfree(message);
kfree(msg_recv);
return;
}
adspff->read_buf.write_index =
(adspff->read_buf.write_index + size_read) % ADSPFF_SHARED_BUFFER_SIZE;
nvadsp_mbox_send(&rx_mbox, adspff_cmd_ack,
NVADSP_MBOX_SMSG, 0, 0);
kfree(message);
kfree(msg_recv);
}
static struct workqueue_struct *adspff_wq;
DECLARE_WORK(fopen_work, adspff_fopen);
DECLARE_WORK(fwrite_work, adspff_fwrite);
DECLARE_WORK(fread_work, adspff_fread);
DECLARE_WORK(fclose_work, adspff_fclose);
/******************************************************************************
* ADSP mailbox message handler
******************************************************************************/
static int adspff_msg_handler(uint32_t msg, void *data)
{
unsigned long flags;
spin_lock_irqsave(&adspff_lock, flags);
switch (msg) {
case adspff_cmd_fopen: {
queue_work(adspff_wq, &fopen_work);
}
break;
case adspff_cmd_fclose: {
queue_work(adspff_wq, &fclose_work);
}
break;
case adspff_cmd_fwrite: {
queue_work(adspff_wq, &fwrite_work);
}
break;
case adspff_cmd_fread: {
queue_work(adspff_wq, &fread_work);
}
break;
default:
pr_err("Unsupported mbox msg %d.\n", msg);
}
spin_unlock_irqrestore(&adspff_lock, flags);
return 0;
}
int adspff_init(void)
{
int ret = 0;
nvadsp_app_handle_t handle;
nvadsp_app_info_t *app_info;
handle = nvadsp_app_load("adspff", "adspff.elf");
if (!handle)
return -1;
app_info = nvadsp_app_init(handle, NULL);
if (!app_info) {
pr_err("unable to init app adspff\n");
return -1;
}
adspff = ADSPFF_SHARED_STATE(app_info->mem.shared);
ret = nvadsp_mbox_open(&rx_mbox, &adspff->mbox_id,
"adspff", adspff_msg_handler, NULL);
if (ret < 0) {
pr_err("Failed to open mbox %d", adspff->mbox_id);
return -1;
}
if (adspff_wq == NULL)
adspff_wq = create_singlethread_workqueue("adspff_wq");
spin_lock_init(&adspff_lock);
return 0;
}
void adspff_exit(void)
{
nvadsp_mbox_close(&rx_mbox);
flush_workqueue(adspff_wq);
destroy_workqueue(adspff_wq);
}


@@ -0,0 +1,138 @@
/*
* tegra_adspff.h - Shared ADSPFF interface between Tegra ADSP File
* System driver and ADSP side user space code.
* Copyright (c) 2016 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*/
#ifndef _TEGRA_ADSPFF_H_
#define _TEGRA_ADSPFF_H_
#ifdef __cplusplus
extern "C" {
#endif
/******************************************************************************
* Defines
******************************************************************************/
/* TODO: fine tuning */
#define ADSPFF_MSG_QUEUE_WSIZE 1024
#define ADSPFF_WRITE_DATA_SIZE 512
#define ADSPFF_READ_DATA_SIZE 1024
#define ADSPFF_SHARED_BUFFER_SIZE (128 * 1024)
/**
* adspff_mbx_cmd: commands exchanged using mailbox.
*
* @adspff_cmd_fopen: open file on host
* @adspff_cmd_fclose: close file on host
* @adspff_cmd_fwrite: write data in an open file on host
* @adspff_cmd_fread: read data from an open file on host
*/
enum adspff_mbx_cmd {
adspff_cmd_fopen = 0,
adspff_cmd_fclose,
adspff_cmd_fwrite,
adspff_cmd_fread,
adspff_cmd_fopen_recv,
adspff_cmd_ack,
};
/******************************************************************************
* Types
******************************************************************************/
/* supported message payloads */
struct fopen_msg_t {
uint8_t fname[250];
uint8_t modes[2];
};
struct fwrite_msg_t {
int64_t file;
int32_t size;
};
struct fread_msg_t {
int64_t file;
int32_t size;
};
struct fclose_msg_t {
int64_t file;
};
struct fopen_recv_msg_t {
int64_t file;
};
struct ack_msg_t {
int32_t size;
};
#pragma pack(4)
/* app message definition */
union adspff_message_t {
msgq_message_t msgq_msg;
struct {
int32_t header[MSGQ_MESSAGE_HEADER_WSIZE];
union {
struct fopen_msg_t fopen_msg;
struct fwrite_msg_t fwrite_msg;
struct fread_msg_t fread_msg;
struct fclose_msg_t fclose_msg;
struct fopen_recv_msg_t fopen_recv_msg;
struct ack_msg_t ack_msg;
} payload;
} msg;
};
/* app queue definition */
union adspff_msgq_t {
msgq_t msgq;
struct {
int32_t header[MSGQ_HEADER_WSIZE];
int32_t queue[ADSPFF_MSG_QUEUE_WSIZE];
} app_msgq;
};
#pragma pack()
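/*
 * MSGQ_MSG_SIZE() converts a payload size in bytes to a message size in
 * 32-bit words, rounding up: e.g. a 10-byte payload rounds up to 12 bytes,
 * i.e. 3 words.
 */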
#define MSGQ_MSG_SIZE(x) \
(((sizeof(x) + sizeof(int32_t) - 1) & (~(sizeof(int32_t)-1))) >> 2)
/**
* ADSPFF state structure shared between ADSP & CPU
*/
typedef struct {
uint32_t write_index;
uint32_t read_index;
uint8_t data[ADSPFF_SHARED_BUFFER_SIZE];
} adspff_shared_buffer_t;
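/*
 * Shared state layout: write_buf carries data produced on the ADSP that the
 * CPU writes out to files; read_buf carries data the CPU reads from files
 * back for the ADSP to consume.
 */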
struct adspff_shared_state_t {
uint16_t mbox_id;
union adspff_msgq_t msgq_recv;
union adspff_msgq_t msgq_send;
adspff_shared_buffer_t write_buf;
adspff_shared_buffer_t read_buf;
};
#define ADSPFF_SHARED_STATE(x) \
((struct adspff_shared_state_t *)x)
#ifdef __cplusplus
}
#endif
#endif /* _TEGRA_ADSPFF_H_ */


@@ -0,0 +1,184 @@
/*
* amc.c
*
* AMC and ARAM handling
*
* Copyright (C) 2014-2016, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/tegra_nvadsp.h>
#include <linux/irqchip/tegra-agic.h>
#include <linux/interrupt.h>
#include "dev.h"
#include "amc.h"
static struct platform_device *nvadsp_pdev;
static struct nvadsp_drv_data *nvadsp_drv_data;
static inline u32 amc_readl(u32 reg)
{
return readl(nvadsp_drv_data->base_regs[AMC] + reg);
}
static inline void amc_writel(u32 val, u32 reg)
{
writel(val, nvadsp_drv_data->base_regs[AMC] + reg);
}
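/*
 * ARAM is accessed through a small aperture in the AMC register space:
 * AMC_ARAM_APERTURE_BASE selects which AMC_ARAM_APERTURE_DATA_LEN-sized
 * window of ARAM is mapped at AMC_ARAM_APERTURE_DATA_START, and the copy
 * loops below slide the window forward whenever the offset crosses the
 * window boundary.
 */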
static void wmemcpy_to_aram(u32 to_aram, const u32 *from_mem, size_t wlen)
{
u32 base, offset;
base = to_aram & AMC_ARAM_APERTURE_DATA_LEN;
amc_writel(base, AMC_ARAM_APERTURE_BASE);
offset = to_aram % AMC_ARAM_APERTURE_DATA_LEN;
while (wlen--) {
if (offset == AMC_ARAM_APERTURE_DATA_LEN) {
base += AMC_ARAM_APERTURE_DATA_LEN;
amc_writel(base, AMC_ARAM_APERTURE_BASE);
offset = 0;
}
amc_writel(*from_mem, AMC_ARAM_APERTURE_DATA_START + offset);
from_mem++;
offset += 4;
}
}
static void wmemcpy_from_aram(u32 *to_mem, const u32 from_aram, size_t wlen)
{
u32 base, offset;
base = from_aram & AMC_ARAM_APERTURE_DATA_LEN;
amc_writel(base, AMC_ARAM_APERTURE_BASE);
offset = from_aram % AMC_ARAM_APERTURE_DATA_LEN;
while (wlen--) {
if (offset == AMC_ARAM_APERTURE_DATA_LEN) {
base += AMC_ARAM_APERTURE_DATA_LEN;
amc_writel(base, AMC_ARAM_APERTURE_BASE);
offset = 0;
}
*to_mem = amc_readl(AMC_ARAM_APERTURE_DATA_START + offset);
to_mem++;
offset += 4;
}
}
int nvadsp_aram_save(struct platform_device *pdev)
{
struct nvadsp_drv_data *d = platform_get_drvdata(pdev);
wmemcpy_from_aram(d->state.aram, AMC_ARAM_START, AMC_ARAM_WSIZE);
return 0;
}
int nvadsp_aram_restore(struct platform_device *pdev)
{
struct nvadsp_drv_data *ndd = platform_get_drvdata(pdev);
wmemcpy_to_aram(AMC_ARAM_START, ndd->state.aram, AMC_ARAM_WSIZE);
return 0;
}
int nvadsp_amc_save(struct platform_device *pdev)
{
struct nvadsp_drv_data *d = platform_get_drvdata(pdev);
u32 val, offset = 0;
int i = 0;
offset = 0x0;
val = readl(d->base_regs[AMC] + offset);
d->state.amc_regs[i++] = val;
offset = 0x8;
val = readl(d->base_regs[AMC] + offset);
d->state.amc_regs[i++] = val;
return 0;
}
int nvadsp_amc_restore(struct platform_device *pdev)
{
struct nvadsp_drv_data *d = platform_get_drvdata(pdev);
u32 val, offset = 0;
int i = 0;
offset = 0x0;
val = d->state.amc_regs[i++];
writel(val, d->base_regs[AMC] + offset);
offset = 0x8;
val = d->state.amc_regs[i++];
writel(val, d->base_regs[AMC] + offset);
return 0;
}
static irqreturn_t nvadsp_amc_error_int_handler(int irq, void *devid)
{
u32 val, addr, status, intr = 0;
status = amc_readl(AMC_INT_STATUS);
addr = amc_readl(AMC_ERROR_ADDR);
if (status & AMC_INT_STATUS_ARAM) {
/*
* Ignore addresses lower than AMC_ERROR_ADDR_IGNORE (4k),
* as those are spurious accesses caused by a hardware issue.
*/
if (addr > AMC_ERROR_ADDR_IGNORE)
pr_info("nvadsp: invalid ARAM access. address: 0x%x\n",
addr);
intr |= AMC_INT_INVALID_ARAM_ACCESS;
}
if (status & AMC_INT_STATUS_REG) {
pr_info("nvadsp: invalid AMC reg access. address: 0x%x\n",
addr);
intr |= AMC_INT_INVALID_REG_ACCESS;
}
val = amc_readl(AMC_INT_CLR);
val |= intr;
amc_writel(val, AMC_INT_CLR);
return IRQ_HANDLED;
}
status_t __init nvadsp_amc_init(struct platform_device *pdev)
{
struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
int ret = 0;
nvadsp_pdev = pdev;
nvadsp_drv_data = drv;
if (!of_device_is_compatible(node, "nvidia,tegra18x-adsp-hv")) {
dev_info(&pdev->dev, "Registering AMC Error Interrupt\n");
ret = request_irq(drv->agic_irqs[AMC_ERR_VIRQ],
nvadsp_amc_error_int_handler, 0, "AMC error int", pdev);
}
dev_info(&pdev->dev, "AMC/ARAM initialized.\n");
return ret;
}


@@ -0,0 +1,58 @@
/*
* amc.h
*
* A header file for AMC/ARAM
*
* Copyright (C) 2014 NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __TEGRA_NVADSP_AMC_H
#define __TEGRA_NVADSP_AMC_H
#define AMC_CONFIG 0x00
#define AMC_CONFIG_ALIASING (1 << 0)
#define AMC_CONFIG_CARVEOUT (1 << 1)
#define AMC_CONFIG_ERR_RESP (1 << 2)
#define AMC_INT_STATUS (0x04)
#define AMC_INT_STATUS_ARAM (1 << 0)
#define AMC_INT_STATUS_REG (1 << 1)
#define AMC_INT_MASK 0x08
#define AMC_INT_SET 0x0C
#define AMC_INT_CLR 0x10
#define AMC_INT_INVALID_ARAM_ACCESS (1 << 0)
#define AMC_INT_INVALID_REG_ACCESS (1 << 1)
#define AMC_ERROR_ADDR 0x14
#define AMC_ERROR_ADDR_IGNORE SZ_4K
#define AMC_REGS 0x1000
#define AMC_ARAM_APERTURE_BASE 0x28
#define AMC_ARAM_APERTURE_DATA_START 0x800
#define AMC_ARAM_APERTURE_DATA_LEN 0x800 /* 2KB */
#define AMC_ARAM_ALIAS0 0x00400000
#define AMC_ARAM_ALIAS1 0x00500000
#define AMC_ARAM_ALIAS2 0x00600000
#define AMC_ARAM_ALIAS3 0x00700000
#define AMC_ARAM_START 0
#define AMC_ARAM_SIZE SZ_64K
#define AMC_ARAM_WSIZE (AMC_ARAM_SIZE >> 2)
int nvadsp_aram_save(struct platform_device *pdev);
int nvadsp_aram_restore(struct platform_device *pdev);
int nvadsp_amc_save(struct platform_device *pdev);
int nvadsp_amc_restore(struct platform_device *pdev);
#endif /* __TEGRA_NVADSP_AMC_H */


@@ -0,0 +1,984 @@
/*
* Copyright (C) 2014-2016, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
#include <linux/platform/tegra/clock.h>
#include <linux/irqchip/tegra-agic.h>
#include <linux/irq.h>
#include "ape_actmon.h"
#include "dev.h"
#define ACTMON_DEV_CTRL 0x00
#define ACTMON_DEV_CTRL_ENB (0x1 << 31)
#define ACTMON_DEV_CTRL_UP_WMARK_NUM_SHIFT 26
#define ACTMON_DEV_CTRL_UP_WMARK_NUM_MASK (0x7 << 26)
#define ACTMON_DEV_CTRL_DOWN_WMARK_NUM_SHIFT 21
#define ACTMON_DEV_CTRL_DOWN_WMARK_NUM_MASK (0x7 << 21)
#define ACTMON_DEV_CTRL_UP_WMARK_ENB (0x1 << 19)
#define ACTMON_DEV_CTRL_DOWN_WMARK_ENB (0x1 << 18)
#define ACTMON_DEV_CTRL_AVG_UP_WMARK_ENB (0x1 << 17)
#define ACTMON_DEV_CTRL_AVG_DOWN_WMARK_ENB (0x1 << 16)
#define ACTMON_DEV_CTRL_AT_END_ENB (0x1 << 15)
#define ACTMON_DEV_CTRL_PERIODIC_ENB (0x1 << 13)
#define ACTMON_DEV_CTRL_K_VAL_SHIFT 10
#define ACTMON_DEV_CTRL_K_VAL_MASK (0x7 << 10)
#define ACTMON_DEV_CTRL_SAMPLE_PERIOD_VAL_SHIFT (0)
#define ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK (0xff << 0)
#define ACTMON_DEV_UP_WMARK 0x04
#define ACTMON_DEV_DOWN_WMARK 0x08
#define ACTMON_DEV_AVG_UP_WMARK 0x0c
#define ACTMON_DEV_AVG_DOWN_WMARK 0x10
#define ACTMON_DEV_INIT_AVG 0x14
#define ACTMON_DEV_COUNT 0x18
#define ACTMON_DEV_AVG_COUNT 0x1c
#define ACTMON_DEV_INTR_STATUS 0x20
#define ACTMON_DEV_INTR_UP_WMARK (0x1 << 31)
#define ACTMON_DEV_INTR_DOWN_WMARK (0x1 << 30)
#define ACTMON_DEV_INTR_AVG_DOWN_WMARK (0x1 << 29)
#define ACTMON_DEV_INTR_AVG_UP_WMARK (0x1 << 28)
#define ACTMON_DEV_COUNT_WEGHT 0x24
#define ACTMON_DEV_SAMPLE_CTRL 0x28
#define ACTMON_DEV_SAMPLE_CTRL_TICK_65536 (0x1 << 2)
#define ACTMON_DEV_SAMPLE_CTRL_TICK_256 (0x0 << 1)
#define AMISC_ACTMON_0 0x54
#define AMISC_ACTMON_CNT_TARGET_ENABLE (0x1 << 31)
#define ACTMON_DEFAULT_AVG_WINDOW_LOG2 7
/* in units of 0.1%, i.e. 6 => 0.6% of max freq */
#define ACTMON_DEFAULT_AVG_BAND 6
#define ACTMON_MAX_REG_OFFSET 0x2c
/* TBD: These would come via dts file */
#define ACTMON_REG_OFFSET 0x800
/* millisecond-mode divider used as SAMPLE_TICK */
#define SAMPLE_MS_DIVIDER 65536
/* Sample period in ms */
#define ACTMON_DEFAULT_SAMPLING_PERIOD 20
#define AVG_COUNT_THRESHOLD 100000
static struct actmon ape_actmon;
static struct actmon *apemon;
/* APE activity monitor: Samples ADSP activity */
static struct actmon_dev actmon_dev_adsp = {
.reg = 0x000,
.clk_name = "adsp_cpu",
/* ADSP suspend activity floor */
.suspend_freq = 51200,
/* min step by which we want to boost in case of sudden boost request */
.boost_freq_step = 51200,
/* % of boost freq for boosting up */
.boost_up_coef = 200,
/*
* % of boost freq for boosting down. Should be boosted down by
* exponential down
*/
.boost_down_coef = 80,
/*
* % of device freq collected in a sample period set as boost up
* threshold. boost interrupt is generated when actmon_count
* (absolute actmon count in a sample period)
* crosses this threshold consecutively by up_wmark_window.
*/
.boost_up_threshold = 95,
/*
* % of device freq collected in a sample period set as boost down
* threshold. boost interrupt is generated when actmon_count(raw_count)
* crosses this threshold consecutively by down_wmark_window.
*/
.boost_down_threshold = 80,
/*
* No of times raw counts hits the up_threshold to generate an
* interrupt
*/
.up_wmark_window = 4,
/*
* No of times raw counts hits the down_threshold to generate an
* interrupt.
*/
.down_wmark_window = 8,
/*
* No of samples = 2^ avg_window_log2 for calculating exponential moving
* average.
*/
.avg_window_log2 = ACTMON_DEFAULT_AVG_WINDOW_LOG2,
/*
* "weight" is used to scale the count to match the device freq
* When 256 adsp active cpu clock are generated, actmon count
* is increamented by 1. Making weight as 256 ensures that 1 adsp active
* clk increaments actmon_count by 1.
* This makes actmon_count exactly reflect active adsp cpu clk
* cycles.
*/
.count_weight = 0x100,
/*
* FREQ_SAMPLER: samples the number of device (adsp) active cycles,
* weighted by count_weight, to reflect actmon_count within a
* sample period.
* LOAD_SAMPLER: samples actmon active cycles weighted by
* count_weight to reflect actmon_count within a sample period.
*/
.type = ACTMON_FREQ_SAMPLER,
.state = ACTMON_UNINITIALIZED,
};
static struct actmon_dev *actmon_devices[] = {
&actmon_dev_adsp,
};
static inline u32 actmon_readl(u32 offset)
{
return __raw_readl(apemon->base + offset);
}
static inline void actmon_writel(u32 val, u32 offset)
{
__raw_writel(val, apemon->base + offset);
}
static inline void actmon_wmb(void)
{
wmb();
}
#define offs(x) (dev->reg + x)
static inline unsigned long do_percent(unsigned long val, unsigned int pct)
{
return val * pct / 100;
}
static void actmon_update_sample_period(unsigned long period)
{
u32 sample_period_in_clks;
u32 val = 0;
apemon->sampling_period = period;
/*
* sample_period_in_clks <1..255> = (actmon_clk_freq<1..40800> *
* actmon_sample_period <10ms..40ms>) / SAMPLE_MS_DIVIDER(65536)
*/
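/* e.g. at a 204000 kHz actmon (APE) clock and a 20 ms period:
 * 204000 * 20 / 65536 ~= 62 sample-period clocks.
 */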
sample_period_in_clks = (apemon->freq * apemon->sampling_period) /
SAMPLE_MS_DIVIDER;
val = actmon_readl(ACTMON_DEV_CTRL);
val &= ~ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK;
val |= (sample_period_in_clks <<
ACTMON_DEV_CTRL_SAMPLE_PERIOD_VAL_SHIFT)
& ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK;
actmon_writel(val, ACTMON_DEV_CTRL);
}
static inline void actmon_dev_up_wmark_set(struct actmon_dev *dev)
{
u32 val;
unsigned long freq = (dev->type == ACTMON_FREQ_SAMPLER) ?
dev->cur_freq : apemon->freq;
val = freq * apemon->sampling_period;
actmon_writel(do_percent(val, dev->boost_up_threshold),
offs(ACTMON_DEV_UP_WMARK));
}
static inline void actmon_dev_down_wmark_set(struct actmon_dev *dev)
{
u32 val;
unsigned long freq = (dev->type == ACTMON_FREQ_SAMPLER) ?
dev->cur_freq : apemon->freq;
val = freq * apemon->sampling_period;
actmon_writel(do_percent(val, dev->boost_down_threshold),
offs(ACTMON_DEV_DOWN_WMARK));
}
static inline void actmon_dev_wmark_set(struct actmon_dev *dev)
{
u32 val;
unsigned long freq = (dev->type == ACTMON_FREQ_SAMPLER) ?
dev->cur_freq : apemon->freq;
val = freq * apemon->sampling_period;
actmon_writel(do_percent(val, dev->boost_up_threshold),
offs(ACTMON_DEV_UP_WMARK));
actmon_writel(do_percent(val, dev->boost_down_threshold),
offs(ACTMON_DEV_DOWN_WMARK));
}
static inline void actmon_dev_avg_wmark_set(struct actmon_dev *dev)
{
/*
* band: delta from current count to be set for avg upper
* and lower thresholds
*/
u32 band = dev->avg_band_freq * apemon->sampling_period;
u32 avg = dev->avg_count;
actmon_writel(avg + band, offs(ACTMON_DEV_AVG_UP_WMARK));
avg = max(avg, band);
actmon_writel(avg - band, offs(ACTMON_DEV_AVG_DOWN_WMARK));
}
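/*
 * Convert the exponential average count back to a frequency in kHz.
 * For FREQ_SAMPLER the count is device clocks per sample period, so
 * dividing by the period (in ms) yields kHz directly; for LOAD_SAMPLER
 * the count is in actmon clocks and is rescaled by cur_freq/apemon->freq.
 */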
static unsigned long actmon_dev_avg_freq_get(struct actmon_dev *dev)
{
u64 val;
if (dev->type == ACTMON_FREQ_SAMPLER)
return dev->avg_count / apemon->sampling_period;
val = (u64) dev->avg_count * dev->cur_freq;
do_div(val , apemon->freq * apemon->sampling_period);
return (u32)val;
}
/* Activity monitor sampling operations */
static irqreturn_t ape_actmon_dev_isr(int irq, void *dev_id)
{
u32 val, devval;
unsigned long flags;
struct actmon_dev *dev = (struct actmon_dev *)dev_id;
spin_lock_irqsave(&dev->lock, flags);
val = actmon_readl(offs(ACTMON_DEV_INTR_STATUS));
actmon_writel(val, offs(ACTMON_DEV_INTR_STATUS)); /* clr all */
devval = actmon_readl(offs(ACTMON_DEV_CTRL));
if (val & ACTMON_DEV_INTR_AVG_UP_WMARK) {
devval |= (ACTMON_DEV_CTRL_AVG_UP_WMARK_ENB |
ACTMON_DEV_CTRL_AVG_DOWN_WMARK_ENB);
dev->avg_count = actmon_readl(offs(ACTMON_DEV_AVG_COUNT));
actmon_dev_avg_wmark_set(dev);
} else if (val & ACTMON_DEV_INTR_AVG_DOWN_WMARK) {
devval |= (ACTMON_DEV_CTRL_AVG_UP_WMARK_ENB |
ACTMON_DEV_CTRL_AVG_DOWN_WMARK_ENB);
dev->avg_count = actmon_readl(offs(ACTMON_DEV_AVG_COUNT));
actmon_dev_avg_wmark_set(dev);
}
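/*
 * Raw-count watermark crossings drive the boost term: each up-watermark
 * adds boost_freq_step and scales the current boost by boost_up_coef,
 * each down-watermark decays it by boost_down_coef.
 */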
if (val & ACTMON_DEV_INTR_UP_WMARK) {
devval |= (ACTMON_DEV_CTRL_UP_WMARK_ENB |
ACTMON_DEV_CTRL_DOWN_WMARK_ENB);
dev->boost_freq = dev->boost_freq_step +
do_percent(dev->boost_freq, dev->boost_up_coef);
if (dev->boost_freq >= dev->max_freq) {
dev->boost_freq = dev->max_freq;
devval &= ~ACTMON_DEV_CTRL_UP_WMARK_ENB;
}
} else if (val & ACTMON_DEV_INTR_DOWN_WMARK) {
devval |= (ACTMON_DEV_CTRL_UP_WMARK_ENB |
ACTMON_DEV_CTRL_DOWN_WMARK_ENB);
dev->boost_freq =
do_percent(dev->boost_freq, dev->boost_down_coef);
if (dev->boost_freq == 0) {
devval &= ~ACTMON_DEV_CTRL_DOWN_WMARK_ENB;
}
}
actmon_writel(devval, offs(ACTMON_DEV_CTRL));
actmon_wmb();
spin_unlock_irqrestore(&dev->lock, flags);
return IRQ_WAKE_THREAD;
}
static irqreturn_t ape_actmon_dev_fn(int irq, void *dev_id)
{
unsigned long flags, freq;
struct actmon_dev *dev = (struct actmon_dev *)dev_id;
spin_lock_irqsave(&dev->lock, flags);
if (dev->state != ACTMON_ON) {
spin_unlock_irqrestore(&dev->lock, flags);
return IRQ_HANDLED;
}
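/*
 * Scale the measured average by avg_sustain_coef (100 * 100 /
 * boost_up_threshold): the target is the frequency at which the current
 * activity would sit right at the boost-up threshold, plus any boost.
 */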
freq = actmon_dev_avg_freq_get(dev);
dev->avg_actv_freq = freq; /* in kHz */
freq = do_percent(freq, dev->avg_sustain_coef);
freq += dev->boost_freq;
dev->target_freq = freq;
spin_unlock_irqrestore(&dev->lock, flags);
dev_dbg(dev->device, "%s(kHz): avg: %lu, boost: %lu, target: %lu, current: %lu\n",
dev->clk_name, dev->avg_actv_freq, dev->boost_freq, dev->target_freq,
dev->cur_freq);
#if defined(CONFIG_TEGRA_ADSP_DFS)
adsp_cpu_set_rate(freq);
#endif
return IRQ_HANDLED;
}
/* Activity monitor configuration and control */
static void actmon_dev_configure(struct actmon_dev *dev,
unsigned long freq)
{
u32 val;
dev->boost_freq = 0;
dev->cur_freq = freq;
dev->target_freq = freq;
dev->avg_actv_freq = freq;
if (dev->type == ACTMON_FREQ_SAMPLER) {
/*
* max actmon count = (count_weight * adsp_freq (kHz) *
* sample_period (ms)) / (PULSE_N_CLK + 1)
* Count_weight is set to 256 (0x100) and (PULSE_N_CLK + 1) = 256,
* so the two factors cancel out when computing max_actmon_count.
* In other words:
* max actmon count = ((count_weight * adsp_freq *
* sample_period_reg * SAMPLE_TICK)
* / (ape_freq * (PULSE_N_CLK + 1)))
* where -
* sample_period_reg : <1..255> sample period as a number of
* actmon clocks per sample
* SAMPLE_TICK : divider value; 65536 for ms mode, 256 for us mode
* (PULSE_N_CLK + 1) : 256 - number of adsp "active" clocks needed to
* increment raw_count/actmon_count by one.
*/
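/* e.g. with an assumed 600000 kHz (600 MHz) ADSP rate and a 20 ms
 * sample period, the initial average programmed below would be
 * 600000 * 20 = 12,000,000 counts.
 */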
dev->avg_count = dev->cur_freq * apemon->sampling_period;
dev->avg_band_freq = dev->max_freq *
ACTMON_DEFAULT_AVG_BAND / 1000;
} else {
dev->avg_count = apemon->freq * apemon->sampling_period;
dev->avg_band_freq = apemon->freq *
ACTMON_DEFAULT_AVG_BAND / 1000;
}
actmon_writel(dev->avg_count, offs(ACTMON_DEV_INIT_AVG));
BUG_ON(!dev->boost_up_threshold);
dev->avg_sustain_coef = 100 * 100 / dev->boost_up_threshold;
actmon_dev_avg_wmark_set(dev);
actmon_dev_wmark_set(dev);
actmon_writel(dev->count_weight, offs(ACTMON_DEV_COUNT_WEGHT));
val = actmon_readl(ACTMON_DEV_CTRL);
val |= (ACTMON_DEV_CTRL_PERIODIC_ENB |
ACTMON_DEV_CTRL_AVG_UP_WMARK_ENB |
ACTMON_DEV_CTRL_AVG_DOWN_WMARK_ENB);
val |= ((dev->avg_window_log2 - 1) << ACTMON_DEV_CTRL_K_VAL_SHIFT) &
ACTMON_DEV_CTRL_K_VAL_MASK;
val |= ((dev->down_wmark_window - 1) <<
ACTMON_DEV_CTRL_DOWN_WMARK_NUM_SHIFT) &
ACTMON_DEV_CTRL_DOWN_WMARK_NUM_MASK;
val |= ((dev->up_wmark_window - 1) <<
ACTMON_DEV_CTRL_UP_WMARK_NUM_SHIFT) &
ACTMON_DEV_CTRL_UP_WMARK_NUM_MASK;
val |= ACTMON_DEV_CTRL_DOWN_WMARK_ENB |
ACTMON_DEV_CTRL_UP_WMARK_ENB;
actmon_writel(val, offs(ACTMON_DEV_CTRL));
actmon_wmb();
}
static void actmon_dev_enable(struct actmon_dev *dev)
{
u32 val;
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
if (dev->state == ACTMON_OFF) {
dev->state = ACTMON_ON;
val = actmon_readl(offs(ACTMON_DEV_CTRL));
val |= ACTMON_DEV_CTRL_ENB;
actmon_writel(val, offs(ACTMON_DEV_CTRL));
actmon_wmb();
}
spin_unlock_irqrestore(&dev->lock, flags);
}
static void actmon_dev_disable(struct actmon_dev *dev)
{
u32 val;
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
if (dev->state == ACTMON_ON) {
dev->state = ACTMON_OFF;
val = actmon_readl(offs(ACTMON_DEV_CTRL));
val &= ~ACTMON_DEV_CTRL_ENB;
actmon_writel(val, offs(ACTMON_DEV_CTRL));
actmon_writel(0xffffffff, offs(ACTMON_DEV_INTR_STATUS));
actmon_wmb();
}
spin_unlock_irqrestore(&dev->lock, flags);
}
static int actmon_dev_probe(struct actmon_dev *dev)
{
struct nvadsp_drv_data *drv_data = dev_get_drvdata(dev->device);
int ret;
dev->irq = drv_data->agic_irqs[ACTMON_VIRQ];
ret = request_threaded_irq(dev->irq, ape_actmon_dev_isr,
ape_actmon_dev_fn, IRQ_TYPE_LEVEL_HIGH,
dev->clk_name, dev);
if (ret) {
dev_err(dev->device, "Failed irq %d request for %s\n", dev->irq,
dev->clk_name);
goto end;
}
disable_irq(dev->irq);
end:
return ret;
}
static int actmon_dev_init(struct actmon_dev *dev)
{
int ret = -EINVAL;
unsigned long freq;
spin_lock_init(&dev->lock);
dev->clk = clk_get_sys(NULL, dev->clk_name);
if (IS_ERR_OR_NULL(dev->clk)) {
dev_err(dev->device, "Failed to find %s clock\n",
dev->clk_name);
goto end;
}
ret = clk_prepare_enable(dev->clk);
if (ret) {
dev_err(dev->device, "unable to enable %s clock\n",
dev->clk_name);
goto err_enable;
}
dev->max_freq = freq = clk_get_rate(dev->clk) / 1000;
actmon_dev_configure(dev, freq);
dev->state = ACTMON_OFF;
actmon_dev_enable(dev);
enable_irq(dev->irq);
return 0;
err_enable:
clk_put(dev->clk);
end:
return ret;
}
#ifdef CONFIG_DEBUG_FS
#define RW_MODE (S_IWUSR | S_IRUSR)
#define RO_MODE S_IRUSR
static struct dentry *clk_debugfs_root;
static int type_show(struct seq_file *s, void *data)
{
struct actmon_dev *dev = s->private;
seq_printf(s, "%s\n", (dev->type == ACTMON_LOAD_SAMPLER) ?
"Load Activity Monitor" : "Frequency Activity Monitor");
return 0;
}
static int type_open(struct inode *inode, struct file *file)
{
return single_open(file, type_show, inode->i_private);
}
static const struct file_operations type_fops = {
.open = type_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int actv_get(void *data, u64 *val)
{
unsigned long flags;
struct actmon_dev *dev = data;
spin_lock_irqsave(&dev->lock, flags);
*val = actmon_dev_avg_freq_get(dev);
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(actv_fops, actv_get, NULL, "%llu\n");
static int step_get(void *data, u64 *val)
{
struct actmon_dev *dev = data;
*val = dev->boost_freq_step * 100 / dev->max_freq;
return 0;
}
static int step_set(void *data, u64 val)
{
unsigned long flags;
struct actmon_dev *dev = data;
if (val > 100)
val = 100;
spin_lock_irqsave(&dev->lock, flags);
dev->boost_freq_step = do_percent(dev->max_freq, (unsigned int)val);
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(step_fops, step_get, step_set, "%llu\n");
static int count_weight_get(void *data, u64 *val)
{
struct actmon_dev *dev = data;
*val = dev->count_weight;
return 0;
}
static int count_weight_set(void *data, u64 val)
{
unsigned long flags;
struct actmon_dev *dev = data;
spin_lock_irqsave(&dev->lock, flags);
dev->count_weight = (u32) val;
actmon_writel(dev->count_weight, offs(ACTMON_DEV_COUNT_WEGHT));
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cnt_wt_fops, count_weight_get,
count_weight_set, "%llu\n");
static int up_threshold_get(void *data, u64 *val)
{
struct actmon_dev *dev = data;
*val = dev->boost_up_threshold;
return 0;
}
static int up_threshold_set(void *data, u64 val)
{
unsigned long flags;
struct actmon_dev *dev = data;
unsigned int up_threshold = (unsigned int)val;
if (up_threshold > 100)
up_threshold = 100;
spin_lock_irqsave(&dev->lock, flags);
if (up_threshold <= dev->boost_down_threshold)
up_threshold = dev->boost_down_threshold;
if (up_threshold)
dev->avg_sustain_coef = 100 * 100 / up_threshold;
dev->boost_up_threshold = up_threshold;
actmon_dev_up_wmark_set(dev);
actmon_wmb();
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(up_threshold_fops, up_threshold_get,
up_threshold_set, "%llu\n");
static int down_threshold_get(void *data, u64 *val)
{
struct actmon_dev *dev = data;
*val = dev->boost_down_threshold;
return 0;
}
static int down_threshold_set(void *data, u64 val)
{
unsigned long flags;
struct actmon_dev *dev = data;
unsigned int down_threshold = (unsigned int)val;
spin_lock_irqsave(&dev->lock, flags);
if (down_threshold >= dev->boost_up_threshold)
down_threshold = dev->boost_up_threshold;
dev->boost_down_threshold = down_threshold;
actmon_dev_down_wmark_set(dev);
actmon_wmb();
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(down_threshold_fops, down_threshold_get,
down_threshold_set, "%llu\n");
static int state_get(void *data, u64 *val)
{
struct actmon_dev *dev = data;
*val = dev->state;
return 0;
}
static int state_set(void *data, u64 val)
{
struct actmon_dev *dev = data;
if (val)
actmon_dev_enable(dev);
else
actmon_dev_disable(dev);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(state_fops, state_get, state_set, "%llu\n");
/* Get period in msec */
static int period_get(void *data, u64 *val)
{
*val = apemon->sampling_period;
return 0;
}
/* Set period in msec */
static int period_set(void *data, u64 val)
{
int i;
unsigned long flags;
u8 period = (u8)val;
if (period) {
actmon_update_sample_period(period);
for (i = 0; i < ARRAY_SIZE(actmon_devices); i++) {
struct actmon_dev *dev = actmon_devices[i];
spin_lock_irqsave(&dev->lock, flags);
actmon_dev_wmark_set(dev);
spin_unlock_irqrestore(&dev->lock, flags);
}
actmon_wmb();
return 0;
}
return -EINVAL;
}
DEFINE_SIMPLE_ATTRIBUTE(period_fops, period_get, period_set, "%llu\n");
static int actmon_debugfs_create_dev(struct actmon_dev *dev)
{
struct dentry *dir, *d;
if (dev->state == ACTMON_UNINITIALIZED)
return 0;
dir = debugfs_create_dir(dev->clk_name, clk_debugfs_root);
if (!dir)
return -ENOMEM;
d = debugfs_create_file(
"actv_type", RO_MODE, dir, dev, &type_fops);
if (!d)
return -ENOMEM;
d = debugfs_create_file(
"avg_activity", RO_MODE, dir, dev, &actv_fops);
if (!d)
return -ENOMEM;
d = debugfs_create_file(
"boost_step", RW_MODE, dir, dev, &step_fops);
if (!d)
return -ENOMEM;
d = debugfs_create_u32(
"boost_rate_dec", RW_MODE, dir, (u32 *)&dev->boost_down_coef);
if (!d)
return -ENOMEM;
d = debugfs_create_u32(
"boost_rate_inc", RW_MODE, dir, (u32 *)&dev->boost_up_coef);
if (!d)
return -ENOMEM;
d = debugfs_create_file(
"boost_threshold_dn", RW_MODE, dir, dev, &down_threshold_fops);
if (!d)
return -ENOMEM;
d = debugfs_create_file(
"boost_threshold_up", RW_MODE, dir, dev, &up_threshold_fops);
if (!d)
return -ENOMEM;
d = debugfs_create_file(
"state", RW_MODE, dir, dev, &state_fops);
if (!d)
return -ENOMEM;
d = debugfs_create_file(
"cnt_wt", RW_MODE, dir, dev, &cnt_wt_fops);
if (!d)
return -ENOMEM;
return 0;
}
static int actmon_debugfs_init(struct nvadsp_drv_data *drv)
{
int i;
int ret = -ENOMEM;
struct dentry *d;
if (!drv->adsp_debugfs_root)
return ret;
d = debugfs_create_dir("adsp_actmon", drv->adsp_debugfs_root);
if (!d)
return ret;
clk_debugfs_root = d;
d = debugfs_create_file("period", RW_MODE, d, NULL, &period_fops);
if (!d)
goto err_out;
for (i = 0; i < ARRAY_SIZE(actmon_devices); i++) {
ret = actmon_debugfs_create_dev(actmon_devices[i]);
if (ret)
goto err_out;
}
return 0;
err_out:
debugfs_remove_recursive(clk_debugfs_root);
return ret;
}
#endif
/* freq in KHz */
void actmon_rate_change(unsigned long freq, bool override)
{
struct actmon_dev *dev = &actmon_dev_adsp;
unsigned long flags;
if (override) {
actmon_dev_disable(dev);
spin_lock_irqsave(&dev->lock, flags);
dev->cur_freq = freq;
dev->avg_count = freq * apemon->sampling_period;
actmon_writel(dev->avg_count, offs(ACTMON_DEV_INIT_AVG));
actmon_dev_avg_wmark_set(dev);
actmon_dev_wmark_set(dev);
actmon_wmb();
spin_unlock_irqrestore(&dev->lock, flags);
actmon_dev_enable(dev);
} else {
spin_lock_irqsave(&dev->lock, flags);
dev->cur_freq = freq;
if (dev->state == ACTMON_ON) {
actmon_dev_wmark_set(dev);
actmon_wmb();
}
spin_unlock_irqrestore(&dev->lock, flags);
}
/* set the APE rate to half the ADSP rate: freq is in kHz, so Hz / 2 = freq * 500 */
clk_set_rate(apemon->clk, freq * 500);
}
int ape_actmon_probe(struct platform_device *pdev)
{
int ret = 0;
int i;
for (i = 0; i < ARRAY_SIZE(actmon_devices); i++) {
actmon_devices[i]->device = &pdev->dev;
ret = actmon_dev_probe(actmon_devices[i]);
dev_dbg(&pdev->dev, "%s actmon: %s probe (%d)\n",
actmon_devices[i]->clk_name, ret ? "Failed" : "Completed", ret);
}
return ret;
}
static int ape_actmon_rc_cb(
struct notifier_block *nb, unsigned long rate, void *v)
{
struct actmon_dev *dev = &actmon_dev_adsp;
unsigned long flags;
u32 init_cnt;
if (dev->state != ACTMON_ON) {
dev_dbg(dev->device, "adsp actmon is not ON\n");
goto exit_out;
}
actmon_dev_disable(dev);
spin_lock_irqsave(&dev->lock, flags);
init_cnt = actmon_readl(offs(ACTMON_DEV_AVG_COUNT));
/* recompute the sample-period clock count so the sampling period is preserved at the new rate */
apemon->freq = rate / 1000; /* in KHz */
actmon_update_sample_period(ACTMON_DEFAULT_SAMPLING_PERIOD);
actmon_writel(init_cnt, offs(ACTMON_DEV_INIT_AVG));
spin_unlock_irqrestore(&dev->lock, flags);
actmon_dev_enable(dev);
exit_out:
return NOTIFY_OK;
}
int ape_actmon_init(struct platform_device *pdev)
{
struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
static void __iomem *amisc_base;
u32 sample_period_in_clks;
struct clk *p;
u32 val = 0;
int i, ret;
if (drv->actmon_initialized)
return 0;
apemon = &ape_actmon;
apemon->base = drv->base_regs[AMISC] + ACTMON_REG_OFFSET;
amisc_base = drv->base_regs[AMISC];
apemon->clk = clk_get_sys(NULL, "adsp.ape");
if (IS_ERR_OR_NULL(apemon->clk)) {
dev_err(&pdev->dev, "Failed to find actmon clock\n");
ret = -EINVAL;
goto err_out;
}
ret = clk_prepare_enable(apemon->clk);
if (ret) {
dev_err(&pdev->dev, "Failed to enable actmon clock\n");
ret = -EINVAL;
goto err_out;
}
apemon->clk_rc_nb.notifier_call = ape_actmon_rc_cb;
/*
* "adsp.ape" clk is shared bus user clock and "ape" is bus clock
* but rate change notification should come from bus clock itself.
*/
p = clk_get_parent(apemon->clk);
if (!p) {
dev_err(&pdev->dev, "Failed to find actmon parent clock\n");
ret = -EINVAL;
goto clk_err_out;
}
ret = tegra_register_clk_rate_notifier(p, &apemon->clk_rc_nb);
if (ret) {
dev_err(&pdev->dev, "Registration fail: %s rate change notifier for %s\n",
p->name, apemon->clk->name);
goto clk_err_out;
}
apemon->freq = clk_get_rate(apemon->clk) / 1000; /* in KHz */
apemon->sampling_period = ACTMON_DEFAULT_SAMPLING_PERIOD;
/*
* Sample period expressed as a number of actmon clocks.
* Actmon is derived from the APE clk. For example, if the APE clk is
* 204 MHz = 204000 kHz and we want the number of clocks in a 10 ms
* sample: 1 ms = 204000 cycles, so 10 ms = 204000 * 10 APE cycles.
* SAMPLE_MS_DIVIDER (65536) is the SAMPLE_TICK divider used in ms mode
* (ACTMON_DEV_SAMPLE_CTRL_TICK_65536 is selected below).
*/
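/* Completing the example: 2,040,000 APE cycles / 65536 ~= 31 clocks for
 * a 10 ms sample; with the 20 ms default used here that is ~62.
 */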
sample_period_in_clks = (apemon->freq * apemon->sampling_period)
/ SAMPLE_MS_DIVIDER;
/* set ms mode */
actmon_writel(ACTMON_DEV_SAMPLE_CTRL_TICK_65536,
ACTMON_DEV_SAMPLE_CTRL);
val = actmon_readl(ACTMON_DEV_CTRL);
val &= ~ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK;
val |= (sample_period_in_clks <<
ACTMON_DEV_CTRL_SAMPLE_PERIOD_VAL_SHIFT)
& ACTMON_DEV_CTRL_SAMPLE_PERIOD_MASK;
actmon_writel(val, ACTMON_DEV_CTRL);
/* Enable AMISC_ACTMON */
val = __raw_readl(amisc_base + AMISC_ACTMON_0);
val |= AMISC_ACTMON_CNT_TARGET_ENABLE;
__raw_writel(val, amisc_base + AMISC_ACTMON_0);
actmon_writel(0xffffffff, ACTMON_DEV_INTR_STATUS); /* clr all */
for (i = 0; i < ARRAY_SIZE(actmon_devices); i++) {
ret = actmon_dev_init(actmon_devices[i]);
dev_dbg(&pdev->dev, "%s actmon device: %s initialization (%d)\n",
actmon_devices[i]->clk_name, ret ? "Failed" : "Completed", ret);
}
#ifdef CONFIG_DEBUG_FS
actmon_debugfs_init(drv);
#endif
drv->actmon_initialized = true;
dev_dbg(&pdev->dev, "adsp actmon initialized ....\n");
return 0;
clk_err_out:
if (apemon->clk)
clk_disable_unprepare(apemon->clk);
err_out:
if (!IS_ERR_OR_NULL(apemon->clk))
clk_put(apemon->clk);
return ret;
}
int ape_actmon_exit(struct platform_device *pdev)
{
struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
struct actmon_dev *dev;
status_t ret = 0;
int i;
/* return if actmon is not initialized */
if (!drv->actmon_initialized)
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(actmon_devices); i++) {
dev = actmon_devices[i];
actmon_dev_disable(dev);
disable_irq(dev->irq);
clk_disable_unprepare(dev->clk);
clk_put(dev->clk);
}
tegra_unregister_clk_rate_notifier(clk_get_parent(apemon->clk),
&apemon->clk_rc_nb);
clk_disable_unprepare(apemon->clk);
clk_put(apemon->clk);
drv->actmon_initialized = false;
dev_dbg(&pdev->dev, "adsp actmon has exited ....\n");
return ret;
}


@@ -0,0 +1,86 @@
/*
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef __APE_ACTMON_H
#define __APE_ACTMON_H
#include <linux/spinlock.h>
enum actmon_type {
ACTMON_LOAD_SAMPLER,
ACTMON_FREQ_SAMPLER,
};
enum actmon_state {
ACTMON_UNINITIALIZED = -1,
ACTMON_OFF = 0,
ACTMON_ON = 1,
ACTMON_SUSPENDED = 2,
};
/* Units:
* - frequency in kHz
* - coefficients, and thresholds in %
* - sampling period in ms
* - window in sample periods (value = setting + 1)
*/
struct actmon_dev {
u32 reg;
int irq;
struct device *device;
const char *dev_id;
const char *con_id;
const char *clk_name;
struct clk *clk;
unsigned long max_freq;
unsigned long target_freq;
unsigned long cur_freq;
unsigned long suspend_freq;
unsigned long avg_actv_freq;
unsigned long avg_band_freq;
unsigned int avg_sustain_coef;
u32 avg_count;
unsigned long boost_freq;
unsigned long boost_freq_step;
unsigned int boost_up_coef;
unsigned int boost_down_coef;
unsigned int boost_up_threshold;
unsigned int boost_down_threshold;
u8 up_wmark_window;
u8 down_wmark_window;
u8 avg_window_log2;
u32 count_weight;
enum actmon_type type;
enum actmon_state state;
enum actmon_state saved_state;
spinlock_t lock;
};
struct actmon {
struct clk *clk;
unsigned long freq;
unsigned long sampling_period;
struct notifier_block clk_rc_nb;
void __iomem *base;
};
int ape_actmon_init(struct platform_device *pdev);
int ape_actmon_exit(struct platform_device *pdev);
void actmon_rate_change(unsigned long freq, bool override);
#endif


@@ -0,0 +1,988 @@
/*
* run_app.c
*
* ADSP OS App management
*
* Copyright (C) 2014-2016, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/platform_device.h>
#include <linux/tegra_nvadsp.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/firmware.h>
#include <linux/dma-buf.h>
#include <linux/slab.h>
#include <linux/elf.h>
#include "aram_manager.h"
#include "os.h"
#include "dev.h"
#include "adsp_shared_struct.h"
#define DYN_APP_EXTN ".elf"
/*
* structure to hold the list of app binaries loaded and
* its associated instances.
*/
struct nvadsp_app_service {
char name[NVADSP_NAME_SZ];
struct list_head node;
int instance;
struct mutex lock;
struct list_head app_head;
const uint32_t token;
const struct app_mem_size *mem_size;
int generated_instance_id;
struct adsp_module *mod;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
#endif
};
/* nvadsp app loader private structure */
struct nvadsp_app_priv_struct {
struct platform_device *pdev;
struct completion os_load_complete;
struct nvadsp_mbox mbox;
struct list_head service_list;
struct mutex service_lock_list;
#ifdef CONFIG_DEBUG_FS
struct dentry *adsp_app_debugfs_root;
#endif
};
static struct nvadsp_app_priv_struct priv;
static void delete_app_instance(nvadsp_app_info_t *);
#ifdef CONFIG_DEBUG_FS
static int dump_binary_in_2bytes_app_file_node(struct seq_file *s, void *data)
{
struct nvadsp_app_service *ser = s->private;
struct adsp_module *mod = ser->mod;
u32 adsp_ptr;
u16 *ptr;
int i;
adsp_ptr = mod->adsp_module_ptr;
ptr = (u16 *)mod->module_ptr;
for (i = 0; i < mod->size; i += 2)
seq_printf(s, "0x%x : 0x%04x\n", adsp_ptr + i, *(ptr + i));
return 0;
}
static int dump_binary_in_words_app_file_node(struct seq_file *s, void *data)
{
struct nvadsp_app_service *ser = s->private;
struct adsp_module *mod = ser->mod;
u32 adsp_ptr;
u32 *ptr;
int i;
adsp_ptr = mod->adsp_module_ptr;
ptr = (u32 *)mod->module_ptr;
for (i = 0; i < mod->size; i += 4)
seq_printf(s, "0x%x : 0x%08x\n", adsp_ptr + i, *(ptr + i));
return 0;
}
static int host_load_addr_app_file_node(struct seq_file *s, void *data)
{
struct nvadsp_app_service *ser = s->private;
struct adsp_module *mod = ser->mod;
seq_printf(s, "%p\n", mod->module_ptr);
return 0;
}
static int adsp_load_addr_app_file_node(struct seq_file *s, void *data)
{
struct nvadsp_app_service *ser = s->private;
struct adsp_module *mod = ser->mod;
seq_printf(s, "0x%x\n", mod->adsp_module_ptr);
return 0;
}
static int size_app_file_node(struct seq_file *s, void *data)
{
struct nvadsp_app_service *ser = s->private;
struct adsp_module *mod = ser->mod;
seq_printf(s, "%lu\n", mod->size);
return 0;
}
static int dram_app_file_node(struct seq_file *s, void *data)
{
const struct app_mem_size *mem_size = s->private;
seq_printf(s, "%llu\n", mem_size->dram);
return 0;
}
static int dram_shared_app_file_node(struct seq_file *s, void *data)
{
const struct app_mem_size *mem_size = s->private;
seq_printf(s, "%llu\n", mem_size->dram_shared);
return 0;
}
static int dram_shared_wc_app_file_node(struct seq_file *s, void *data)
{
const struct app_mem_size *mem_size = s->private;
seq_printf(s, "%llu\n", mem_size->dram_shared_wc);
return 0;
}
static int aram_app_file_node(struct seq_file *s, void *data)
{
const struct app_mem_size *mem_size = s->private;
seq_printf(s, "%llu\n", mem_size->aram);
return 0;
}
static int aram_exclusive_app_file_node(struct seq_file *s, void *data)
{
const struct app_mem_size *mem_size = s->private;
seq_printf(s, "%llu\n", mem_size->aram_x);
return 0;
}
#define ADSP_APP_CREATE_FOLDER(x, root) \
do {\
x = debugfs_create_dir(#x, root); \
if (IS_ERR_OR_NULL(x)) { \
dev_err(dev, "unable to create app %s folder\n", #x); \
ret = -ENOENT; \
goto rm_debug_root; \
} \
} while (0)
#define ADSP_APP_CREATE_FILE(x, priv, root) \
do { \
if (IS_ERR_OR_NULL(debugfs_create_file(#x, S_IRUSR, root, \
priv, &x##_node_operations))) { \
dev_err(dev, "unable tp create app %s file\n", #x); \
ret = -ENOENT; \
goto rm_debug_root; \
} \
} while (0)
#define ADSP_APP_FILE_OPERATION(x) \
static int x##_open(struct inode *inode, struct file *file) \
{ \
return single_open(file, x##_app_file_node, inode->i_private); \
} \
\
static const struct file_operations x##_node_operations = { \
.open = x##_open, \
.read = seq_read, \
.llseek = seq_lseek, \
.release = single_release, \
};
ADSP_APP_FILE_OPERATION(dump_binary_in_2bytes);
ADSP_APP_FILE_OPERATION(dump_binary_in_words);
ADSP_APP_FILE_OPERATION(host_load_addr);
ADSP_APP_FILE_OPERATION(adsp_load_addr);
ADSP_APP_FILE_OPERATION(size);
ADSP_APP_FILE_OPERATION(dram);
ADSP_APP_FILE_OPERATION(dram_shared);
ADSP_APP_FILE_OPERATION(dram_shared_wc);
ADSP_APP_FILE_OPERATION(aram);
ADSP_APP_FILE_OPERATION(aram_exclusive);
static int create_adsp_app_debugfs(struct nvadsp_app_service *ser)
{
struct app_mem_size *mem_size = (struct app_mem_size *)ser->mem_size;
struct device *dev = &priv.pdev->dev;
struct dentry *instance_mem_sizes;
struct dentry *root;
int ret = 0;
root = debugfs_create_dir(ser->name,
priv.adsp_app_debugfs_root);
if (IS_ERR_OR_NULL(root)) {
ret = -EINVAL;
goto err_out;
}
ADSP_APP_CREATE_FILE(dump_binary_in_2bytes, ser, root);
ADSP_APP_CREATE_FILE(dump_binary_in_words, ser, root);
ADSP_APP_CREATE_FILE(host_load_addr, ser, root);
ADSP_APP_CREATE_FILE(adsp_load_addr, ser, root);
ADSP_APP_CREATE_FILE(size, ser, root);
ADSP_APP_CREATE_FOLDER(instance_mem_sizes, root);
ADSP_APP_CREATE_FILE(dram, mem_size, instance_mem_sizes);
ADSP_APP_CREATE_FILE(dram_shared, mem_size, instance_mem_sizes);
ADSP_APP_CREATE_FILE(dram_shared_wc, mem_size, instance_mem_sizes);
ADSP_APP_CREATE_FILE(aram, mem_size, instance_mem_sizes);
ADSP_APP_CREATE_FILE(aram_exclusive, mem_size, instance_mem_sizes);
ser->debugfs = root;
return 0;
rm_debug_root:
debugfs_remove_recursive(root);
err_out:
return ret;
}
static int __init adsp_app_debug_init(struct dentry *root)
{
priv.adsp_app_debugfs_root = debugfs_create_dir("adsp_apps", root);
return IS_ERR_OR_NULL(priv.adsp_app_debugfs_root) ? -ENOMEM : 0;
}
#endif /* CONFIG_DEBUG_FS */
static struct nvadsp_app_service *get_loaded_service(const char *appfile)
{
struct device *dev = &priv.pdev->dev;
struct nvadsp_app_service *ser;
list_for_each_entry(ser, &priv.service_list, node) {
if (!strcmp(appfile, ser->name)) {
dev_dbg(dev, "module %s already loaded\n", appfile);
return ser;
}
}
dev_dbg(dev, "module %s will be loaded\n", appfile);
return NULL;
}
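/*
 * Strip the ".elf" extension so that "app.elf" and "app" resolve to the
 * same service name.
 */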
static inline void extract_appname(char *appname, const char *appfile)
{
char *token = strstr(appfile, DYN_APP_EXTN);
int len = token ? token - appfile : strlen(appfile);
strncpy(appname, appfile, len);
appname[len] = '\0';
}
static nvadsp_app_handle_t app_load(const char *appfile,
struct adsp_shared_app *shared_app, bool dynamic)
{
struct nvadsp_drv_data *drv_data;
struct device *dev = &priv.pdev->dev;
char appname[NVADSP_NAME_SZ] = { };
struct nvadsp_app_service *ser;
drv_data = platform_get_drvdata(priv.pdev);
extract_appname(appname, appfile);
mutex_lock(&priv.service_lock_list);
ser = get_loaded_service(appname);
if (!ser) {
/* dynamic loading is disabled when running in secure mode */
if (drv_data->adsp_os_secload && dynamic)
goto err;
dev_dbg(dev, "loading app %s %s\n", appfile, appname);
ser = devm_kzalloc(dev, sizeof(*ser), GFP_KERNEL);
if (!ser)
goto err;
strlcpy(ser->name, appname, NVADSP_NAME_SZ);
/*load the module in to memory */
ser->mod = dynamic ?
load_adsp_dynamic_module(appfile, appfile, dev) :
load_adsp_static_module(appfile, shared_app, dev);
if (IS_ERR_OR_NULL(ser->mod))
goto err_free_service;
ser->mem_size = &ser->mod->mem_size;
mutex_init(&ser->lock);
INIT_LIST_HEAD(&ser->app_head);
/* add the app instance service to the list */
list_add_tail(&ser->node, &priv.service_list);
#ifdef CONFIG_DEBUG_FS
create_adsp_app_debugfs(ser);
#endif
dev_dbg(dev, "loaded app %s\n", ser->name);
}
mutex_unlock(&priv.service_lock_list);
return ser;
err_free_service:
devm_kfree(dev, ser);
err:
mutex_unlock(&priv.service_lock_list);
return NULL;
}
nvadsp_app_handle_t nvadsp_app_load(const char *appname, const char *appfile)
{
struct nvadsp_drv_data *drv_data;
if (IS_ERR_OR_NULL(priv.pdev)) {
pr_err("ADSP Driver is not initialized\n");
return NULL;
}
drv_data = platform_get_drvdata(priv.pdev);
if (!drv_data->adsp_os_running)
return NULL;
return app_load(appfile, NULL, true);
}
EXPORT_SYMBOL(nvadsp_app_load);
static void free_instance_memory(nvadsp_app_info_t *app,
const struct app_mem_size *sz)
{
adsp_app_mem_t *mem = &app->mem;
adsp_app_iova_mem_t *iova_mem = &app->iova_mem;
if (mem->dram) {
nvadsp_free_coherent(sz->dram, mem->dram, iova_mem->dram);
mem->dram = NULL;
iova_mem->dram = 0;
}
if (mem->shared) {
nvadsp_free_coherent(sz->dram_shared, mem->shared,
iova_mem->shared);
mem->shared = NULL;
iova_mem->shared = 0;
}
if (mem->shared_wc) {
nvadsp_free_coherent(sz->dram_shared_wc, mem->shared_wc,
iova_mem->shared_wc);
mem->shared_wc = NULL;
iova_mem->shared_wc = 0;
}
if (mem->aram_flag)
aram_release(mem->aram);
else if (mem->aram)
nvadsp_free_coherent(sz->aram, mem->aram, iova_mem->aram);
mem->aram = NULL;
iova_mem->aram = 0;
mem->aram_flag = 0;
if (mem->aram_x_flag) {
aram_release(mem->aram_x);
mem->aram_x = NULL;
iova_mem->aram_x = 0;
mem->aram_x_flag = 0;
}
}
static int create_instance_memory(nvadsp_app_info_t *app,
const struct app_mem_size *sz)
{
adsp_app_iova_mem_t *iova_mem = &app->iova_mem;
struct device *dev = &priv.pdev->dev;
adsp_app_mem_t *mem = &app->mem;
char name[NVADSP_NAME_SZ];
void *aram_handle;
dma_addr_t da;
snprintf(name, NVADSP_NAME_SZ, "%s:%d", app->name, app->instance_id);
if (sz->dram) {
mem->dram = nvadsp_alloc_coherent(sz->dram, &da, GFP_KERNEL);
iova_mem->dram = (uint32_t)da;
if (!mem->dram) {
dev_err(dev, "app %s dram alloc failed\n",
name);
goto end;
}
dev_dbg(dev, "%s :: mem.dram %p 0x%x\n", name,
mem->dram, iova_mem->dram);
}
if (sz->dram_shared) {
mem->shared = nvadsp_alloc_coherent(sz->dram_shared,
&da, GFP_KERNEL);
if (!mem->shared) {
dev_err(dev, "app %s shared dram alloc failed\n",
name);
goto end;
}
iova_mem->shared = (uint32_t)da;
dev_dbg(dev, "%s :: mem.shared %p 0x%x\n", name,
mem->shared, iova_mem->shared);
}
if (sz->dram_shared_wc) {
mem->shared_wc = nvadsp_alloc_coherent(sz->dram_shared_wc,
&da, GFP_KERNEL);
if (!mem->shared_wc) {
dev_err(dev, "app %s shared dram wc alloc failed\n",
name);
goto end;
}
iova_mem->shared_wc = (uint32_t)da;
dev_dbg(dev, "%s :: mem.shared_wc %p 0x%x\n", name,
mem->shared_wc, iova_mem->shared_wc);
}
if (sz->aram) {
aram_handle = aram_request(name, sz->aram);
if (!IS_ERR_OR_NULL(aram_handle)) {
iova_mem->aram = aram_get_address(aram_handle);
mem->aram = aram_handle;
iova_mem->aram_flag = mem->aram_flag = 1;
dev_dbg(dev, "%s aram %x\n", name, iova_mem->aram);
} else {
dev_dbg(dev, "app %s no ARAM memory ! using DRAM\n",
name);
mem->aram = nvadsp_alloc_coherent(sz->aram,
&da, GFP_KERNEL);
if (!mem->aram) {
iova_mem->aram_flag = mem->aram_flag = 0;
dev_err(dev,
"app %s aram memory alloc failed\n",
name);
goto end;
}
iova_mem->aram = (uint32_t)da;
dev_dbg(dev, "%s :: mem.aram %p 0x%x\n", name,
mem->aram, iova_mem->aram);
}
}
if (sz->aram_x) {
aram_handle = aram_request(name, sz->aram_x);
if (!IS_ERR_OR_NULL(aram_handle)) {
iova_mem->aram_x = aram_get_address(aram_handle);
mem->aram_x = aram_handle;
iova_mem->aram_x_flag = mem->aram_x_flag = 1;
dev_dbg(dev, "aram_x %x\n", iova_mem->aram_x);
} else {
iova_mem->aram_x = 0;
iova_mem->aram_x_flag = mem->aram_x_flag = 0;
dev_err(dev, "app %s aram x memory alloc failed\n",
name);
}
}
return 0;
end:
free_instance_memory(app, sz);
return -ENOMEM;
}
static void fill_app_instance_data(nvadsp_app_info_t *app,
struct nvadsp_app_service *ser, nvadsp_app_args_t *app_args,
struct run_app_instance_data *data, uint32_t stack_sz)
{
adsp_app_iova_mem_t *iova_mem = &app->iova_mem;
data->adsp_mod_ptr = ser->mod->adsp_module_ptr;
/* copy the iova address to adsp so that adsp can access the memory */
data->dram_data_ptr = iova_mem->dram;
data->dram_shared_ptr = iova_mem->shared;
data->dram_shared_wc_ptr = iova_mem->shared_wc;
data->aram_ptr = iova_mem->aram;
data->aram_flag = iova_mem->aram_flag;
data->aram_x_ptr = iova_mem->aram_x;
data->aram_x_flag = iova_mem->aram_x_flag;
if (app_args)
memcpy(&data->app_args, app_args, sizeof(nvadsp_app_args_t));
/*
* app on adsp holds the reference of host app instance to communicate
* back when completed. This way we do not need to iterate through the
* list to find the instance.
*/
data->host_ref = (uint64_t)app;
/* copy instance mem_size */
memcpy(&data->mem_size, ser->mem_size, sizeof(struct app_mem_size));
}
static nvadsp_app_info_t *create_app_instance(nvadsp_app_handle_t handle,
nvadsp_app_args_t *app_args, struct run_app_instance_data *data,
app_complete_status_notifier notifier, uint32_t stack_size)
{
struct nvadsp_app_service *ser = (void *)handle;
struct device *dev = &priv.pdev->dev;
nvadsp_app_info_t *app;
int *state;
int *id;
app = kzalloc(sizeof(*app), GFP_KERNEL);
if (unlikely(!app)) {
dev_err(dev, "cannot allocate memory for app %s instance\n",
ser->name);
goto err_value;
}
/* set the instance name with the app name */
app->name = ser->name;
/* associate a unique id */
id = (int *)&app->instance_id;
*id = ser->generated_instance_id++;
/*
* hold the pointer to the service, to dereference later during deinit
*/
app->handle = ser;
/* create the instance memory required by the app instance */
if (create_instance_memory(app, ser->mem_size)) {
dev_err(dev, "instance creation failed for app %s:%d\n",
app->name, app->instance_id);
goto free_app;
}
/* assign the stack that is needed by the app */
data->stack_size = stack_size;
/* set the state to INITIALIZED. No need to do it in a spin lock */
state = (int *)&app->state;
*state = NVADSP_APP_STATE_INITIALIZED;
/* increment instance count and add the app instance to service list */
mutex_lock(&ser->lock);
list_add_tail(&app->node, &ser->app_head);
ser->instance++;
mutex_unlock(&ser->lock);
fill_app_instance_data(app, ser, app_args, data, stack_size);
init_completion(&app->wait_for_app_start);
init_completion(&app->wait_for_app_complete);
set_app_complete_notifier(app, notifier);
dev_dbg(dev, "app %s instance %d initialized\n",
app->name, app->instance_id);
dev_dbg(dev, "app %s has %d instances\n", ser->name, ser->instance);
goto end;
free_app:
kfree(app);
err_value:
app = ERR_PTR(-ENOMEM);
end:
return app;
}
nvadsp_app_info_t __must_check *nvadsp_app_init(nvadsp_app_handle_t handle,
nvadsp_app_args_t *args)
{
struct nvadsp_app_shared_msg_pool *msg_pool;
struct nvadsp_shared_mem *shared_mem;
union app_loader_message *message;
struct nvadsp_drv_data *drv_data;
struct app_loader_data *data;
nvadsp_app_info_t *app;
msgq_t *msgq_send;
int *state;
if (IS_ERR_OR_NULL(priv.pdev)) {
pr_err("ADSP Driver is not initialized\n");
goto err;
}
drv_data = platform_get_drvdata(priv.pdev);
if (!drv_data->adsp_os_running)
goto err;
if (IS_ERR_OR_NULL(handle))
goto err;
message = kzalloc(sizeof(*message), GFP_KERNEL);
if (!message)
goto err;
shared_mem = drv_data->shared_adsp_os_data;
msg_pool = &shared_mem->app_shared_msg_pool;
msgq_send = &msg_pool->app_loader_send_message.msgq;
data = &message->data;
app = create_app_instance(handle, args, &data->app_init, NULL, 0);
if (IS_ERR_OR_NULL(app)) {
kfree(message);
goto err;
}
app->priv = data;
data->app_init.message = ADSP_APP_INIT;
message->msgq_msg.size = MSGQ_MSG_PAYLOAD_WSIZE(*message);
msgq_queue_message(msgq_send, &message->msgq_msg);
if (app->return_status) {
state = (int *)&app->state;
*state = NVADSP_APP_STATE_STARTED;
}
nvadsp_mbox_send(&priv.mbox, 0, NVADSP_MBOX_SMSG, false, 0);
wait_for_completion(&app->wait_for_app_start);
init_completion(&app->wait_for_app_start);
return app;
err:
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(nvadsp_app_init);
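/*
 * start_app_on_adsp() queues the prepared loader message into the shared
 * app_loader msgq, marks the instance STARTED and kicks mailbox 0 so the
 * ADSP picks it up. When block is true it waits for the start completion
 * and reverts the state if the ADSP reported a non-zero return status.
 */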
static int start_app_on_adsp(nvadsp_app_info_t *app,
union app_loader_message *message, bool block)
{
struct nvadsp_app_shared_msg_pool *msg_pool;
struct device *dev = &priv.pdev->dev;
struct nvadsp_shared_mem *shared_mem;
struct nvadsp_drv_data *drv_data;
msgq_t *msgq_send;
int *state;
drv_data = platform_get_drvdata(priv.pdev);
shared_mem = drv_data->shared_adsp_os_data;
msg_pool = &shared_mem->app_shared_msg_pool;
msgq_send = &msg_pool->app_loader_send_message.msgq;
message->msgq_msg.size = MSGQ_MSG_PAYLOAD_WSIZE(*message);
msgq_queue_message(msgq_send, &message->msgq_msg);
state = (int *)&app->state;
*state = NVADSP_APP_STATE_STARTED;
nvadsp_mbox_send(&priv.mbox, 0, NVADSP_MBOX_SMSG, false, 0);
if (block) {
wait_for_completion(&app->wait_for_app_start);
if (app->return_status) {
dev_err(dev, "%s app instance %d failed to start\n",
app->name, app->instance_id);
state = (int *)&app->state;
*state = NVADSP_APP_STATE_INITIALIZED;
}
}
return app->return_status;
}
int nvadsp_app_start(nvadsp_app_info_t *app)
{
union app_loader_message *message;
struct app_loader_data *data;
struct nvadsp_drv_data *drv_data;
int ret = -EINVAL;
if (IS_ERR_OR_NULL(app))
return -EINVAL;
message = app->priv;
data = &message->data;
if (IS_ERR_OR_NULL(priv.pdev)) {
pr_err("ADSP Driver is not initialized\n");
goto err;
}
drv_data = platform_get_drvdata(priv.pdev);
if (!drv_data->adsp_os_running)
goto err;
data->app_init.message = ADSP_APP_START;
data->app_init.adsp_ref = app->token;
data->app_init.stack_size = app->stack_size;
ret = start_app_on_adsp(app, app->priv, true);
err:
return ret;
}
EXPORT_SYMBOL(nvadsp_app_start);
nvadsp_app_info_t *nvadsp_run_app(nvadsp_os_handle_t os_handle,
const char *appfile, nvadsp_app_args_t *app_args,
app_complete_status_notifier notifier, uint32_t stack_sz, bool block)
{
union app_loader_message message = {};
nvadsp_app_handle_t service_handle;
struct nvadsp_drv_data *drv_data;
nvadsp_app_info_t *info = NULL;
struct app_loader_data *data;
struct device *dev;
int ret;
if (IS_ERR_OR_NULL(priv.pdev)) {
pr_err("ADSP Driver is not initialized\n");
info = ERR_PTR(-EINVAL);
goto end;
}
drv_data = platform_get_drvdata(priv.pdev);
dev = &priv.pdev->dev;
if (!drv_data->adsp_os_running)
goto end;
if (IS_ERR_OR_NULL(appfile))
goto end;
data = &message.data;
service_handle = app_load(appfile, NULL, true);
if (!service_handle) {
dev_err(dev, "unable to load the app %s\n", appfile);
goto end;
}
info = create_app_instance(service_handle, app_args,
&data->app_init, notifier, stack_sz);
if (IS_ERR_OR_NULL(info)) {
dev_err(dev, "unable to create instance for app %s\n", appfile);
goto end;
}
data->app_init.message = RUN_ADSP_APP;
ret = start_app_on_adsp(info, &message, block);
if (ret) {
delete_app_instance(info);
info = NULL;
}
end:
return info;
}
EXPORT_SYMBOL(nvadsp_run_app);
static void delete_app_instance(nvadsp_app_info_t *app)
{
struct nvadsp_app_service *ser =
(struct nvadsp_app_service *)app->handle;
struct device *dev = &priv.pdev->dev;
dev_dbg(dev, "%s:freeing app %s:%d\n",
__func__, app->name, app->instance_id);
/* update the service app instance manager atomically */
mutex_lock(&ser->lock);
ser->instance--;
list_del(&app->node);
mutex_unlock(&ser->lock);
/* free instance memory */
free_instance_memory(app, ser->mem_size);
kfree(app->priv);
kfree(app);
}
void nvadsp_exit_app(nvadsp_app_info_t *app, bool terminate)
{
int *state;
if (IS_ERR_OR_NULL(priv.pdev)) {
pr_err("ADSP Driver is not initialized\n");
return;
}
if (IS_ERR_OR_NULL(app))
return;
/* TODO: add termination if possible to kill thread on adsp */
if (app->state == NVADSP_APP_STATE_STARTED) {
wait_for_completion(&app->wait_for_app_complete);
state = (int *)&app->state;
*state = NVADSP_APP_STATE_INITIALIZED;
}
delete_app_instance(app);
}
EXPORT_SYMBOL(nvadsp_exit_app);
int nvadsp_app_deinit(nvadsp_app_info_t *app)
{
nvadsp_exit_app(app, false);
return 0;
}
EXPORT_SYMBOL(nvadsp_app_deinit);
int nvadsp_app_stop(nvadsp_app_info_t *app)
{
return -ENOENT;
}
EXPORT_SYMBOL(nvadsp_app_stop);
void nvadsp_app_unload(nvadsp_app_handle_t handle)
{
struct nvadsp_drv_data *drv_data;
struct nvadsp_app_service *ser;
struct device *dev;
if (!priv.pdev) {
pr_err("ADSP Driver is not initialized\n");
return;
}
drv_data = platform_get_drvdata(priv.pdev);
dev = &priv.pdev->dev;
if (!drv_data->adsp_os_running)
return;
if (IS_ERR_OR_NULL(handle))
return;
ser = (struct nvadsp_app_service *)handle;
if (!ser->mod->dynamic)
return;
mutex_lock(&priv.service_lock_list);
if (ser->instance) {
dev_err(dev, "cannot unload app %s, has instances %d\n",
ser->name, ser->instance);
mutex_unlock(&priv.service_lock_list);
return;
}
list_del(&ser->node);
#ifdef CONFIG_DEBUG_FS
debugfs_remove_recursive(ser->debugfs);
#endif
unload_adsp_module(ser->mod);
devm_kfree(dev, ser);
mutex_unlock(&priv.service_lock_list);
}
EXPORT_SYMBOL(nvadsp_app_unload);
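/*
 * Mailbox handler for app loader status messages coming back from the ADSP:
 * it dequeues the status, resolves the host-side instance through the
 * host_ref cookie stored at init time, runs any registered notifier and
 * completes the start/complete waiters.
 */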
static status_t nvadsp_app_receive_handler(uint32_t msg, void *hdata)
{
union app_complete_status_message message = { };
struct nvadsp_app_shared_msg_pool *msg_pool;
struct app_complete_status_data *data;
struct nvadsp_shared_mem *shared_mem;
struct nvadsp_drv_data *drv_data;
struct platform_device *pdev;
nvadsp_app_info_t *app;
struct device *dev;
msgq_t *msgq_recv;
uint32_t *token;
pdev = hdata;
dev = &pdev->dev;
drv_data = platform_get_drvdata(pdev);
shared_mem = drv_data->shared_adsp_os_data;
msg_pool = &shared_mem->app_shared_msg_pool;
msgq_recv = &msg_pool->app_loader_recv_message.msgq;
data = &message.complete_status_data;
message.msgq_msg.size = MSGQ_MSG_PAYLOAD_WSIZE(*data);
if (msgq_dequeue_message(msgq_recv, &message.msgq_msg)) {
dev_err(dev, "unable to dequeue app status message\n");
return 0;
}
app = (nvadsp_app_info_t *)data->host_ref;
app->return_status = data->status;
app->status_msg = data->header.message;
token = (uint32_t *)&app->token;
*token = data->adsp_ref;
if (app->complete_status_notifier) {
app->complete_status_notifier(app,
app->status_msg, app->return_status);
}
switch (data->header.message) {
case ADSP_APP_START_STATUS:
complete_all(&app->wait_for_app_start);
break;
case ADSP_APP_COMPLETE_STATUS:
complete_all(&app->wait_for_app_complete);
break;
}
return 0;
}
int load_adsp_static_apps(void)
{
struct nvadsp_app_shared_msg_pool *msg_pool;
struct nvadsp_shared_mem *shared_mem;
struct nvadsp_drv_data *drv_data;
struct platform_device *pdev;
struct device *dev;
msgq_t *msgq_recv;
pdev = priv.pdev;
dev = &pdev->dev;
drv_data = platform_get_drvdata(pdev);
shared_mem = drv_data->shared_adsp_os_data;
msg_pool = &shared_mem->app_shared_msg_pool;
msgq_recv = &msg_pool->app_loader_recv_message.msgq;
while (1) {
union app_complete_status_message message = { };
struct adsp_static_app_data *data;
struct adsp_shared_app *shared_app;
char *name;
data = &message.static_app_data;
message.msgq_msg.size = MSGQ_MSG_PAYLOAD_WSIZE(*data);
if (msgq_dequeue_message(msgq_recv, &message.msgq_msg)) {
dev_err(dev, "dequeue of static apps failed\n");
return -EINVAL;
}
shared_app = &data->shared_app;
name = shared_app->name;
if (!shared_app->mod_ptr)
break;
/* Skip Start on boot apps */
if (shared_app->flags & ADSP_APP_FLAG_START_ON_BOOT)
continue;
app_load(name, shared_app, false);
}
return 0;
}
int __init nvadsp_app_module_probe(struct platform_device *pdev)
{
#ifdef CONFIG_DEBUG_FS
struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
#endif
uint16_t mbox_id = APP_LOADER_MBOX_ID;
struct device *dev = &pdev->dev;
int ret;
dev_info(dev, "%s\n", __func__);
ret = nvadsp_mbox_open(&priv.mbox, &mbox_id,
"app_service", nvadsp_app_receive_handler, pdev);
if (ret) {
dev_err(dev, "unable to open mailbox\n");
goto end;
}
priv.pdev = pdev;
INIT_LIST_HEAD(&priv.service_list);
init_completion(&priv.os_load_complete);
mutex_init(&priv.service_lock_list);
#ifdef CONFIG_DEBUG_FS
if (adsp_app_debug_init(drv_data->adsp_debugfs_root))
dev_err(&pdev->dev, "unable to create adsp apps debugfs\n");
#endif
end:
return ret;
}

@@ -0,0 +1,960 @@
/*
* nvadsp_app.c
*
* ADSP OS App management
*
* Copyright (C) 2014-2015 NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/tegra_nvadsp.h>
#include <linux/elf.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include "os.h"
#include "dram_app_mem_manager.h"
#include "adsp_shared_struct.h"
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif
#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif
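/*
 * As in the kernel module loader, section offsets within the app image are
 * stashed in sh_entsize during layout; INIT_OFFSET_MASK (the top bit) marks
 * offsets that belong to the ".init" part of the image.
 */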
#define BITS_PER_INT 32
#define INIT_OFFSET_MASK (1U << (BITS_PER_INT-1))
#define HWCAP_SWP (1 << 0)
#define HWCAP_HALF (1 << 1)
#define HWCAP_THUMB (1 << 2)
#define HWCAP_26BIT (1 << 3) /* Play it safe */
#define HWCAP_FAST_MULT (1 << 4)
#define HWCAP_FPA (1 << 5)
#define HWCAP_VFP (1 << 6)
#define HWCAP_EDSP (1 << 7)
#define HWCAP_JAVA (1 << 8)
#define HWCAP_IWMMXT (1 << 9)
#define HWCAP_CRUNCH (1 << 10)
#define HWCAP_THUMBEE (1 << 11)
#define HWCAP_NEON (1 << 12)
#define HWCAP_VFPv3 (1 << 13)
#define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */
#define HWCAP_TLS (1 << 15)
#define HWCAP_VFPv4 (1 << 16)
#define HWCAP_IDIVA (1 << 17)
#define HWCAP_IDIVT (1 << 18)
#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
#define HWCAP_LPAE (1 << 20)
#define HWCAP_EVTSTRM_32 (1 << 21)
#define EF_ARM_EABI_MASK 0xff000000
#define EF_ARM_EABI_UNKNOWN 0x00000000
#define EF_ARM_EABI_VER1 0x01000000
#define EF_ARM_EABI_VER2 0x02000000
#define EF_ARM_EABI_VER3 0x03000000
#define EF_ARM_EABI_VER4 0x04000000
#define EF_ARM_EABI_VER5 0x05000000
#define EF_ARM_BE8 0x00800000 /* ABI 4,5 */
#define EF_ARM_LE8 0x00400000 /* ABI 4,5 */
#define EF_ARM_MAVERICK_FLOAT 0x00000800 /* ABI 0 */
#define EF_ARM_VFP_FLOAT 0x00000400 /* ABI 0 */
#define EF_ARM_SOFT_FLOAT 0x00000200 /* ABI 0 */
#define EF_ARM_OLD_ABI 0x00000100 /* ABI 0 */
#define EF_ARM_NEW_ABI 0x00000080 /* ABI 0 */
#define EF_ARM_ALIGN8 0x00000040 /* ABI 0 */
#define EF_ARM_PIC 0x00000020 /* ABI 0 */
#define EF_ARM_MAPSYMSFIRST 0x00000010 /* ABI 2 */
#define EF_ARM_APCS_FLOAT 0x00000010 /* ABI 0, floats in fp regs */
#define EF_ARM_DYNSYMSUSESEGIDX 0x00000008 /* ABI 2 */
#define EF_ARM_APCS_26 0x00000008 /* ABI 0 */
#define EF_ARM_SYMSARESORTED 0x00000004 /* ABI 1,2 */
#define EF_ARM_INTERWORK 0x00000004 /* ABI 0 */
#define EF_ARM_HASENTRY 0x00000002 /* All */
#define EF_ARM_RELEXEC 0x00000001 /* All */
#define R_ARM_NONE 0
#define R_ARM_PC24 1
#define R_ARM_ABS32 2
#define R_ARM_CALL 28
#define R_ARM_JUMP24 29
#define R_ARM_TARGET1 38
#define R_ARM_V4BX 40
#define R_ARM_PREL31 42
#define R_ARM_MOVW_ABS_NC 43
#define R_ARM_MOVT_ABS 44
#define R_ARM_THM_CALL 10
#define R_ARM_THM_JUMP24 30
#define R_ARM_THM_MOVW_ABS_NC 47
#define R_ARM_THM_MOVT_ABS 48
struct load_info {
const char *name;
struct elf32_hdr *hdr;
unsigned long len;
struct elf32_shdr *sechdrs;
char *secstrings, *strtab;
unsigned long symoffs, stroffs;
unsigned int num_debug;
bool sig_ok;
struct device *dev;
struct {
unsigned int sym, str, mod, vers, info, pcpu;
} index;
};
static int
apply_relocate(const struct load_info *info, Elf32_Shdr *sechdrs,
const char *strtab, unsigned int symindex,
unsigned int relindex, struct adsp_module *module)
{
Elf32_Shdr *symsec = sechdrs + symindex;
Elf32_Shdr *relsec = sechdrs + relindex;
Elf32_Shdr *dstsec = sechdrs + relsec->sh_info;
Elf32_Rel *rel = (void *)info->hdr + relsec->sh_offset;
struct device *dev = info->dev;
unsigned int i;
dev_dbg(dev, "the relative section is %s dst %s sym %s\n",
info->secstrings + relsec->sh_name,
info->secstrings + dstsec->sh_name,
info->secstrings + symsec->sh_name);
for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) {
void *loc;
Elf32_Sym *sym;
const char *symname;
s32 offset;
u32 upper, lower, sign, j1, j2;
uint32_t adsp_loc;
bool switch_mode = false;
int h_bit = 0;
offset = ELF32_R_SYM(rel->r_info);
if (offset < 0 || (offset >
(symsec->sh_size / sizeof(Elf32_Sym)))) {
dev_err(dev, "%s: section %u reloc %u: bad relocation sym offset\n",
module->name, relindex, i);
return -ENOEXEC;
}
sym = ((Elf32_Sym *)(module->module_ptr
+ symsec->sh_addr)) + offset;
symname = info->strtab + sym->st_name;
dev_dbg(dev, "%s\n", symname);
if (rel->r_offset < 0 ||
rel->r_offset > dstsec->sh_size - sizeof(u32)) {
dev_err(dev,
"%s: section %u reloc %u sym '%s': out of bounds relocation, offset %d size %u\n",
module->name, relindex, i, symname,
rel->r_offset, dstsec->sh_size);
return -ENOEXEC;
}
loc = module->module_ptr + dstsec->sh_addr + rel->r_offset;
adsp_loc = module->adsp_module_ptr +
dstsec->sh_addr + rel->r_offset;
dev_dbg(dev, "%p 0x%x\n", loc, adsp_loc);
if (ELF_ST_BIND(sym->st_info) == STB_WEAK
&& sym->st_shndx == SHN_UNDEF) {
dev_dbg(dev, "STB_WEAK %s\n", symname);
continue;
}
switch (ELF32_R_TYPE(rel->r_info)) {
case R_ARM_NONE:
dev_dbg(dev, "R_ARM_NONE\n");
/* ignore */
break;
case R_ARM_ABS32:
case R_ARM_TARGET1:
dev_dbg(dev, "R_ARM_ABS32\n");
*(u32 *)loc += sym->st_value;
dev_dbg(dev, "addrs: 0x%x %p values: 0x%x 0x%x\n",
adsp_loc, loc, sym->st_value,
*(u32 *)loc);
break;
case R_ARM_PC24:
case R_ARM_CALL:
case R_ARM_JUMP24:
dev_dbg(dev, "R_ARM_CALL R_ARM_JUMP24\n");
offset = (*(u32 *)loc & 0x00ffffff) << 2;
if (offset & 0x02000000)
offset -= 0x04000000;
offset += sym->st_value - adsp_loc;
if ((ELF32_ST_TYPE(sym->st_info) == STT_FUNC)
&& (offset & 3)) {
dev_dbg(dev, "switching the mode from ARM to THUMB\n");
switch_mode = true;
h_bit = (offset & 2);
dev_dbg(dev,
"%s offset 0x%x hbit %d",
symname, offset, h_bit);
}
if (offset <= (s32)0xfe000000 ||
offset >= (s32)0x02000000) {
dev_err(dev,
"%s: section %u reloc %u sym '%s': relocation %u out of range (%p -> %#x)\n",
module->name, relindex, i, symname,
ELF32_R_TYPE(rel->r_info), loc,
sym->st_value);
return -ENOEXEC;
}
offset >>= 2;
*(u32 *)loc &= 0xff000000;
*(u32 *)loc |= offset & 0x00ffffff;
if (switch_mode) {
*(u32 *)loc &= ~(0xff000000);
if (h_bit)
*(u32 *)loc |= 0xfb000000;
else
*(u32 *)loc |= 0xfa000000;
}
dev_dbg(dev,
"%s address 0x%x instruction 0x%x\n",
symname, adsp_loc, *(u32 *)loc);
break;
case R_ARM_V4BX:
dev_dbg(dev, "R_ARM_V4BX\n");
/* Preserve Rm and the condition code. Alter
* other bits to re-code instruction as
* MOV PC,Rm.
*/
*(u32 *)loc &= 0xf000000f;
*(u32 *)loc |= 0x01a0f000;
break;
case R_ARM_PREL31:
dev_dbg(dev, "R_ARM_PREL31\n");
offset = *(u32 *)loc + sym->st_value - adsp_loc;
*(u32 *)loc = offset & 0x7fffffff;
break;
case R_ARM_MOVW_ABS_NC:
case R_ARM_MOVT_ABS:
dev_dbg(dev, "R_ARM_MOVT_ABS\n");
offset = *(u32 *)loc;
offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff);
offset = (offset ^ 0x8000) - 0x8000;
offset += sym->st_value;
if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS)
offset >>= 16;
*(u32 *)loc &= 0xfff0f000;
*(u32 *)loc |= ((offset & 0xf000) << 4) |
(offset & 0x0fff);
break;
case R_ARM_THM_CALL:
case R_ARM_THM_JUMP24:
dev_dbg(dev, "R_ARM_THM_CALL R_ARM_THM_JUMP24\n");
upper = *(u16 *)loc;
lower = *(u16 *)(loc + 2);
/*
* 25 bit signed address range (Thumb-2 BL and B.W
* instructions):
* S:I1:I2:imm10:imm11:0
* where:
* S = upper[10] = offset[24]
* I1 = ~(J1 ^ S) = offset[23]
* I2 = ~(J2 ^ S) = offset[22]
* imm10 = upper[9:0] = offset[21:12]
* imm11 = lower[10:0] = offset[11:1]
* J1 = lower[13]
* J2 = lower[11]
*/
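/*
 * Worked example (illustrative): upper = 0xf7ff, lower = 0xfffe is a
 * Thumb-2 BL with S = 1, J1 = J2 = 1, imm10 = 0x3ff, imm11 = 0x7fe,
 * which reconstructs to 0x1fffffc and sign-extends to an offset of -4.
 */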
sign = (upper >> 10) & 1;
j1 = (lower >> 13) & 1;
j2 = (lower >> 11) & 1;
offset = (sign << 24) | ((~(j1 ^ sign) & 1) << 23) |
((~(j2 ^ sign) & 1) << 22) |
((upper & 0x03ff) << 12) |
((lower & 0x07ff) << 1);
if (offset & 0x01000000)
offset -= 0x02000000;
offset += sym->st_value - adsp_loc;
/*
* For function symbols, only Thumb addresses are
* allowed (no interworking).
*
* For non-function symbols, the destination
* has no specific ARM/Thumb disposition, so
* the branch is resolved under the assumption
* that interworking is not required.
*/
if (ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
!(offset & 1)) {
dev_dbg(dev,
"switching the mode from THUMB to ARM\n");
switch_mode = true;
offset = ALIGN(offset, 4);
}
if (offset <= (s32)0xff000000 ||
offset >= (s32)0x01000000) {
dev_err(dev,
"%s: section %u reloc %u sym '%s': relocation %u out of range (%p -> %#x)\n",
module->name, relindex, i, symname,
ELF32_R_TYPE(rel->r_info), loc,
sym->st_value);
return -ENOEXEC;
}
sign = (offset >> 24) & 1;
j1 = sign ^ (~(offset >> 23) & 1);
j2 = sign ^ (~(offset >> 22) & 1);
*(u16 *)loc = (u16)((upper & 0xf800) | (sign << 10) |
((offset >> 12) & 0x03ff));
*(u16 *)(loc + 2) = (u16)((lower & 0xd000) |
(j1 << 13) | (j2 << 11) |
((offset >> 1) & 0x07ff));
if (switch_mode) {
lower = *(u16 *)(loc + 2);
lower &= (~(1 << 12));
*(u16 *)(loc + 2) = lower;
}
dev_dbg(dev,
"%s address 0x%x upper instruction 0x%x\n",
symname, adsp_loc, *(u16 *)loc);
dev_dbg(dev,
"%s address 0x%x lower instruction 0x%x\n",
symname, adsp_loc, *(u16 *)(loc + 2));
break;
case R_ARM_THM_MOVW_ABS_NC:
case R_ARM_THM_MOVT_ABS:
dev_dbg(dev, "in R_ARM_THM_MOVT_ABS\n");
upper = *(u16 *)loc;
lower = *(u16 *)(loc + 2);
/*
* MOVT/MOVW instructions encoding in Thumb-2:
*
* i = upper[10]
* imm4 = upper[3:0]
* imm3 = lower[14:12]
* imm8 = lower[7:0]
*
* imm16 = imm4:i:imm3:imm8
*/
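/*
 * Worked example (illustrative): a Thumb-2 MOVW r0, #0x1234 has
 * upper = 0xf241 and lower = 0x2034, so imm4:i:imm3:imm8 reassembles to
 * 0x1234 before the symbol value is added back in.
 */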
offset = ((upper & 0x000f) << 12) |
((upper & 0x0400) << 1) |
((lower & 0x7000) >> 4) | (lower & 0x00ff);
offset = (offset ^ 0x8000) - 0x8000;
offset += sym->st_value;
if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS)
offset >>= 16;
*(u16 *)loc = (u16)((upper & 0xfbf0) |
((offset & 0xf000) >> 12) |
((offset & 0x0800) >> 1));
*(u16 *)(loc + 2) = (u16)((lower & 0x8f00) |
((offset & 0x0700) << 4) |
(offset & 0x00ff));
break;
default:
dev_err(dev, "%s: unknown relocation: %u\n",
module->name, ELF32_R_TYPE(rel->r_info));
return -ENOEXEC;
}
}
return 0;
}
static int
apply_relocations(struct adsp_module *mod,
const struct load_info *info)
{
unsigned int i;
int err = 0;
/* Now do relocations. */
for (i = 1; i < info->hdr->e_shnum; i++) {
unsigned int infosec = info->sechdrs[i].sh_info;
/* Not a valid relocation section? */
if (infosec >= info->hdr->e_shnum)
continue;
/* Don't bother with non-allocated sections */
if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
continue;
if (info->sechdrs[i].sh_type == SHT_REL)
err = apply_relocate(info, info->sechdrs, info->strtab,
info->index.sym, i, mod);
else if (info->sechdrs[i].sh_type == SHT_RELA)
return -EINVAL;
if (err < 0)
break;
}
return err;
}
static int
simplify_symbols(struct adsp_module *mod,
const struct load_info *info)
{
Elf32_Shdr *symsec = &info->sechdrs[info->index.sym];
Elf32_Sym *sym = mod->module_ptr + symsec->sh_addr;
unsigned int secbase;
unsigned int i;
int ret = 0;
struct global_sym_info *sym_info;
struct device *dev = info->dev;
for (i = 1; i < symsec->sh_size / sizeof(Elf32_Sym); i++) {
const char *name = info->strtab + sym[i].st_name;
dev_dbg(dev, "%s\n", name);
switch (sym[i].st_shndx) {
case SHN_COMMON:
/* We compiled with -fno-common. These are not
supposed to happen. */
dev_err(dev, "Common symbol: '%s'\n", name);
dev_err(dev,
"please compile module %s with -fno-common\n",
mod->name);
ret = -ENOEXEC;
goto end;
case SHN_ABS:
/* Don't need to do anything */
dev_dbg(dev, "Absolute symbol: 0x%08lx\n",
(long)sym[i].st_value);
break;
case SHN_UNDEF:
sym_info = find_global_symbol(name);
/* Ok if resolved. */
if (sym_info) {
dev_dbg(dev, "SHN_UNDEF sym '%s':0x%x\n",
name, sym_info->addr);
sym[i].st_value = sym_info->addr;
sym[i].st_info = sym_info->info;
break;
}
if (ELF_ST_BIND(sym[i].st_info) == STB_WEAK) {
dev_dbg(dev, "WEAK SYM %s not resolved\n",
name);
break;
}
dev_err(dev, "No symbol '%s' found\n", name);
ret = -ENOEXEC;
goto end;
default:
/* Divert to percpu allocation if a percpu var. */
dev_dbg(dev, "default\n");
secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
sym[i].st_value += secbase + mod->adsp_module_ptr;
dev_dbg(dev, "symbol %s is 0x%x\n",
name, sym[i].st_value);
break;
}
}
end:
return ret;
}
static int move_module(struct adsp_module *mod, struct load_info *info)
{
struct device *dev = info->dev;
int i;
mod->handle = dram_app_mem_request(info->name, mod->size);
if (!mod->handle) {
dev_err(dev, "cannot allocate memory for app %s\n", info->name);
return -ENOMEM;
}
mod->adsp_module_ptr = dram_app_mem_get_address(mod->handle);
mod->module_ptr = nvadsp_da_to_va_mappings(mod->adsp_module_ptr,
mod->size);
dev_info(dev, "module %s Load address %p 0x%x\n", info->name,
mod->module_ptr, mod->adsp_module_ptr);
/* Transfer each section which specifies SHF_ALLOC */
dev_dbg(dev, "final section addresses:\n");
for (i = 0; i < info->hdr->e_shnum; i++) {
void *dest;
struct elf32_shdr *shdr = &info->sechdrs[i];
if (!(shdr->sh_flags & SHF_ALLOC))
continue;
if (shdr->sh_entsize & INIT_OFFSET_MASK) {
dev_dbg(dev, "%s %d\n",
info->secstrings + shdr->sh_name,
shdr->sh_entsize);
dest = mod->module_ptr
+ (shdr->sh_entsize & ~INIT_OFFSET_MASK);
} else {
dev_dbg(dev, "%s %d\n",
info->secstrings + shdr->sh_name,
shdr->sh_entsize);
dest = mod->module_ptr + shdr->sh_entsize;
}
if (shdr->sh_type != SHT_NOBITS)
memcpy(dest,
(void *)info->hdr + shdr->sh_offset,
shdr->sh_size);
/* Update sh_addr to point to copy in image. */
shdr->sh_addr = (uint32_t)(dest - mod->module_ptr);
dev_dbg(dev, "name %s 0x%x %p 0x%x 0x%x\n",
info->secstrings + shdr->sh_name, shdr->sh_addr,
dest, shdr->sh_addr + mod->adsp_module_ptr,
shdr->sh_size);
}
return 0;
}
static int get_offset(struct adsp_module *mod, size_t *size,
struct elf32_shdr *sechdr, unsigned int section)
{
int ret;
ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
*size = ret + sechdr->sh_size;
return ret;
}
static bool
is_core_symbol(const struct elf32_sym *src,
const struct elf32_shdr *sechdrs, unsigned int shnum)
{
const struct elf32_shdr *sec;
if (src->st_shndx == SHN_UNDEF
|| src->st_shndx >= shnum
|| !src->st_name)
return false;
sec = sechdrs + src->st_shndx;
if (!(sec->sh_flags & SHF_ALLOC)
#ifndef CONFIG_KALLSYMS_ALL
|| !(sec->sh_flags & SHF_EXECINSTR)
#endif
|| (sec->sh_entsize & INIT_OFFSET_MASK))
return false;
return true;
}
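/*
 * layout_sections() walks the section headers twice: core (non ".init")
 * sections first, then ".init" sections, accumulating offsets into
 * mod->size via get_offset() and recording each section's offset in
 * sh_entsize (with INIT_OFFSET_MASK set for the init pass).
 */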
static void layout_sections(struct adsp_module *mod, struct load_info *info)
{
static unsigned long const masks[][2] = {
/* NOTE: all executable code must be the first section
* in this array; otherwise modify the text_size
* finder in the two loops below */
{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
{ ARCH_SHF_SMALL | SHF_ALLOC, 0 }
};
unsigned int m, i;
struct device *dev = info->dev;
for (i = 0; i < info->hdr->e_shnum; i++)
info->sechdrs[i].sh_entsize = ~0U;
dev_dbg(dev, "Core section allocation order:\n");
for (m = 0; m < ARRAY_SIZE(masks); ++m) {
for (i = 0; i < info->hdr->e_shnum; ++i) {
struct elf32_shdr *s = &info->sechdrs[i];
const char *sname = info->secstrings + s->sh_name;
if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0U
|| strstarts(sname, ".init"))
continue;
s->sh_entsize = get_offset(mod, &mod->size, s, i);
dev_dbg(dev, "\t%s %d\n", sname, s->sh_entsize);
}
}
dev_dbg(dev, "Init section allocation order:\n");
for (m = 0; m < ARRAY_SIZE(masks); ++m) {
for (i = 0; i < info->hdr->e_shnum; ++i) {
struct elf32_shdr *s = &info->sechdrs[i];
const char *sname = info->secstrings + s->sh_name;
if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0U
|| !strstarts(sname, ".init"))
continue;
s->sh_entsize = (get_offset(mod, &mod->size, s, i)
| INIT_OFFSET_MASK);
dev_dbg(dev, "\t%s %d\n", sname, s->sh_entsize);
}
}
}
static int rewrite_section_headers(struct load_info *info)
{
unsigned int i;
struct device *dev = info->dev;
/* This should always be true, but let's be sure. */
info->sechdrs[0].sh_addr = 0;
for (i = 1; i < info->hdr->e_shnum; i++) {
struct elf32_shdr *shdr = &info->sechdrs[i];
if (shdr->sh_type != SHT_NOBITS
&& info->len < shdr->sh_offset + shdr->sh_size) {
dev_err(dev, "Module len %lu truncated\n", info->len);
return -ENOEXEC;
}
/* Mark all sections sh_addr with their address in the
temporary image. */
shdr->sh_addr = shdr->sh_offset;
}
return 0;
}
static struct adsp_module *setup_load_info(struct load_info *info)
{
unsigned int i;
int err;
struct adsp_module *mod;
struct device *dev = info->dev;
/* Set up the convenience variables */
info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
info->secstrings = (void *)info->hdr
+ info->sechdrs[info->hdr->e_shstrndx].sh_offset;
err = rewrite_section_headers(info);
if (err)
return ERR_PTR(err);
/* Find internal symbols and strings. */
for (i = 1; i < info->hdr->e_shnum; i++) {
if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
info->index.sym = i;
info->index.str = info->sechdrs[i].sh_link;
info->strtab = (char *)info->hdr
+ info->sechdrs[info->index.str].sh_offset;
break;
}
}
/* This is temporary: point mod into copy of data. */
mod = kzalloc(sizeof(struct adsp_module), GFP_KERNEL);
if (!mod) {
dev_err(dev, "Unable to create module\n");
return ERR_PTR(-ENOMEM);
}
if (info->index.sym == 0) {
dev_warn(dev, "%s: module has no symbols (stripped?)\n",
mod->name);
kfree(mod);
return ERR_PTR(-ENOEXEC);
}
return mod;
}
static void layout_symtab(struct adsp_module *mod, struct load_info *info)
{
struct elf32_shdr *symsect = info->sechdrs + info->index.sym;
struct elf32_shdr *strsect = info->sechdrs + info->index.str;
const struct elf32_sym *src;
unsigned int i, nsrc, ndst, strtab_size = 0;
struct device *dev = info->dev;
/* Put symbol section at end of init part of module. */
symsect->sh_flags |= SHF_ALLOC;
symsect->sh_entsize = get_offset(mod, &mod->size, symsect,
info->index.sym) | INIT_OFFSET_MASK;
dev_dbg(dev, "\t%s %d\n", info->secstrings + symsect->sh_name,
symsect->sh_entsize);
src = (void *)info->hdr + symsect->sh_offset;
nsrc = symsect->sh_size / sizeof(*src);
/* Compute total space required for the core symbols' strtab. */
for (ndst = i = 0; i < nsrc; i++) {
if (i == 0 ||
is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
strtab_size += strlen(&info->strtab[src[i].st_name])+1;
ndst++;
}
}
/* Append room for core symbols at end of core part. */
info->symoffs = ALIGN(mod->size, symsect->sh_addralign ?: 1);
info->stroffs = mod->size = info->symoffs + ndst * sizeof(Elf32_Sym);
mod->size += strtab_size;
/* Put string table section at end of init part of module. */
strsect->sh_flags |= SHF_ALLOC;
strsect->sh_entsize = get_offset(mod, &mod->size, strsect,
info->index.str) | INIT_OFFSET_MASK;
dev_dbg(dev, "\t%s %d\n",
info->secstrings + strsect->sh_name,
symsect->sh_entsize);
}
static struct adsp_module *layout_and_allocate(struct load_info *info)
{
/* Module within temporary copy. */
struct adsp_module *mod;
int err;
mod = setup_load_info(info);
if (IS_ERR(mod))
return mod;
mod->name = info->name;
/* Determine total sizes, and put offsets in sh_entsize. For now
this is done generically; there doesn't appear to be any
special cases for the architectures. */
layout_sections(mod, info);
layout_symtab(mod, info);
/* Allocate and move to the final place */
err = move_module(mod, info);
if (err) {
/* TODO: need to handle error path more generically */
kfree(mod);
return ERR_PTR(err);
}
return mod;
}
static int elf_check_arch_arm32(const struct elf32_hdr *x)
{
unsigned int eflags;
/* Make sure it's an ARM executable */
if (x->e_machine != EM_ARM)
return 0;
/* Make sure the entry address is reasonable */
if (x->e_entry & 1) {
if (!(elf_hwcap & HWCAP_THUMB))
return 0;
} else if (x->e_entry & 3)
return 0;
eflags = x->e_flags;
if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
unsigned int flt_fmt;
/* APCS26 is only allowed if the CPU supports it */
if ((eflags & EF_ARM_APCS_26) && !(elf_hwcap & HWCAP_26BIT))
return 0;
flt_fmt = eflags & (EF_ARM_VFP_FLOAT | EF_ARM_SOFT_FLOAT);
/* VFP requires the supporting code */
if (flt_fmt == EF_ARM_VFP_FLOAT && !(elf_hwcap & HWCAP_VFP))
return 0;
}
return 1;
}
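/*
 * elf_header_check() sanity-checks the firmware blob before it is parsed:
 * ELF magic, relocatable (ET_REL) type, ARM32 machine via the check above,
 * expected section header size and section table bounds within the image.
 */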
static int elf_header_check(struct load_info *info)
{
if (info->len < sizeof(*(info->hdr)))
return -ENOEXEC;
if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
|| info->hdr->e_type != ET_REL
|| !elf_check_arch_arm32(info->hdr)
|| info->hdr->e_shentsize != sizeof(Elf32_Shdr))
return -ENOEXEC;
if (info->hdr->e_shoff >= info->len
|| (info->hdr->e_shnum * sizeof(Elf32_Shdr) >
info->len - info->hdr->e_shoff))
return -ENOEXEC;
return 0;
}
struct adsp_module *load_adsp_static_module(const char *appname,
struct adsp_shared_app *shared_app, struct device *dev)
{
struct adsp_module *mod = NULL;
mod = kzalloc(sizeof(struct adsp_module), GFP_KERNEL);
if (!mod)
return NULL;
memcpy((struct app_mem_size *)&mod->mem_size,
&shared_app->mem_size, sizeof(shared_app->mem_size));
mod->adsp_module_ptr = shared_app->mod_ptr;
mod->dynamic = false;
return mod;
}
struct adsp_module *load_adsp_dynamic_module(const char *appname,
const char *appfile, struct device *dev)
{
struct load_info info = { };
struct adsp_module *mod = NULL;
const struct firmware *fw;
struct elf32_shdr *data_shdr;
struct elf32_shdr *shared_shdr;
struct elf32_shdr *shared_wc_shdr;
struct elf32_shdr *aram_shdr;
struct elf32_shdr *aram_x_shdr;
struct app_mem_size *mem_size;
int ret;
ret = request_firmware(&fw, appfile, dev);
if (ret < 0) {
dev_err(dev,
"request firmware for %s(%s) failed with %d\n",
appname, appfile, ret);
return ERR_PTR(ret);
}
info.hdr = (struct elf32_hdr *)fw->data;
info.len = fw->size;
info.dev = dev;
info.name = appname;
ret = elf_header_check(&info);
if (ret) {
dev_err(dev,
"%s is not an elf file\n", appfile);
goto error_release_fw;
}
/* Figure out module layout, and allocate all the memory. */
mod = layout_and_allocate(&info);
if (IS_ERR(mod))
goto error_release_fw;
/* update adsp specific sections */
data_shdr = nvadsp_get_section(fw, ".dram_data");
shared_shdr = nvadsp_get_section(fw, ".dram_shared");
shared_wc_shdr = nvadsp_get_section(fw, ".dram_shared_wc");
aram_shdr = nvadsp_get_section(fw, ".aram_data");
aram_x_shdr = nvadsp_get_section(fw, ".aram_x_data");
mem_size = (void *)&mod->mem_size;
if (data_shdr) {
dev_dbg(dev, "mem_size.dram_data %d\n",
data_shdr->sh_size);
mem_size->dram = data_shdr->sh_size;
}
if (shared_shdr) {
dev_dbg(dev, "mem_size.dram_shared %d\n",
shared_shdr->sh_size);
mem_size->dram_shared =
shared_shdr->sh_size;
}
if (shared_wc_shdr) {
dev_dbg(dev, "shared_wc_shdr->sh_size %d\n",
shared_wc_shdr->sh_size);
mem_size->dram_shared_wc =
shared_wc_shdr->sh_size;
}
if (aram_shdr) {
dev_dbg(dev, "aram_shdr->sh_size %d\n", aram_shdr->sh_size);
mem_size->aram = aram_shdr->sh_size;
}
if (aram_x_shdr) {
dev_dbg(dev,
"aram_x_shdr->sh_size %d\n", aram_x_shdr->sh_size);
mem_size->aram_x = aram_x_shdr->sh_size;
}
/* Fix up syms, so that st_value is a pointer to location. */
ret = simplify_symbols(mod, &info);
if (ret) {
dev_err(dev, "Unable to simplify symbols\n");
goto unload_module;
}
dev_dbg(dev, "applying relocation\n");
ret = apply_relocations(mod, &info);
if (ret) {
dev_err(dev, "relocation failed\n");
goto unload_module;
}
mod->dynamic = true;
error_release_fw:
release_firmware(fw);
return IS_ERR_VALUE(ret) ? ERR_PTR(ret) : mod;
unload_module:
unload_adsp_module(mod);
release_firmware(fw);
return ERR_PTR(ret);
}
void unload_adsp_module(struct adsp_module *mod)
{
dram_app_mem_release(mod->handle);
kfree(mod);
}

@@ -0,0 +1,105 @@
/*
* aram_manager.c
*
* ARAM manager
*
* Copyright (C) 2014-2016, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) "%s : %d, " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include "aram_manager.h"
static void *aram_handle;
static LIST_HEAD(aram_alloc_list);
static LIST_HEAD(aram_free_list);
void aram_print(void)
{
mem_print(aram_handle);
}
EXPORT_SYMBOL(aram_print);
void *aram_request(const char *name, size_t size)
{
return mem_request(aram_handle, name, size);
}
EXPORT_SYMBOL(aram_request);
bool aram_release(void *handle)
{
return mem_release(aram_handle, handle);
}
EXPORT_SYMBOL(aram_release);
unsigned long aram_get_address(void *handle)
{
return mem_get_address(handle);
}
EXPORT_SYMBOL(aram_get_address);
#ifdef CONFIG_DEBUG_FS
static struct dentry *aram_dump_debugfs_file;
static int aram_dump(struct seq_file *s, void *data)
{
mem_dump(aram_handle, s);
return 0;
}
static int aram_dump_open(struct inode *inode, struct file *file)
{
return single_open(file, aram_dump, inode->i_private);
}
static const struct file_operations aram_dump_fops = {
.open = aram_dump_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#endif
int aram_init(unsigned long addr, unsigned long size)
{
aram_handle = create_mem_manager("ARAM", addr, size);
if (IS_ERR(aram_handle)) {
pr_err("ERROR: failed to create aram memory_manager\n");
return PTR_ERR(aram_handle);
}
#ifdef CONFIG_DEBUG_FS
aram_dump_debugfs_file = debugfs_create_file("aram_dump",
S_IRUSR, NULL, NULL, &aram_dump_fops);
if (!aram_dump_debugfs_file) {
pr_err("ERROR: failed to create aram_dump debugfs\n");
destroy_mem_manager(aram_handle);
return -ENOMEM;
}
#endif
return 0;
}
EXPORT_SYMBOL(aram_init);
void aram_exit(void)
{
#ifdef CONFIG_DEBUG_FS
debugfs_remove(aram_dump_debugfs_file);
#endif
destroy_mem_manager(aram_handle);
}
EXPORT_SYMBOL(aram_exit);

@@ -0,0 +1,30 @@
/*
* Header file for aram manager
*
* Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef __TEGRA_NVADSP_ARAM_MANAGER_H
#define __TEGRA_NVADSP_ARAM_MANAGER_H
#include "mem_manager.h"
int aram_init(unsigned long addr, unsigned long size);
void aram_exit(void);
void *aram_request(const char *name, size_t size);
bool aram_release(void *handle);
unsigned long aram_get_address(void *handle);
void aram_print(void);
#endif /* __TEGRA_NVADSP_ARAM_MANAGER_H */

@@ -0,0 +1,319 @@
/*
* dev-t21x.c
*
* A device driver for ADSP and APE
*
* Copyright (C) 2014-2016, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/tegra_nvadsp.h>
#include <linux/clk/tegra.h>
#include <linux/delay.h>
#include "dev.h"
#include "amc.h"
#ifdef CONFIG_PM
static void nvadsp_clocks_disable(struct platform_device *pdev)
{
struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
if (drv_data->uartape_clk) {
clk_disable_unprepare(drv_data->uartape_clk);
dev_dbg(dev, "uartape clock disabled\n");
drv_data->uartape_clk = NULL;
}
if (drv_data->adsp_cpu_clk) {
clk_disable_unprepare(drv_data->adsp_cpu_clk);
dev_dbg(dev, "adsp_cpu clock disabled\n");
drv_data->adsp_cpu_clk = NULL;
}
if (drv_data->adsp_clk) {
clk_disable_unprepare(drv_data->adsp_clk);
dev_dbg(dev, "adsp clocks disabled\n");
drv_data->adsp_clk = NULL;
}
if (drv_data->ape_clk) {
clk_disable_unprepare(drv_data->ape_clk);
dev_dbg(dev, "ape clock disabled\n");
drv_data->ape_clk = NULL;
}
if (drv_data->ape_emc_clk) {
clk_disable_unprepare(drv_data->ape_emc_clk);
dev_dbg(dev, "ape.emc clock disabled\n");
drv_data->ape_emc_clk = NULL;
}
if (drv_data->ahub_clk) {
clk_disable_unprepare(drv_data->ahub_clk);
dev_dbg(dev, "ahub clock disabled\n");
drv_data->ahub_clk = NULL;
}
}
static int nvadsp_clocks_enable(struct platform_device *pdev)
{
struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
uint32_t val;
int ret = 0;
drv_data->ahub_clk = clk_get_sys("nvadsp", "ahub");
if (IS_ERR_OR_NULL(drv_data->ahub_clk)) {
dev_err(dev, "unable to find ahub clock\n");
ret = PTR_ERR(drv_data->ahub_clk);
goto end;
}
ret = clk_prepare_enable(drv_data->ahub_clk);
if (ret) {
dev_err(dev, "unable to enable ahub clock\n");
goto end;
}
dev_dbg(dev, "ahub clock enabled\n");
drv_data->ape_clk = clk_get_sys(NULL, "adsp.ape");
if (IS_ERR_OR_NULL(drv_data->ape_clk)) {
dev_err(dev, "unable to find ape clock\n");
ret = PTR_ERR(drv_data->ape_clk);
goto end;
}
ret = clk_prepare_enable(drv_data->ape_clk);
if (ret) {
dev_err(dev, "unable to enable ape clock\n");
goto end;
}
dev_dbg(dev, "ape clock enabled\n");
drv_data->adsp_clk = clk_get_sys(NULL, "adsp");
if (IS_ERR_OR_NULL(drv_data->adsp_clk)) {
dev_err(dev, "unable to find adsp clock\n");
ret = PTR_ERR(drv_data->adsp_clk);
goto end;
}
ret = clk_prepare_enable(drv_data->adsp_clk);
if (ret) {
dev_err(dev, "unable to enable adsp clock\n");
goto end;
}
drv_data->adsp_cpu_clk = clk_get_sys(NULL, "adsp_cpu");
if (IS_ERR_OR_NULL(drv_data->adsp_cpu_clk)) {
dev_err(dev, "unable to find adsp cpu clock\n");
ret = PTR_ERR(drv_data->adsp_cpu_clk);
goto end;
}
ret = clk_prepare_enable(drv_data->adsp_cpu_clk);
if (ret) {
dev_err(dev, "unable to enable adsp cpu clock\n");
goto end;
}
dev_dbg(dev, "adsp cpu clock enabled\n");
drv_data->ape_emc_clk = clk_get_sys("ape", "emc");
if (IS_ERR_OR_NULL(drv_data->ape_emc_clk)) {
dev_err(dev, "unable to find ape.emc clock\n");
ret = PTR_ERR(drv_data->ape_emc_clk);
goto end;
}
ret = clk_prepare_enable(drv_data->ape_emc_clk);
if (ret) {
dev_err(dev, "unable to enable ape.emc clock\n");
goto end;
}
dev_dbg(dev, "ape.emc is enabled\n");
drv_data->uartape_clk = clk_get_sys("uartape", NULL);
if (IS_ERR_OR_NULL(drv_data->uartape_clk)) {
dev_err(dev, "unable to find uart ape clk\n");
ret = PTR_ERR(drv_data->uartape_clk);
goto end;
}
ret = clk_prepare_enable(drv_data->uartape_clk);
if (ret) {
dev_err(dev, "unable to enable uartape clock\n");
goto end;
}
clk_set_rate(drv_data->uartape_clk, UART_BAUD_RATE * 16);
dev_dbg(dev, "uartape clock enabled\n");
/* Set MAXCLKLATENCY value before ADSP deasserting reset */
val = readl(drv_data->base_regs[AMISC] + ADSP_CONFIG);
writel(val | MAXCLKLATENCY, drv_data->base_regs[AMISC] + ADSP_CONFIG);
dev_dbg(dev, "all clocks enabled\n");
return 0;
end:
nvadsp_clocks_disable(pdev);
return ret;
}
static inline bool nvadsp_amsic_skip_reg(u32 offset)
{
if (offset == AMISC_ADSP_L2_REGFILEBASE ||
offset == AMISC_SHRD_SMP_STA ||
(offset >= AMISC_SEM_REG_START && offset <= AMISC_SEM_REG_END) ||
offset == AMISC_TSC ||
offset == AMISC_ACTMON_AVG_CNT) {
return true;
} else {
return false;
}
}
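/*
 * The AMISC save/restore paths below walk the AMISC and ADSP actmon
 * register windows word by word and skip the registers filtered above
 * (L2 regfile base, shared SMP status, semaphores, TSC, actmon average
 * count), presumably because they hold volatile runtime state.
 */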
static int nvadsp_amisc_save(struct platform_device *pdev)
{
struct nvadsp_drv_data *d = platform_get_drvdata(pdev);
u32 val, offset;
int i = 0;
offset = AMISC_REG_START_OFFSET;
while (offset <= AMISC_REG_MBOX_OFFSET) {
if (nvadsp_amsic_skip_reg(offset)) {
offset += 4;
continue;
}
val = readl(d->base_regs[AMISC] + offset);
d->state.amisc_regs[i++] = val;
offset += 4;
}
offset = ADSP_ACTMON_REG_START_OFFSET;
while (offset <= ADSP_ACTMON_REG_END_OFFSET) {
if (nvadsp_amsic_skip_reg(offset)) {
offset += 4;
continue;
}
val = readl(d->base_regs[AMISC] + offset);
d->state.amisc_regs[i++] = val;
offset += 4;
}
return 0;
}
static int nvadsp_amisc_restore(struct platform_device *pdev)
{
struct nvadsp_drv_data *d = platform_get_drvdata(pdev);
u32 val, offset;
int i = 0;
offset = AMISC_REG_START_OFFSET;
while (offset <= AMISC_REG_MBOX_OFFSET) {
if (nvadsp_amsic_skip_reg(offset)) {
offset += 4;
continue;
}
val = d->state.amisc_regs[i++];
writel(val, d->base_regs[AMISC] + offset);
offset += 4;
}
offset = ADSP_ACTMON_REG_START_OFFSET;
while (offset <= ADSP_ACTMON_REG_END_OFFSET) {
if (nvadsp_amsic_skip_reg(offset)) {
offset += 4;
continue;
}
val = d->state.amisc_regs[i++];
writel(val, d->base_regs[AMISC] + offset);
offset += 4;
}
return 0;
}
static int __nvadsp_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
int ret = 0;
dev_dbg(dev, "restoring adsp base regs\n");
drv_data->base_regs = drv_data->base_regs_saved;
dev_dbg(dev, "enabling clocks\n");
ret = nvadsp_clocks_enable(pdev);
if (ret) {
dev_err(dev, "nvadsp_clocks_enable failed\n");
goto skip;
}
if (!drv_data->adsp_os_suspended) {
dev_dbg(dev, "%s: adsp os is not suspended\n", __func__);
goto skip;
}
dev_dbg(dev, "restoring ape state\n");
nvadsp_amc_restore(pdev);
nvadsp_aram_restore(pdev);
nvadsp_amisc_restore(pdev);
skip:
return ret;
}
static int __nvadsp_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
int ret = 0;
if (!drv_data->adsp_os_suspended) {
dev_dbg(dev, "%s: adsp os is not suspended\n", __func__);
goto clocks;
}
dev_dbg(dev, "saving amisc\n");
nvadsp_amisc_save(pdev);
dev_dbg(dev, "saving aram\n");
nvadsp_aram_save(pdev);
dev_dbg(dev, "saving amc\n");
nvadsp_amc_save(pdev);
clocks:
dev_dbg(dev, "disabling clocks\n");
nvadsp_clocks_disable(pdev);
dev_dbg(dev, "locking out adsp base regs\n");
drv_data->base_regs = NULL;
return ret;
}
static int __nvadsp_runtime_idle(struct device *dev)
{
return 0;
}
int __init nvadsp_pm_init(struct platform_device *pdev)
{
struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
drv_data->runtime_suspend = __nvadsp_runtime_suspend;
drv_data->runtime_resume = __nvadsp_runtime_resume;
drv_data->runtime_idle = __nvadsp_runtime_idle;
return 0;
}
#endif /* CONFIG_PM */
int __init nvadsp_reset_init(struct platform_device *pdev)
{
return 0;
}

@@ -0,0 +1,52 @@
/*
* Copyright (C) 2015-2016, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __TEGRA_NVADSP_DEV_T21X_H
#define __TEGRA_NVADSP_DEV_T21X_H
/*
* Note: These enums should be aligned to the regs mentioned in the
* device tree
*/
enum {
AMC,
AMISC,
ABRIDGE,
UNIT_FPGA_RST,
APE_MAX_REG
};
enum {
ADSP_DRAM1,
ADSP_DRAM2,
ADSP_MAX_DRAM_MAP
};
/*
* Note: These enums should be aligned to the adsp_mem node mentioned in the
* device tree
*/
enum adsp_mem_dt {
ADSP_OS_ADDR,
ADSP_OS_SIZE,
ADSP_APP_ADDR,
ADSP_APP_SIZE,
ARAM_ALIAS_0_ADDR,
ARAM_ALIAS_0_SIZE,
ACSR_ADDR, /* ACSR: ADSP CPU SHARED REGION */
ACSR_SIZE,
ADSP_MEM_END,
};
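/*
 * Illustrative only: nvadsp_parse_dt() reads the "nvidia,adsp_mem" property
 * as one u32 cell per entry of this enum, i.e. <os_addr os_size app_addr
 * app_size aram0_addr aram0_size acsr_addr acsr_size>; the real values come
 * from the platform device tree.
 */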
#endif /* __TEGRA_NVADSP_DEV_T21X_H */

@@ -0,0 +1,414 @@
/*
* dev.c
*
* A device driver for ADSP and APE
*
* Copyright (C) 2014-2016, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/platform_device.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/tegra_nvadsp.h>
#include <linux/tegra-soc.h>
#include <linux/pm_runtime.h>
#include <linux/tegra_pm_domains.h>
#include <linux/clk/tegra.h>
#include <linux/delay.h>
#include <asm/arch_timer.h>
#include "dev.h"
#include "os.h"
#include "amc.h"
#include "ape_actmon.h"
#include "aram_manager.h"
static struct nvadsp_drv_data *nvadsp_drv_data;
#ifdef CONFIG_DEBUG_FS
static int __init adsp_debug_init(struct nvadsp_drv_data *drv_data)
{
drv_data->adsp_debugfs_root = debugfs_create_dir("tegra_ape", NULL);
if (!drv_data->adsp_debugfs_root)
return -ENOMEM;
return 0;
}
#endif /* CONFIG_DEBUG_FS */
#ifdef CONFIG_PM_SLEEP
static int nvadsp_suspend(struct device *dev)
{
return 0;
}
static int nvadsp_resume(struct device *dev)
{
return 0;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int nvadsp_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
int ret = -EINVAL;
if (drv_data->runtime_resume)
ret = drv_data->runtime_resume(dev);
return ret;
}
static int nvadsp_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
int ret = -EINVAL;
if (drv_data->runtime_suspend)
ret = drv_data->runtime_suspend(dev);
return ret;
}
static int nvadsp_runtime_idle(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
int ret = 0;
if (drv_data->runtime_idle)
ret = drv_data->runtime_idle(dev);
return ret;
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops nvadsp_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(nvadsp_suspend, nvadsp_resume)
SET_RUNTIME_PM_OPS(nvadsp_runtime_suspend, nvadsp_runtime_resume,
nvadsp_runtime_idle)
};
uint64_t nvadsp_get_timestamp_counter(void)
{
return arch_counter_get_cntvct();
}
EXPORT_SYMBOL(nvadsp_get_timestamp_counter);
static void __init nvadsp_parse_clk_entries(struct platform_device *pdev)
{
struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
u32 val32 = 0;
/* Optional properties, should come from platform dt files */
if (of_property_read_u32(dev->of_node, "nvidia,adsp_freq", &val32))
dev_dbg(dev, "adsp_freq dt not found\n");
else {
drv_data->adsp_freq = val32;
drv_data->adsp_freq_hz = val32 * 1000;
}
if (of_property_read_u32(dev->of_node, "nvidia,ape_freq", &val32))
dev_dbg(dev, "ape_freq dt not found\n");
else
drv_data->ape_freq = val32;
if (of_property_read_u32(dev->of_node, "nvidia,ape_emc_freq", &val32))
dev_dbg(dev, "ape_emc_freq dt not found\n");
else
drv_data->ape_emc_freq = val32;
}
static int __init nvadsp_parse_dt(struct platform_device *pdev)
{
struct nvadsp_drv_data *drv_data = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
u32 *adsp_reset;
u32 *adsp_mem;
int iter;
adsp_reset = drv_data->unit_fpga_reset;
adsp_mem = drv_data->adsp_mem;
for (iter = 0; iter < ADSP_MEM_END; iter++) {
if (of_property_read_u32_index(dev->of_node, "nvidia,adsp_mem",
iter, &adsp_mem[iter])) {
dev_err(dev, "adsp memory dt %d not found\n", iter);
return -EINVAL;
}
}
drv_data->adsp_unit_fpga = of_property_read_bool(dev->of_node,
"nvidia,adsp_unit_fpga");
drv_data->adsp_os_secload = of_property_read_bool(dev->of_node,
"nvidia,adsp_os_secload");
if (drv_data->adsp_unit_fpga) {
for (iter = 0; iter < ADSP_UNIT_FPGA_RESET_END; iter++) {
if (of_property_read_u32_index(dev->of_node,
"nvidia,adsp_unit_fpga_reset", iter,
&adsp_reset[iter])) {
dev_err(dev, "adsp reset dt %d not found\n",
iter);
return -EINVAL;
}
}
}
nvadsp_parse_clk_entries(pdev);
return 0;
}
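/*
 * A minimal sketch of the properties consumed above (names taken from the
 * code, values are placeholders only):
 *   nvidia,adsp_mem             - ADSP_MEM_END u32 cells (addresses/sizes)
 *   nvidia,adsp_unit_fpga       - bool, enables the unit-FPGA reset table
 *   nvidia,adsp_unit_fpga_reset - ADSP_UNIT_FPGA_RESET_END u32 cells
 *   nvidia,adsp_os_secload      - bool, ADSP OS image loaded securely
 *                                 elsewhere (assumption based on the name)
 *   nvidia,adsp_freq / nvidia,ape_freq / nvidia,ape_emc_freq - u32, in kHz
 */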
static int __init nvadsp_probe(struct platform_device *pdev)
{
struct nvadsp_drv_data *drv_data;
struct device *dev = &pdev->dev;
struct resource *res = NULL;
void __iomem *base = NULL;
uint32_t aram_addr;
uint32_t aram_size;
int dram_iter;
int irq_iter;
int ret = 0;
int iter;
dev_info(dev, "in probe()...\n");
drv_data = devm_kzalloc(dev, sizeof(*drv_data),
GFP_KERNEL);
if (!drv_data) {
dev_err(&pdev->dev, "Failed to allocate driver data\n");
ret = -ENOMEM;
goto out;
}
platform_set_drvdata(pdev, drv_data);
drv_data->pdev = pdev;
ret = nvadsp_parse_dt(pdev);
if (ret)
goto out;
#ifdef CONFIG_PM
ret = nvadsp_pm_init(pdev);
if (ret) {
dev_err(dev, "Failed in pm init\n");
goto out;
}
#endif
#ifdef CONFIG_DEBUG_FS
if (adsp_debug_init(drv_data))
dev_err(dev,
"unable to create tegra_ape debug fs directory\n");
#endif
drv_data->base_regs =
devm_kzalloc(dev, sizeof(void *) * APE_MAX_REG,
GFP_KERNEL);
if (!drv_data->base_regs) {
dev_err(dev, "Failed to allocate regs\n");
ret = -ENOMEM;
goto out;
}
for (iter = 0; iter < APE_MAX_REG; iter++) {
res = platform_get_resource(pdev, IORESOURCE_MEM, iter);
if (!res) {
dev_err(dev,
"Failed to get resource with ID %d\n",
iter);
ret = -EINVAL;
goto out;
}
if (!drv_data->adsp_unit_fpga && iter == UNIT_FPGA_RST)
continue;
/*
* skip if the particular module is not present in a
* generation, for which the register start address
* is made 0 from dt.
*/
if (res->start == 0)
continue;
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base)) {
dev_err(dev, "Failed to iomap resource reg[%d]\n",
iter);
ret = PTR_ERR(base);
goto out;
}
drv_data->base_regs[iter] = base;
adsp_add_load_mappings(res->start, base,
resource_size(res));
}
drv_data->base_regs_saved = drv_data->base_regs;
for (dram_iter = 0; dram_iter < ADSP_MAX_DRAM_MAP; dram_iter++) {
res = platform_get_resource(pdev, IORESOURCE_MEM, iter++);
if (!res) {
dev_err(dev,
"Failed to get DRAM map with ID %d\n", iter);
ret = -EINVAL;
goto out;
}
drv_data->dram_region[dram_iter] = res;
}
for (irq_iter = 0; irq_iter < NVADSP_VIRQ_MAX; irq_iter++) {
res = platform_get_resource(pdev, IORESOURCE_IRQ, irq_iter);
if (!res) {
dev_err(dev, "Failed to get irq number for index %d\n",
irq_iter);
ret = -EINVAL;
goto out;
}
drv_data->agic_irqs[irq_iter] = res->start;
}
nvadsp_drv_data = drv_data;
#ifdef CONFIG_PM
tegra_pd_add_device(dev);
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto out;
#endif
ret = nvadsp_amc_init(pdev);
if (ret)
goto err;
ret = nvadsp_hwmbox_init(pdev);
if (ret)
goto err;
ret = nvadsp_mbox_init(pdev);
if (ret)
goto err;
#ifdef CONFIG_TEGRA_EMC_APE_DFS
ret = emc_dfs_init(pdev);
if (ret)
goto err;
#endif
#ifdef CONFIG_TEGRA_ADSP_ACTMON
ret = ape_actmon_probe(pdev);
if (ret)
goto err;
#endif
ret = nvadsp_os_probe(pdev);
if (ret)
goto err;
ret = nvadsp_reset_init(pdev);
if (ret) {
dev_err(dev, "Failed initialize resets\n");
goto err;
}
ret = nvadsp_app_module_probe(pdev);
if (ret)
goto err;
aram_addr = drv_data->adsp_mem[ARAM_ALIAS_0_ADDR];
aram_size = drv_data->adsp_mem[ARAM_ALIAS_0_SIZE];
ret = aram_init(aram_addr, aram_size);
if (ret)
dev_err(dev, "Failed to init aram\n");
err:
#ifdef CONFIG_PM
ret = pm_runtime_put_sync(dev);
if (ret < 0)
dev_err(dev, "pm_runtime_put_sync failed\n");
#endif
out:
return ret;
}
static int nvadsp_remove(struct platform_device *pdev)
{
#ifdef CONFIG_TEGRA_EMC_APE_DFS
emc_dfs_exit();
#endif
aram_exit();
pm_runtime_disable(&pdev->dev);
#ifdef CONFIG_PM
if (!pm_runtime_status_suspended(&pdev->dev))
nvadsp_runtime_suspend(&pdev->dev);
#endif
tegra_pd_remove_device(&pdev->dev);
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id nvadsp_of_match[] = {
{ .compatible = "nvidia,tegra210-adsp", .data = NULL, },
{ .compatible = "nvidia,tegra18x-adsp", .data = NULL, },
{ .compatible = "nvidia,tegra18x-adsp-hv", .data = NULL, },
{},
};
#endif
static struct platform_driver nvadsp_driver __refdata = {
.driver = {
.name = "nvadsp",
.owner = THIS_MODULE,
.pm = &nvadsp_pm_ops,
.of_match_table = of_match_ptr(nvadsp_of_match),
},
.probe = nvadsp_probe,
.remove = nvadsp_remove,
};
static int __init nvadsp_init(void)
{
return platform_driver_register(&nvadsp_driver);
}
static void __exit nvadsp_exit(void)
{
platform_driver_unregister(&nvadsp_driver);
}
module_init(nvadsp_init);
module_exit(nvadsp_exit);
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("Tegra Host ADSP Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("Dual BSD/GPL");


@@ -0,0 +1,183 @@
/*
* dev.h
*
* A header file for Host driver for ADSP and APE
*
* Copyright (C) 2014-2016, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __TEGRA_NVADSP_DEV_H
#define __TEGRA_NVADSP_DEV_H
#include <linux/tegra_nvadsp.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/debugfs.h>
#include <linux/platform/tegra/emc_bwmgr.h>
#if defined(CONFIG_ARCH_TEGRA_21x_SOC)
#include "dev-t21x.h"
#else
#include "dev-t18x.h"
#endif /* CONFIG_ARCH_TEGRA_21x_SOC */
#include "hwmailbox.h"
#include "amc.h"
#include "os.h"
enum adsp_unit_fpga_reset {
ADSP_ASSERT,
ADSP_DEASSERT,
ADSP_UNIT_FPGA_RESET_END,
};
#define AMISC_REGS 0x2000
#define AMISC_ADSP_L2_REGFILEBASE 0x10
#define AMISC_SHRD_SMP_STA 0x14
#define AMISC_SEM_REG_START 0x1c
#define AMISC_SEM_REG_END 0x44
#define AMISC_TSC 0x48
#define AMISC_ACTMON_AVG_CNT 0x81c
#define AMISC_REG_START_OFFSET 0x0
#define AMISC_REG_MBOX_OFFSET 0x64
#define ADSP_ACTMON_REG_START_OFFSET 0x800
#define ADSP_ACTMON_REG_END_OFFSET 0x828
enum nvadsp_virqs {
MBOX_SEND_VIRQ,
MBOX_RECV_VIRQ,
WDT_VIRQ,
WFI_VIRQ,
AMC_ERR_VIRQ,
ACTMON_VIRQ,
NVADSP_VIRQ_MAX,
};
struct nvadsp_pm_state {
u32 aram[AMC_ARAM_WSIZE];
uint32_t amc_regs[AMC_REGS];
uint32_t amisc_regs[AMISC_REGS];
u32 evp[AMC_EVP_WSIZE];
void *evp_ptr;
};
struct nvadsp_drv_data {
void __iomem **base_regs;
void __iomem **base_regs_saved;
struct platform_device *pdev;
struct resource *dram_region[ADSP_MAX_DRAM_MAP];
struct hwmbox_queue hwmbox_send_queue;
int hwmbox_send_virq;
int hwmbox_recv_virq;
struct nvadsp_mbox **mboxes;
unsigned long *mbox_ids;
spinlock_t mbox_lock;
#ifdef CONFIG_DEBUG_FS
struct dentry *adsp_debugfs_root;
#endif
struct clk *ape_clk;
struct clk *apb2ape_clk;
struct clk *adsp_clk;
struct clk *adsp_cpu_clk;
struct clk *adsp_neon_clk;
struct clk *ape_emc_clk;
struct clk *uartape_clk;
struct clk *ahub_clk;
unsigned long adsp_freq; /* in KHz*/
unsigned long adsp_freq_hz; /* in Hz*/
unsigned long ape_freq; /* in KHz*/
unsigned long ape_emc_freq; /* in KHz*/
int (*runtime_suspend)(struct device *dev);
int (*runtime_resume)(struct device *dev);
int (*runtime_idle)(struct device *dev);
int (*assert_adsp)(struct nvadsp_drv_data *drv_data);
int (*deassert_adsp)(struct nvadsp_drv_data *drv_data);
struct reset_control *adspall_rst;
struct nvadsp_pm_state state;
bool adsp_os_running;
bool adsp_os_suspended;
bool adsp_os_secload;
void *shared_adsp_os_data;
#ifdef CONFIG_TEGRA_ADSP_DFS
bool dfs_initialized;
#endif
#ifdef CONFIG_TEGRA_ADSP_ACTMON
bool actmon_initialized;
#endif
#ifdef CONFIG_TEGRA_ADSP_CPUSTAT
bool cpustat_initialized;
#endif
#if defined(CONFIG_TEGRA_ADSP_FILEIO)
bool adspff_init;
#endif
u32 adsp_mem[ADSP_MEM_END];
bool adsp_unit_fpga;
u32 unit_fpga_reset[ADSP_UNIT_FPGA_RESET_END];
int agic_irqs[NVADSP_VIRQ_MAX];
struct tegra_bwmgr_client *bwmgr;
};
#define ADSP_CONFIG 0x04
#define MAXCLKLATENCY (3 << 29)
#define UART_BAUD_RATE 9600
status_t nvadsp_mbox_init(struct platform_device *pdev);
status_t nvadsp_amc_init(struct platform_device *pdev);
#ifdef CONFIG_TEGRA_ADSP_DFS
void adsp_cpu_set_rate(unsigned long freq);
int adsp_dfs_core_init(struct platform_device *pdev);
int adsp_dfs_core_exit(struct platform_device *pdev);
u32 adsp_to_emc_freq(u32 adspfreq);
#endif
#ifdef CONFIG_TEGRA_ADSP_ACTMON
int ape_actmon_probe(struct platform_device *pdev);
#endif
#ifdef CONFIG_TEGRA_ADSP_CPUSTAT
int adsp_cpustat_init(struct platform_device *pdev);
int adsp_cpustat_exit(struct platform_device *pdev);
#endif
#if defined(CONFIG_TEGRA_ADSP_FILEIO)
int adspff_init(void);
void adspff_exit(void);
#endif
#ifdef CONFIG_TEGRA_EMC_APE_DFS
status_t emc_dfs_init(struct platform_device *pdev);
void emc_dfs_exit(void);
#endif
#ifdef CONFIG_PM
int __init nvadsp_pm_init(struct platform_device *pdev);
#endif
int __init nvadsp_reset_init(struct platform_device *pdev);
#endif /* __TEGRA_NVADSP_DEV_H */


@@ -0,0 +1,110 @@
/*
* dram_app_mem_manager.c
*
* DRAM app memory manager for allocating memory for text, bss and data
*
* Copyright (C) 2014-2016, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) "%s : %d, " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include "dram_app_mem_manager.h"
#define ALIGN_TO_ADSP_PAGE(x) ALIGN(x, 4096)
static void *dram_app_mem_handle;
static LIST_HEAD(dram_app_mem_alloc_list);
static LIST_HEAD(dram_app_mem_free_list);
void dram_app_mem_print(void)
{
mem_print(dram_app_mem_handle);
}
EXPORT_SYMBOL(dram_app_mem_print);
void *dram_app_mem_request(const char *name, size_t size)
{
return mem_request(dram_app_mem_handle, name, ALIGN_TO_ADSP_PAGE(size));
}
EXPORT_SYMBOL(dram_app_mem_request);
bool dram_app_mem_release(void *handle)
{
return mem_release(dram_app_mem_handle, handle);
}
EXPORT_SYMBOL(dram_app_mem_release);
unsigned long dram_app_mem_get_address(void *handle)
{
return mem_get_address(handle);
}
EXPORT_SYMBOL(dram_app_mem_get_address);
#ifdef CONFIG_DEBUG_FS
static struct dentry *dram_app_mem_dump_debugfs_file;
static int dram_app_mem_dump(struct seq_file *s, void *data)
{
mem_dump(dram_app_mem_handle, s);
return 0;
}
static int dram_app_mem_dump_open(struct inode *inode, struct file *file)
{
return single_open(file, dram_app_mem_dump, inode->i_private);
}
static const struct file_operations dram_app_mem_dump_fops = {
.open = dram_app_mem_dump_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#endif
int dram_app_mem_init(unsigned long start, unsigned long size)
{
dram_app_mem_handle =
create_mem_manager("DRAM_APP_MANAGER", start, size);
if (IS_ERR(dram_app_mem_handle)) {
pr_err("ERROR: failed to create aram memory_manager");
return PTR_ERR(dram_app_mem_handle);
}
#ifdef CONFIG_DEBUG_FS
dram_app_mem_dump_debugfs_file =
debugfs_create_file("dram_app_mem_dump",
S_IRUSR, NULL, NULL, &dram_app_mem_dump_fops);
if (!dram_app_mem_dump_debugfs_file) {
pr_err("ERROR: failed to create dram_app_mem_dump debugfs");
destroy_mem_manager(dram_app_mem_handle);
return -ENOMEM;
}
#endif
return 0;
}
EXPORT_SYMBOL(dram_app_mem_init);
void dram_app_mem_exit(void)
{
#ifdef CONFIG_DEBUG_FS
debugfs_remove(dram_app_mem_dump_debugfs_file);
#endif
destroy_mem_manager(dram_app_mem_handle);
}
EXPORT_SYMBOL(dram_app_mem_exit);


@@ -0,0 +1,30 @@
/*
* Header file for dram app memory manager
*
* Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef __TEGRA_NVADSP_DRAM_APP_MEM_MANAGER_H
#define __TEGRA_NVADSP_DRAM_APP_MEM_MANAGER_H
#include "mem_manager.h"
int dram_app_mem_init(unsigned long, unsigned long);
void dram_app_mem_exit(void);
void *dram_app_mem_request(const char *name, size_t size);
bool dram_app_mem_release(void *handle);
unsigned long dram_app_mem_get_address(void *handle);
void dram_app_mem_print(void);
#endif /* __TEGRA_NVADSP_DRAM_APP_MEM_MANAGER_H */
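
As a quick orientation for the API above, here is a minimal usage sketch (illustrative only, not part of the imported sources): a hypothetical caller hands the manager a DRAM window and then carves out a page-aligned block by name. The region base, block name and sizes below are made up.

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/sizes.h>
#include "dram_app_mem_manager.h"

static int example_dram_app_alloc(void)
{
	void *handle;
	int ret;

	/* Hypothetical 4 MB DRAM window reserved for ADSP apps. */
	ret = dram_app_mem_init(0x80000000UL, SZ_4M);
	if (ret)
		return ret;

	/* Request 100 KB; the wrapper aligns the size to 4 KB pages. */
	handle = dram_app_mem_request("app0.text", 100 * SZ_1K);
	if (IS_ERR_OR_NULL(handle)) {
		dram_app_mem_exit();
		return -ENOMEM;
	}

	pr_info("app0.text placed at 0x%lx\n",
		dram_app_mem_get_address(handle));

	dram_app_mem_release(handle);
	dram_app_mem_exit();
	return 0;
}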


@@ -0,0 +1,465 @@
/*
* emc_dfs.c
*
* EMC dynamic frequency scaling driven by APE activity
*
* Copyright (C) 2014-2016, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/tegra_nvadsp.h>
#include <linux/tick.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include "dev.h"
/* Register offsets */
#define ABRIDGE_STATS_READ_0 0x04
#define ABRIDGE_STATS_WRITE_0 0x0c
#define ABRIDGE_STATS_CLEAR_0 0x1b
#define ABRIDGE_STATS_HI_OFFSET 0x04
/* Sample Period in usecs */
#define DEFAULT_SAMPLE_PERIOD 500000
#define INT_SHIFT 32
#define make64(hi, low) ((((u64)hi) << INT_SHIFT) | (low))
#define SCALING_DIVIDER 2
#define BOOST_DOWN_COUNT 2
#define DEFAULT_BOOST_UP_THRESHOLD 2000000
#define DEFAULT_BOOST_STEP 2
struct emc_dfs_info {
void __iomem *abridge_base;
struct timer_list cnt_timer;
u64 rd_cnt;
u64 wr_cnt;
bool enable;
u64 avg_cnt;
unsigned long timer_rate;
ktime_t prev_time;
u32 dn_count;
u32 boost_dn_count;
u64 boost_up_threshold;
u8 boost_step;
struct work_struct clk_set_work;
unsigned long cur_freq;
bool speed_change_flag;
unsigned long max_freq;
struct clk *emcclk;
};
static struct emc_dfs_info global_emc_info;
static struct emc_dfs_info *einfo;
static struct task_struct *speedchange_task;
static spinlock_t speedchange_lock;
static u64 read64(u32 offset)
{
u32 low;
u32 hi;
low = readl(einfo->abridge_base + offset);
hi = readl(einfo->abridge_base + (offset + ABRIDGE_STATS_HI_OFFSET));
return make64(hi, low);
}
static unsigned long count_to_emcfreq(void)
{
unsigned long tfreq = 0;
if (!einfo->avg_cnt) {
if (einfo->dn_count >= einfo->boost_dn_count) {
tfreq = einfo->cur_freq / SCALING_DIVIDER;
einfo->dn_count = 0;
} else
einfo->dn_count++;
} else if (einfo->avg_cnt >= einfo->boost_up_threshold) {
if (einfo->boost_step)
tfreq = einfo->cur_freq * einfo->boost_step;
}
pr_debug("%s:avg_cnt: %llu current freq(kHz): %lu target freq(kHz): %lu\n",
__func__, einfo->avg_cnt, einfo->cur_freq, tfreq);
return tfreq;
}
static int clk_work(void *data)
{
int ret;
if (einfo->emcclk && einfo->speed_change_flag && einfo->cur_freq) {
ret = clk_set_rate(einfo->emcclk, einfo->cur_freq * 1000);
if (ret) {
pr_err("failed to set ape.emc freq:%d\n", ret);
BUG_ON(ret);
}
einfo->cur_freq = clk_get_rate(einfo->emcclk) / 1000;
pr_info("ape.emc: setting emc clk: %lu\n", einfo->cur_freq);
}
mod_timer(&einfo->cnt_timer,
jiffies + usecs_to_jiffies(einfo->timer_rate));
return 0;
}
static void emc_dfs_timer(unsigned long data)
{
u64 cur_cnt;
u64 delta_cnt;
u64 prev_cnt;
u64 delta_time;
ktime_t now;
unsigned long target_freq;
unsigned long flags;
spin_lock_irqsave(&speedchange_lock, flags);
/* Return if emc dfs is disabled */
if (!einfo->enable) {
spin_unlock_irqrestore(&speedchange_lock, flags);
return;
}
prev_cnt = einfo->rd_cnt + einfo->wr_cnt;
einfo->rd_cnt = read64((u32)ABRIDGE_STATS_READ_0);
einfo->wr_cnt = read64((u32)ABRIDGE_STATS_WRITE_0);
pr_debug("einfo->rd_cnt: %llu einfo->wr_cnt: %llu\n",
einfo->rd_cnt, einfo->wr_cnt);
cur_cnt = einfo->rd_cnt + einfo->wr_cnt;
delta_cnt = cur_cnt - prev_cnt;
now = ktime_get();
delta_time = ktime_to_ns(ktime_sub(now, einfo->prev_time));
if (!delta_time) {
pr_err("%s: time interval to calculate emc scaling is zero\n",
__func__);
spin_unlock_irqrestore(&speedchange_lock, flags);
goto exit;
}
einfo->prev_time = now;
einfo->avg_cnt = delta_cnt / delta_time;
/* if 0: no scaling is required */
target_freq = count_to_emcfreq();
if (!target_freq) {
einfo->speed_change_flag = false;
} else {
einfo->cur_freq = target_freq;
einfo->speed_change_flag = true;
}
spin_unlock_irqrestore(&speedchange_lock, flags);
pr_info("einfo->avg_cnt: %llu delta_cnt: %llu delta_time %llu emc_freq:%lu\n",
einfo->avg_cnt, delta_cnt, delta_time, einfo->cur_freq);
exit:
wake_up_process(speedchange_task);
}
static void emc_dfs_enable(void)
{
einfo->rd_cnt = read64((u32)ABRIDGE_STATS_READ_0);
einfo->wr_cnt = read64((u32)ABRIDGE_STATS_WRITE_0);
einfo->prev_time = ktime_get();
mod_timer(&einfo->cnt_timer, jiffies + 2);
}
static void emc_dfs_disable(void)
{
einfo->rd_cnt = read64((u32)ABRIDGE_STATS_READ_0);
einfo->wr_cnt = read64((u32)ABRIDGE_STATS_WRITE_0);
del_timer_sync(&einfo->cnt_timer);
}
#ifdef CONFIG_DEBUG_FS
static struct dentry *emc_dfs_root;
#define RW_MODE (S_IWUSR | S_IRUSR)
#define RO_MODE S_IRUSR
/* Get emc dfs status: 0: disabled, 1: enabled */
static int dfs_enable_get(void *data, u64 *val)
{
*val = einfo->enable;
return 0;
}
/* Enable/disable emc dfs */
static int dfs_enable_set(void *data, u64 val)
{
einfo->enable = (bool) val;
/*
* If enabling: arm the timer to fire within the next 2 jiffies,
* so that the scaled EMC value takes effect immediately.
*/
if (einfo->enable)
emc_dfs_enable();
else
emc_dfs_disable();
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(enable_fops, dfs_enable_get,
dfs_enable_set, "%llu\n");
/* Get the boost-up threshold (activity count above which EMC freq is boosted) */
static int boost_up_threshold_get(void *data, u64 *val)
{
*val = einfo->boost_up_threshold;
return 0;
}
/* Set the boost-up threshold */
static int boost_up_threshold_set(void *data, u64 val)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&speedchange_lock, flags);
if (!einfo->enable) {
pr_info("EMC dfs is not enabled\n");
ret = -EINVAL;
goto err;
}
if (val)
einfo->boost_up_threshold = val;
err:
spin_unlock_irqrestore(&speedchange_lock, flags);
return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(up_threshold_fops,
boost_up_threshold_get, boost_up_threshold_set, "%llu\n");
/* Get the boost step (multiplier applied to EMC freq when boosting) */
static int boost_step_get(void *data, u64 *val)
{
*val = einfo->boost_step;
return 0;
}
/* Set the boost step */
static int boost_step_set(void *data, u64 val)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&speedchange_lock, flags);
if (!einfo->enable) {
pr_info("EMC dfs is not enabled\n");
ret = -EINVAL;
goto err;
}
if (!val)
einfo->boost_step = 1;
else
einfo->boost_step = (u8) val;
err:
spin_unlock_irqrestore(&speedchange_lock, flags);
return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(boost_fops, boost_step_get,
boost_step_set, "%llu\n");
/* Get the boost-down count (idle samples required before scaling down) */
static int boost_down_count_get(void *data, u64 *val)
{
*val = einfo->boost_dn_count;
return 0;
}
/* Set the boost-down count */
static int boost_down_count_set(void *data, u64 val)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&speedchange_lock, flags);
if (!einfo->enable) {
pr_info("EMC dfs is not enabled\n");
ret = -EINVAL;
goto err;
}
if (val)
einfo->boost_dn_count = (u32) val;
ret = 0;
err:
spin_unlock_irqrestore(&speedchange_lock, flags);
return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(down_cnt_fops, boost_down_count_get,
boost_down_count_set, "%llu\n");
static int period_get(void *data, u64 *val)
{
*val = einfo->timer_rate;
return 0;
}
/* Set period in usec */
static int period_set(void *data, u64 val)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&speedchange_lock, flags);
if (!einfo->enable) {
pr_info("EMC dfs is not enabled\n");
ret = -EINVAL;
goto err;
}
if (val)
einfo->timer_rate = (unsigned long)val;
err:
spin_unlock_irqrestore(&speedchange_lock, flags);
return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(period_fops, period_get, period_set, "%llu\n");
static int emc_dfs_debugfs_init(struct nvadsp_drv_data *drv)
{
int ret = -ENOMEM;
struct dentry *d;
if (!drv->adsp_debugfs_root)
return ret;
emc_dfs_root = debugfs_create_dir("emc_dfs", drv->adsp_debugfs_root);
if (!emc_dfs_root)
goto err_out;
d = debugfs_create_file("enable", RW_MODE, emc_dfs_root, NULL,
&enable_fops);
if (!d)
goto err_root;
d = debugfs_create_file("boost_up_threshold", RW_MODE, emc_dfs_root,
NULL, &up_threshold_fops);
if (!d)
goto err_root;
d = debugfs_create_file("boost_step", RW_MODE, emc_dfs_root, NULL,
&boost_fops);
if (!d)
goto err_root;
d = debugfs_create_file("boost_down_count", RW_MODE, emc_dfs_root,
NULL, &down_cnt_fops);
if (!d)
goto err_root;
d = debugfs_create_file("period", RW_MODE, emc_dfs_root, NULL,
&period_fops);
if (!d)
goto err_root;
return 0;
err_root:
debugfs_remove_recursive(emc_dfs_root);
err_out:
return ret;
}
#endif
status_t __init emc_dfs_init(struct platform_device *pdev)
{
struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
int ret = 0;
einfo = &global_emc_info;
einfo->abridge_base = drv->base_regs[ABRIDGE];
einfo->emcclk = clk_get_sys("ape", "emc");
if (IS_ERR_OR_NULL(einfo->emcclk)) {
dev_err(&pdev->dev, "unable to find ape.emc clock\n");
return einfo->emcclk ? PTR_ERR(einfo->emcclk) : -ENOENT;
}
einfo->timer_rate = DEFAULT_SAMPLE_PERIOD;
einfo->boost_up_threshold = DEFAULT_BOOST_UP_THRESHOLD;
einfo->boost_step = DEFAULT_BOOST_STEP;
einfo->dn_count = 0;
einfo->boost_dn_count = BOOST_DOWN_COUNT;
einfo->enable = 1;
einfo->max_freq = clk_round_rate(einfo->emcclk, ULONG_MAX);
ret = clk_set_rate(einfo->emcclk, einfo->max_freq);
if (ret) {
dev_err(&pdev->dev, "failed to set ape.emc freq: %d\n", ret);
return ret;
}
einfo->max_freq /= 1000;
einfo->cur_freq = clk_get_rate(einfo->emcclk) / 1000;
if (!einfo->cur_freq) {
dev_err(&pdev->dev, "ape.emc freq is 0\n");
return -EINVAL;
}
dev_info(&pdev->dev, "einfo->cur_freq %lu\n", einfo->cur_freq);
spin_lock_init(&speedchange_lock);
init_timer(&einfo->cnt_timer);
einfo->cnt_timer.function = emc_dfs_timer;
speedchange_task = kthread_create(clk_work, NULL, "emc_dfs");
if (IS_ERR(speedchange_task))
return PTR_ERR(speedchange_task);
sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
get_task_struct(speedchange_task);
/* NB: wake up so the thread does not look hung to the freezer */
wake_up_process(speedchange_task);
emc_dfs_enable();
dev_info(&pdev->dev, "APE EMC DFS is initialized\n");
#ifdef CONFIG_DEBUG_FS
emc_dfs_debugfs_init(drv);
#endif
return ret;
}
void __exit emc_dfs_exit(void)
{
kthread_stop(speedchange_task);
put_task_struct(speedchange_task);
}


@@ -0,0 +1,25 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef __HWMAILBOX_T21X_H
#define __HWMAILBOX_T21X_H
#define HWMB_REG_IDX AMISC
/* Mailbox register Offsets in AMISC */
#define HWMBOX0_REG 0x58
#define HWMBOX1_REG 0x5C
#define HWMBOX2_REG 0x60
#define HWMBOX3_REG 0x64
#endif /* __HWMAILBOX_T21X_H */


@@ -0,0 +1,294 @@
/*
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/atomic.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/platform_device.h>
#include <linux/tegra_nvadsp.h>
#include <linux/irqchip/tegra-agic.h>
#include "dev.h"
/*
* Mailbox 0 is for receiving messages
* from ADSP i.e. CPU <-- ADSP.
*/
#define RECV_HWMBOX HWMBOX0_REG
#define INT_RECV_HWMBOX INT_AMISC_MBOX_FULL0
/*
* Mailbox 1 is for sending messages
* to ADSP i.e. CPU --> ADSP
*/
#define SEND_HWMBOX HWMBOX1_REG
#define INT_SEND_HWMBOX INT_AMISC_MBOX_EMPTY1
static struct platform_device *nvadsp_pdev;
static struct nvadsp_drv_data *nvadsp_drv_data;
/* Initialized to false by default */
static bool is_hwmbox_busy;
#ifdef CONFIG_MBOX_ACK_HANDLER
static int hwmbox_last_msg;
#endif
static inline u32 hwmbox_readl(u32 reg)
{
return readl(nvadsp_drv_data->base_regs[HWMB_REG_IDX] + reg);
}
static inline void hwmbox_writel(u32 val, u32 reg)
{
writel(val, nvadsp_drv_data->base_regs[HWMB_REG_IDX] + reg);
}
#define PRINT_HWMBOX(x) \
dev_info(&nvadsp_pdev->dev, "%s: 0x%x\n", #x, hwmbox_readl(x))
void dump_mailbox_regs(void)
{
dev_info(&nvadsp_pdev->dev, "dumping hwmailbox registers ...\n");
PRINT_HWMBOX(RECV_HWMBOX);
PRINT_HWMBOX(SEND_HWMBOX);
dev_info(&nvadsp_pdev->dev, "end of dump ....\n");
}
static void hwmboxq_init(struct hwmbox_queue *queue)
{
queue->head = 0;
queue->tail = 0;
queue->count = 0;
init_completion(&queue->comp);
spin_lock_init(&queue->lock);
}
/* Must be called with queue lock held in non-interrupt context */
static inline bool
is_hwmboxq_empty(struct hwmbox_queue *queue)
{
return (queue->count == 0);
}
/* Must be called with queue lock held in non-interrupt context */
static inline bool
is_hwmboxq_full(struct hwmbox_queue *queue)
{
return (queue->count == HWMBOX_QUEUE_SIZE);
}
/* Must be called with queue lock held in non-interrupt context */
static status_t hwmboxq_enqueue(struct hwmbox_queue *queue,
uint32_t data)
{
int ret = 0;
if (is_hwmboxq_full(queue)) {
ret = -EBUSY;
goto comp;
}
queue->array[queue->tail] = data;
queue->tail = (queue->tail + 1) & HWMBOX_QUEUE_SIZE_MASK;
queue->count++;
if (is_hwmboxq_full(queue))
goto comp;
else
goto out;
comp:
reinit_completion(&queue->comp);
out:
return ret;
}
status_t nvadsp_hwmbox_send_data(uint16_t mid, uint32_t data, uint32_t flags)
{
spinlock_t *lock = &nvadsp_drv_data->hwmbox_send_queue.lock;
unsigned long lockflags;
int ret = 0;
if (flags & NVADSP_MBOX_SMSG) {
data = PREPARE_HWMBOX_SMSG(mid, data);
pr_debug("nvadsp_mbox_send: data: 0x%x\n", data);
}
/* TODO handle LMSG */
spin_lock_irqsave(lock, lockflags);
if (!is_hwmbox_busy) {
is_hwmbox_busy = true;
pr_debug("nvadsp_mbox_send: empty mailbox. write to mailbox.\n");
#ifdef CONFIG_MBOX_ACK_HANDLER
hwmbox_last_msg = data;
#endif
hwmbox_writel(data, SEND_HWMBOX);
} else {
pr_debug("nvadsp_mbox_send: enqueue data\n");
ret = hwmboxq_enqueue(&nvadsp_drv_data->hwmbox_send_queue,
data);
}
spin_unlock_irqrestore(lock, lockflags);
return ret;
}
/* Must be called with queue lock held in non-interrupt context */
static status_t hwmboxq_dequeue(struct hwmbox_queue *queue,
uint32_t *data)
{
int ret = 0;
if (is_hwmboxq_empty(queue)) {
ret = -EBUSY;
goto out;
}
if (is_hwmboxq_full(queue))
complete_all(&nvadsp_drv_data->hwmbox_send_queue.comp);
*data = queue->array[queue->head];
queue->head = (queue->head + 1) & HWMBOX_QUEUE_SIZE_MASK;
queue->count--;
out:
return ret;
}
static irqreturn_t hwmbox_send_empty_int_handler(int irq, void *devid)
{
spinlock_t *lock = &nvadsp_drv_data->hwmbox_send_queue.lock;
struct device *dev = &nvadsp_pdev->dev;
unsigned long lockflags;
uint32_t data;
int ret;
spin_lock_irqsave(lock, lockflags);
data = hwmbox_readl(SEND_HWMBOX);
if (data != PREPARE_HWMBOX_EMPTY_MSG())
dev_err(dev, "last mailbox sent failed with 0x%x\n", data);
#ifdef CONFIG_MBOX_ACK_HANDLER
{
uint16_t last_mboxid = HWMBOX_SMSG_MID(hwmbox_last_msg);
struct nvadsp_mbox *mbox = nvadsp_drv_data->mboxes[last_mboxid];
if (mbox) {
nvadsp_mbox_handler_t ack_handler = mbox->ack_handler;
if (ack_handler) {
uint32_t msg = HWMBOX_SMSG_MSG(hwmbox_last_msg);
ack_handler(msg, mbox->hdata);
}
}
}
#endif
ret = hwmboxq_dequeue(&nvadsp_drv_data->hwmbox_send_queue,
&data);
if (ret == 0) {
#ifdef CONFIG_MBOX_ACK_HANDLER
hwmbox_last_msg = data;
#endif
hwmbox_writel(data, SEND_HWMBOX);
dev_dbg(dev, "Writing 0x%x to SEND_HWMBOX\n", data);
} else {
is_hwmbox_busy = false;
}
spin_unlock_irqrestore(lock, lockflags);
return IRQ_HANDLED;
}
static irqreturn_t hwmbox_recv_full_int_handler(int irq, void *devid)
{
uint32_t data;
int ret;
data = hwmbox_readl(RECV_HWMBOX);
hwmbox_writel(PREPARE_HWMBOX_EMPTY_MSG(), RECV_HWMBOX);
if (IS_HWMBOX_MSG_SMSG(data)) {
uint16_t mboxid = HWMBOX_SMSG_MID(data);
struct nvadsp_mbox *mbox = nvadsp_drv_data->mboxes[mboxid];
if (!mbox) {
dev_info(&nvadsp_pdev->dev,
"Failed to get mbox for mboxid: %u\n",
mboxid);
goto out;
}
if (mbox->handler) {
mbox->handler(HWMBOX_SMSG_MSG(data), mbox->hdata);
} else {
ret = nvadsp_mboxq_enqueue(&mbox->recv_queue,
HWMBOX_SMSG_MSG(data));
if (ret) {
dev_info(&nvadsp_pdev->dev,
"Failed to deliver msg 0x%x to"
" mbox id %u\n",
HWMBOX_SMSG_MSG(data), mboxid);
goto out;
}
}
} else if (IS_HWMBOX_MSG_LMSG(data)) {
/* TODO */
}
out:
return IRQ_HANDLED;
}
int __init nvadsp_hwmbox_init(struct platform_device *pdev)
{
struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
int recv_virq, send_virq;
int ret = 0;
nvadsp_pdev = pdev;
nvadsp_drv_data = drv;
recv_virq = drv->agic_irqs[MBOX_RECV_VIRQ];
drv->hwmbox_recv_virq = recv_virq;
send_virq = drv->agic_irqs[MBOX_SEND_VIRQ];
drv->hwmbox_send_virq = send_virq;
ret = request_irq(recv_virq, hwmbox_recv_full_int_handler,
IRQF_TRIGGER_RISING, "hwmbox0_recv_full", pdev);
if (ret)
goto req_recv_virq;
ret = request_irq(send_virq, hwmbox_send_empty_int_handler,
IRQF_TRIGGER_RISING,
"hwmbox1_send_empty", pdev);
if (ret)
goto req_send_virq;
hwmboxq_init(&drv->hwmbox_send_queue);
return ret;
req_send_virq:
free_irq(recv_virq, pdev);
req_recv_virq:
irq_dispose_mapping(send_virq);
irq_dispose_mapping(recv_virq);
nvadsp_drv_data = NULL;
nvadsp_pdev = NULL;
return ret;
}


@@ -0,0 +1,116 @@
/*
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef __HWMAILBOX_H
#define __HWMAILBOX_H
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#if defined(CONFIG_ARCH_TEGRA_21x_SOC)
#include "hwmailbox-t21x.h"
#else
#include "hwmailbox-t18x.h"
#endif /* CONFIG_ARCH_TEGRA_21x_SOC */
/*
* The interpretation of hwmailbox content is:
* 31 30 29 0
* [TAG|TYPE|MESSAGE]
*/
#define HWMBOX_TAG_SHIFT 31
#define HWMBOX_TAG_MASK 0x1
#define HWMBOX_TAG_INVALID 0
#define HWMBOX_TAG_VALID 1
/* Set Invalid TAG */
#define SET_HWMBOX_TAG_INVALID (HWMBOX_TAG_INVALID << HWMBOX_TAG_SHIFT)
/* Set Valid TAG */
#define SET_HWMBOX_TAG_VALID (HWMBOX_TAG_VALID << HWMBOX_TAG_SHIFT)
/* Get current TAG */
#define HWMBOX_TAG(val) ((val >> HWMBOX_TAG_SHIFT) & HWMBOX_TAG_MASK)
/*
* Mailbox can be used for sending short messages and long messages
*/
#define HWMBOX_MSG_TYPE_SHIFT 30
#define HWMBOX_MSG_TYPE_MASK 0x1
#define HWMBOX_MSG_SMSG 0
#define HWMBOX_MSG_LMSG 1
/* Set SMSG type */
#define SET_HWMBOX_MSG_SMSG (HWMBOX_MSG_SMSG << HWMBOX_MSG_TYPE_SHIFT)
/* Set LMSG type */
#define SET_HWMBOX_MSG_LMSG (HWMBOX_MSG_LMSG << HWMBOX_MSG_TYPE_SHIFT)
/* Get MSG type */
#define HWMBOX_MSG_TYPE(val) \
((val >> HWMBOX_MSG_TYPE_SHIFT) & HWMBOX_MSG_TYPE_MASK)
/* Check if SMSG */
#define IS_HWMBOX_MSG_SMSG(val) \
(!((val >> HWMBOX_MSG_TYPE_SHIFT) & HWMBOX_MSG_TYPE_MASK))
/* Check if LMSG */
#define IS_HWMBOX_MSG_LMSG(val) \
((val >> HWMBOX_MSG_TYPE_SHIFT) & HWMBOX_MSG_TYPE_MASK)
/*
* The format for a short message is:
* 31 30 29 20 19 0
* [TAG|TYPE|MBOX ID|SHORT MESSAGE]
* 1b 1b 10bits 20bits
*/
#define HWMBOX_SMSG_SHIFT 0
#define HWMBOX_SMSG_MASK 0x3FFFFFFF
#define HWMBOX_SMSG(val) ((val >> HWMBOX_SMSG_SHIFT) & HWMBOX_SMSG_MASK)
#define HWMBOX_SMSG_MID_SHIFT 20
#define HWMBOX_SMSG_MID_MASK 0x3FF
#define HWMBOX_SMSG_MID(val) \
((val >> HWMBOX_SMSG_MID_SHIFT) & HWMBOX_SMSG_MID_MASK)
#define HWMBOX_SMSG_MSG_SHIFT 0
#define HWMBOX_SMSG_MSG_MASK 0xFFFFF
#define HWMBOX_SMSG_MSG(val) \
((val >> HWMBOX_SMSG_MSG_SHIFT) & HWMBOX_SMSG_MSG_MASK)
/* Set mailbox id for a short message */
#define SET_HWMBOX_SMSG_MID(val) \
((val & HWMBOX_SMSG_MID_MASK) << HWMBOX_SMSG_MID_SHIFT)
/* Set msg value in a short message */
#define SET_HWMBOX_SMSG_MSG(val) \
((val & HWMBOX_SMSG_MSG_MASK) << HWMBOX_SMSG_MSG_SHIFT)
/* Prepare a short message with mailbox id and data */
#define PREPARE_HWMBOX_SMSG(mid, data) (SET_HWMBOX_TAG_VALID | \
SET_HWMBOX_MSG_SMSG | \
SET_HWMBOX_SMSG_MID(mid) | \
SET_HWMBOX_SMSG_MSG(data))
/* Prepare empty mailbox value */
#define PREPARE_HWMBOX_EMPTY_MSG() (HWMBOX_TAG_INVALID | 0x0)
/*
* Queue size must be power of 2 as '&' op
* is being used to manage circular queues
*/
#define HWMBOX_QUEUE_SIZE 1024
#define HWMBOX_QUEUE_SIZE_MASK (HWMBOX_QUEUE_SIZE - 1)
struct hwmbox_queue {
uint32_t array[HWMBOX_QUEUE_SIZE];
uint16_t head;
uint16_t tail;
uint16_t count;
struct completion comp;
spinlock_t lock;
};
int nvadsp_hwmbox_init(struct platform_device *);
status_t nvadsp_hwmbox_send_data(uint16_t, uint32_t, uint32_t);
void dump_mailbox_regs(void);
#endif /* __HWMAILBOX_H */
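
To make the short-message layout documented above easier to follow, here is a small illustrative sketch (not part of the imported sources) that packs a hypothetical mailbox id and 20-bit payload with PREPARE_HWMBOX_SMSG() and then extracts the fields again with the companion macros:

#include <linux/printk.h>
#include "hwmailbox.h"

static void example_pack_smsg(void)
{
	/* Hypothetical values: mailbox id 0x205, 20-bit payload 0x1234. */
	uint32_t word = PREPARE_HWMBOX_SMSG(0x205, 0x1234);

	/*
	 * Yields 0xa0501234: TAG=1 in bit 31, TYPE=SMSG (bit 30 clear),
	 * mailbox id in bits 29..20 and the message in bits 19..0.
	 */
	pr_info("packed: 0x%08x smsg: %d mid: 0x%x msg: 0x%x\n",
		word, IS_HWMBOX_MSG_SMSG(word),
		HWMBOX_SMSG_MID(word), HWMBOX_SMSG_MSG(word));
}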


@@ -0,0 +1,317 @@
/*
* ADSP mailbox manager
*
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "dev.h"
#define NVADSP_MAILBOX_START 512
#define NVADSP_MAILBOX_MAX 1024
#define NVADSP_MAILBOX_OS_MAX 16
static struct nvadsp_mbox *nvadsp_mboxes[NVADSP_MAILBOX_MAX];
static DECLARE_BITMAP(nvadsp_mbox_ids, NVADSP_MAILBOX_MAX);
static struct nvadsp_drv_data *nvadsp_drv_data;
static inline bool is_mboxq_empty(struct nvadsp_mbox_queue *queue)
{
return (queue->count == 0);
}
static inline bool is_mboxq_full(struct nvadsp_mbox_queue *queue)
{
return (queue->count == NVADSP_MBOX_QUEUE_SIZE);
}
static void mboxq_init(struct nvadsp_mbox_queue *queue)
{
queue->head = 0;
queue->tail = 0;
queue->count = 0;
init_completion(&queue->comp);
spin_lock_init(&queue->lock);
}
static void mboxq_destroy(struct nvadsp_mbox_queue *queue)
{
if (!is_mboxq_empty(queue))
pr_info("Mbox queue %p is not empty.\n", queue);
queue->head = 0;
queue->tail = 0;
queue->count = 0;
}
static status_t mboxq_enqueue(struct nvadsp_mbox_queue *queue,
uint32_t data)
{
int ret = 0;
if (is_mboxq_full(queue)) {
ret = -EINVAL;
goto out;
}
if (is_mboxq_empty(queue))
complete_all(&queue->comp);
queue->array[queue->tail] = data;
queue->tail = (queue->tail + 1) & NVADSP_MBOX_QUEUE_SIZE_MASK;
queue->count++;
out:
return ret;
}
status_t nvadsp_mboxq_enqueue(struct nvadsp_mbox_queue *queue,
uint32_t data)
{
return mboxq_enqueue(queue, data);
}
static status_t mboxq_dequeue(struct nvadsp_mbox_queue *queue,
uint32_t *data)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&queue->lock, flags);
if (is_mboxq_empty(queue)) {
ret = -EBUSY;
goto comp;
}
*data = queue->array[queue->head];
queue->head = (queue->head + 1) & NVADSP_MBOX_QUEUE_SIZE_MASK;
queue->count--;
if (is_mboxq_empty(queue))
goto comp;
else
goto out;
comp:
reinit_completion(&queue->comp);
out:
spin_unlock_irqrestore(&queue->lock, flags);
return ret;
}
static uint16_t nvadsp_mbox_alloc_mboxid(void)
{
unsigned long start = NVADSP_MAILBOX_START;
unsigned int nr = 1;
unsigned long align = 0;
uint16_t mid;
mid = bitmap_find_next_zero_area(nvadsp_drv_data->mbox_ids,
NVADSP_MAILBOX_MAX - 1,
start, nr, align);
bitmap_set(nvadsp_drv_data->mbox_ids, mid, 1);
return mid;
}
static status_t nvadsp_mbox_free_mboxid(uint16_t mid)
{
bitmap_clear(nvadsp_drv_data->mbox_ids, mid, 1);
return 0;
}
status_t nvadsp_mbox_open(struct nvadsp_mbox *mbox, uint16_t *mid,
const char *name, nvadsp_mbox_handler_t handler,
void *hdata)
{
unsigned long flags;
int ret = 0;
if (!nvadsp_drv_data) {
ret = -ENOSYS;
goto err;
}
spin_lock_irqsave(&nvadsp_drv_data->mbox_lock, flags);
if (!mbox) {
ret = -EINVAL;
goto out;
}
if (*mid == 0) {
mbox->id = nvadsp_mbox_alloc_mboxid();
if (mbox->id >= NVADSP_MAILBOX_MAX) {
ret = -ENOMEM;
mbox->id = 0;
goto out;
}
*mid = mbox->id;
} else {
if (*mid >= NVADSP_MAILBOX_MAX) {
pr_debug("%s: Invalid mailbox %d.\n",
__func__, *mid);
ret = -EINVAL;
goto out;
}
if (nvadsp_drv_data->mboxes[*mid]) {
pr_debug("%s: mailbox %d already opened.\n",
__func__, *mid);
ret = -EINVAL;
goto out;
}
mbox->id = *mid;
}
strncpy(mbox->name, name, NVADSP_MBOX_NAME_MAX);
mboxq_init(&mbox->recv_queue);
mbox->handler = handler;
mbox->hdata = hdata;
nvadsp_drv_data->mboxes[mbox->id] = mbox;
out:
spin_unlock_irqrestore(&nvadsp_drv_data->mbox_lock, flags);
err:
return ret;
}
EXPORT_SYMBOL(nvadsp_mbox_open);
status_t nvadsp_mbox_send(struct nvadsp_mbox *mbox, uint32_t data,
uint32_t flags, bool block, unsigned int timeout)
{
int ret = 0;
if (!nvadsp_drv_data) {
ret = -ENOSYS;
goto out;
}
if (!mbox) {
ret = -EINVAL;
goto out;
}
retry:
ret = nvadsp_hwmbox_send_data(mbox->id, data, flags);
if (!ret)
goto out;
if (ret == -EBUSY) {
if (block) {
ret = wait_for_completion_timeout(
&nvadsp_drv_data->hwmbox_send_queue.comp,
msecs_to_jiffies(timeout));
if (ret) {
block = false;
goto retry;
} else {
ret = -ETIME;
goto out;
}
} else {
pr_debug("Failed to enqueue data 0x%x. ret: %d\n",
data, ret);
}
} else if (ret) {
pr_debug("Failed to enqueue data 0x%x. ret: %d\n", data, ret);
goto out;
}
out:
return ret;
}
EXPORT_SYMBOL(nvadsp_mbox_send);
status_t nvadsp_mbox_recv(struct nvadsp_mbox *mbox, uint32_t *data, bool block,
unsigned int timeout)
{
int ret = 0;
if (!nvadsp_drv_data) {
ret = -ENOSYS;
goto out;
}
if (!mbox) {
ret = -EINVAL;
goto out;
}
retry:
ret = mboxq_dequeue(&mbox->recv_queue, data);
if (!ret)
goto out;
if (ret == -EBUSY) {
if (block) {
ret = wait_for_completion_timeout(
&mbox->recv_queue.comp,
msecs_to_jiffies(timeout));
if (ret) {
block = false;
goto retry;
} else {
ret = -ETIME;
goto out;
}
} else {
pr_debug("Failed to receive data. ret: %d\n", ret);
}
} else if (ret) {
pr_debug("Failed to receive data. ret: %d\n", ret);
goto out;
}
out:
return ret;
}
EXPORT_SYMBOL(nvadsp_mbox_recv);
status_t nvadsp_mbox_close(struct nvadsp_mbox *mbox)
{
unsigned long flags;
int ret = 0;
if (!nvadsp_drv_data) {
ret = -ENOSYS;
goto err;
}
spin_lock_irqsave(&nvadsp_drv_data->mbox_lock, flags);
if (!mbox) {
ret = -EINVAL;
goto out;
}
if (!is_mboxq_empty(&mbox->recv_queue)) {
ret = -EINVAL;
goto out;
}
nvadsp_mbox_free_mboxid(mbox->id);
mboxq_destroy(&mbox->recv_queue);
nvadsp_drv_data->mboxes[mbox->id] = NULL;
out:
spin_unlock_irqrestore(&nvadsp_drv_data->mbox_lock, flags);
err:
return ret;
}
EXPORT_SYMBOL(nvadsp_mbox_close);
status_t __init nvadsp_mbox_init(struct platform_device *pdev)
{
struct nvadsp_drv_data *drv = platform_get_drvdata(pdev);
drv->mboxes = nvadsp_mboxes;
drv->mbox_ids = nvadsp_mbox_ids;
spin_lock_init(&drv->mbox_lock);
nvadsp_drv_data = drv;
return 0;
}
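
For context, the sketch below shows how a hypothetical client might drive the API implemented above once the nvadsp platform device has probed: open a mailbox (id 0 requests a dynamically allocated id), send a short message, block for a reply, then close. The prototypes and the NVADSP_MBOX_SMSG flag are assumed to come from linux/tegra_nvadsp.h; the mailbox name, payload and timeouts are made up.

#include <linux/tegra_nvadsp.h>

static struct nvadsp_mbox example_mbox;

static int example_mbox_roundtrip(void)
{
	uint16_t mid = 0;	/* 0 lets nvadsp_mbox_open() allocate an id */
	uint32_t reply;
	int ret;

	/* No receive handler: incoming words queue up for nvadsp_mbox_recv(). */
	ret = nvadsp_mbox_open(&example_mbox, &mid, "example", NULL, NULL);
	if (ret)
		return ret;

	/* Short message; block up to 100 ms if the hw mailbox is backed up. */
	ret = nvadsp_mbox_send(&example_mbox, 0xbeef, NVADSP_MBOX_SMSG,
			       true, 100);
	if (!ret)
		/* Wait up to 500 ms for the ADSP side to answer. */
		ret = nvadsp_mbox_recv(&example_mbox, &reply, true, 500);

	nvadsp_mbox_close(&example_mbox);
	return ret;
}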


@@ -0,0 +1,323 @@
/*
* mem_manager.c
*
* memory manager
*
* Copyright (C) 2014-2015 NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) "%s : %d, " fmt, __func__, __LINE__
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include "mem_manager.h"
static void clear_alloc_list(struct mem_manager_info *mm_info);
void *mem_request(void *mem_handle, const char *name, size_t size)
{
unsigned long flags;
struct mem_manager_info *mm_info =
(struct mem_manager_info *)mem_handle;
struct mem_chunk *mc_iterator = NULL, *best_match_chunk = NULL;
struct mem_chunk *new_mc = NULL;
spin_lock_irqsave(&mm_info->lock, flags);
/* Is mem full? */
if (list_empty(mm_info->free_list)) {
pr_err("%s : memory full\n", mm_info->name);
spin_unlock_irqrestore(&mm_info->lock, flags);
return ERR_PTR(-ENOMEM);
}
/* Find the best size match */
list_for_each_entry(mc_iterator, mm_info->free_list, node) {
if (mc_iterator->size >= size) {
if (best_match_chunk == NULL)
best_match_chunk = mc_iterator;
else if (mc_iterator->size < best_match_chunk->size)
best_match_chunk = mc_iterator;
}
}
/* Is free node found? */
if (best_match_chunk == NULL) {
pr_err("%s : no enough memory available\n", mm_info->name);
spin_unlock_irqrestore(&mm_info->lock, flags);
return ERR_PTR(-ENOMEM);
}
/* Is it exact match? */
if (best_match_chunk->size == size) {
list_del(&best_match_chunk->node);
list_for_each_entry(mc_iterator, mm_info->alloc_list, node) {
if (best_match_chunk->address < mc_iterator->address) {
list_add_tail(&best_match_chunk->node,
&mc_iterator->node);
strlcpy(best_match_chunk->name, name,
NAME_SIZE);
spin_unlock_irqrestore(&mm_info->lock, flags);
return best_match_chunk;
}
}
list_add(&best_match_chunk->node, mm_info->alloc_list);
strlcpy(best_match_chunk->name, name, NAME_SIZE);
spin_unlock_irqrestore(&mm_info->lock, flags);
return best_match_chunk;
} else {
new_mc = kzalloc(sizeof(struct mem_chunk), GFP_ATOMIC);
if (unlikely(!new_mc)) {
pr_err("failed to allocate memory for mem_chunk\n");
spin_unlock_irqrestore(&mm_info->lock, flags);
return ERR_PTR(-ENOMEM);
}
new_mc->address = best_match_chunk->address;
new_mc->size = size;
strlcpy(new_mc->name, name, NAME_SIZE);
best_match_chunk->address += size;
best_match_chunk->size -= size;
list_for_each_entry(mc_iterator, mm_info->alloc_list, node) {
if (new_mc->address < mc_iterator->address) {
list_add_tail(&new_mc->node,
&mc_iterator->node);
spin_unlock_irqrestore(&mm_info->lock, flags);
return new_mc;
}
}
list_add_tail(&new_mc->node, mm_info->alloc_list);
spin_unlock_irqrestore(&mm_info->lock, flags);
return new_mc;
}
}
EXPORT_SYMBOL(mem_request);
/*
* Find the node with the specified address and remove it from the list
*/
bool mem_release(void *mem_handle, void *handle)
{
unsigned long flags;
struct mem_manager_info *mm_info =
(struct mem_manager_info *)mem_handle;
struct mem_chunk *mc_curr = NULL, *mc_prev = NULL;
struct mem_chunk *mc_free = (struct mem_chunk *)handle;
pr_debug(" addr = %lu, size = %lu, name = %s\n",
mc_free->address, mc_free->size, mc_free->name);
spin_lock_irqsave(&mm_info->lock, flags);
list_for_each_entry(mc_curr, mm_info->free_list, node) {
if (mc_free->address < mc_curr->address) {
strlcpy(mc_free->name, "FREE", NAME_SIZE);
/* adjacent next free node */
if (mc_curr->address ==
(mc_free->address + mc_free->size)) {
mc_curr->address = mc_free->address;
mc_curr->size += mc_free->size;
list_del(&mc_free->node);
kfree(mc_free);
/* and adjacent prev free node */
if ((mc_prev != NULL) &&
((mc_prev->address + mc_prev->size) ==
mc_curr->address)) {
mc_prev->size += mc_curr->size;
list_del(&mc_curr->node);
kfree(mc_curr);
}
}
/* adjacent prev free node */
else if ((mc_prev != NULL) &&
((mc_prev->address + mc_prev->size) ==
mc_free->address)) {
mc_prev->size += mc_free->size;
list_del(&mc_free->node);
kfree(mc_free);
} else {
list_del(&mc_free->node);
list_add_tail(&mc_free->node,
&mc_curr->node);
}
spin_unlock_irqrestore(&mm_info->lock, flags);
return true;
}
mc_prev = mc_curr;
}
spin_unlock_irqrestore(&mm_info->lock, flags);
return false;
}
EXPORT_SYMBOL(mem_release);
inline unsigned long mem_get_address(void *handle)
{
struct mem_chunk *mc = (struct mem_chunk *)handle;
return mc->address;
}
EXPORT_SYMBOL(mem_get_address);
void mem_print(void *mem_handle)
{
struct mem_manager_info *mm_info =
(struct mem_manager_info *)mem_handle;
struct mem_chunk *mc_iterator = NULL;
pr_info("------------------------------------\n");
pr_info("%s ALLOCATED\n", mm_info->name);
list_for_each_entry(mc_iterator, mm_info->alloc_list, node) {
pr_info(" addr = %lu, size = %lu, name = %s\n",
mc_iterator->address, mc_iterator->size,
mc_iterator->name);
}
pr_info("%s FREE\n", mm_info->name);
list_for_each_entry(mc_iterator, mm_info->free_list, node) {
pr_info(" addr = %lu, size = %lu, name = %s\n",
mc_iterator->address, mc_iterator->size,
mc_iterator->name);
}
pr_info("------------------------------------\n");
}
EXPORT_SYMBOL(mem_print);
void mem_dump(void *mem_handle, struct seq_file *s)
{
struct mem_manager_info *mm_info =
(struct mem_manager_info *)mem_handle;
struct mem_chunk *mc_iterator = NULL;
seq_puts(s, "---------------------------------------\n");
seq_printf(s, "%s ALLOCATED\n", mm_info->name);
list_for_each_entry(mc_iterator, mm_info->alloc_list, node) {
seq_printf(s, " addr = %lu, size = %lu, name = %s\n",
mc_iterator->address, mc_iterator->size,
mc_iterator->name);
}
seq_printf(s, "%s FREE\n", mm_info->name);
list_for_each_entry(mc_iterator, mm_info->free_list, node) {
seq_printf(s, " addr = %lu, size = %lu, name = %s\n",
mc_iterator->address, mc_iterator->size,
mc_iterator->name);
}
seq_puts(s, "---------------------------------------\n");
}
EXPORT_SYMBOL(mem_dump);
static void clear_alloc_list(struct mem_manager_info *mm_info)
{
struct list_head *curr, *next;
struct mem_chunk *mc = NULL;
list_for_each_safe(curr, next, mm_info->alloc_list) {
mc = list_entry(curr, struct mem_chunk, node);
pr_debug(" addr = %lu, size = %lu, name = %s\n",
mc->address, mc->size,
mc->name);
mem_release(mm_info, mc);
}
}
void *create_mem_manager(const char *name, unsigned long start_address,
unsigned long size)
{
void *ret = NULL;
struct mem_chunk *mc;
struct mem_manager_info *mm_info =
kzalloc(sizeof(struct mem_manager_info), GFP_KERNEL);
if (unlikely(!mm_info)) {
pr_err("failed to allocate memory for mem_manager_info\n");
return ERR_PTR(-ENOMEM);
}
strlcpy(mm_info->name, name, NAME_SIZE);
mm_info->alloc_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
if (unlikely(!mm_info->alloc_list)) {
pr_err("failed to allocate memory for alloc_list\n");
ret = ERR_PTR(-ENOMEM);
goto free_mm_info;
}
mm_info->free_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
if (unlikely(!mm_info->free_list)) {
pr_err("failed to allocate memory for free_list\n");
ret = ERR_PTR(-ENOMEM);
goto free_alloc_list;
}
INIT_LIST_HEAD(mm_info->alloc_list);
INIT_LIST_HEAD(mm_info->free_list);
mm_info->start_address = start_address;
mm_info->size = size;
/* Add whole memory to free list */
mc = kzalloc(sizeof(struct mem_chunk), GFP_KERNEL);
if (unlikely(!mc)) {
pr_err("failed to allocate memory for mem_chunk\n");
ret = ERR_PTR(-ENOMEM);
goto free_free_list;
}
mc->address = mm_info->start_address;
mc->size = mm_info->size;
strlcpy(mc->name, "FREE", NAME_SIZE);
list_add(&mc->node, mm_info->free_list);
spin_lock_init(&mm_info->lock);
return (void *)mm_info;
free_free_list:
kfree(mm_info->free_list);
free_alloc_list:
kfree(mm_info->alloc_list);
free_mm_info:
kfree(mm_info);
return ret;
}
EXPORT_SYMBOL(create_mem_manager);
void destroy_mem_manager(void *mem_handle)
{
struct mem_manager_info *mm_info =
(struct mem_manager_info *)mem_handle;
struct mem_chunk *mc_last = NULL;
/* Clear all allocated memory */
clear_alloc_list(mm_info);
mc_last = list_entry((mm_info->free_list)->next,
struct mem_chunk, node);
list_del(&mc_last->node);
kfree(mc_last);
kfree(mm_info->alloc_list);
kfree(mm_info->free_list);
kfree(mm_info);
}
EXPORT_SYMBOL(destroy_mem_manager);


@@ -0,0 +1,51 @@
/*
* Header file for memory manager
*
* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef __TEGRA_NVADSP_MEM_MANAGER_H
#define __TEGRA_NVADSP_MEM_MANAGER_H
#include <linux/sizes.h>
#define NAME_SIZE SZ_16
struct mem_chunk {
struct list_head node;
char name[NAME_SIZE];
unsigned long address;
unsigned long size;
};
struct mem_manager_info {
struct list_head *alloc_list;
struct list_head *free_list;
char name[NAME_SIZE];
unsigned long start_address;
unsigned long size;
spinlock_t lock;
};
void *create_mem_manager(const char *name, unsigned long start_address,
unsigned long size);
void destroy_mem_manager(void *mem_handle);
void *mem_request(void *mem_handle, const char *name, size_t size);
bool mem_release(void *mem_handle, void *handle);
unsigned long mem_get_address(void *handle);
void mem_print(void *mem_handle);
void mem_dump(void *mem_handle, struct seq_file *s);
#endif /* __TEGRA_NVADSP_MEM_MANAGER_H */
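
A minimal sketch of the raw allocator declared above (illustrative only; the base address, sizes and names are hypothetical): create a manager over a region, take two best-fit allocations, give one back so it merges with the adjacent free space, and tear the manager down.

#include <linux/err.h>
#include <linux/printk.h>
#include "mem_manager.h"

static int example_mem_manager(void)
{
	void *mgr, *a, *b;

	/* Manage a hypothetical 1 MB window starting at 0x90000000. */
	mgr = create_mem_manager("EXAMPLE", 0x90000000UL, 0x100000UL);
	if (IS_ERR(mgr))
		return PTR_ERR(mgr);

	/* Both requests are carved best-fit out of the single free chunk. */
	a = mem_request(mgr, "chunk_a", 0x4000);
	b = mem_request(mgr, "chunk_b", 0x8000);
	if (IS_ERR(a) || IS_ERR(b)) {
		destroy_mem_manager(mgr);
		return -ENOMEM;
	}

	pr_info("chunk_a at 0x%lx, chunk_b at 0x%lx\n",
		mem_get_address(a), mem_get_address(b));

	/* chunk_b borders the remaining free chunk, so it merges on release. */
	mem_release(mgr, b);
	mem_print(mgr);		/* log the alloc/free lists */

	/* destroy_mem_manager() releases anything still allocated (chunk_a). */
	destroy_mem_manager(mgr);
	return 0;
}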


@@ -0,0 +1,170 @@
/*
* ADSP circular message queue
*
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/tegra_nvadsp.h>
#define msgq_wmemcpy(dest, src, words) \
memcpy(dest, src, (words) * sizeof(int32_t))
/**
* msgq_init - Initialize message queue
* @msgq: pointer to the client message queue
* @size: size of message queue in words
* size will be capped to MSGQ_MAX_QUEUE_WSIZE
*
* This function does not return a value.
*
* The message queue requires space for the queue to be
* preallocated and should only be initialized once. The queue
* space immediately follows the queue header and begins at
* msgq_t::queue. All messages are queued directly with
* no pointer address space translation.
*
*
*/
void msgq_init(msgq_t *msgq, int32_t size)
{
if (MSGQ_MAX_QUEUE_WSIZE < size) {
/* cap the maximum size */
pr_info("msgq_init: %d size capped to MSGQ_MAX_QUEUE_WSIZE\n",
size);
size = MSGQ_MAX_QUEUE_WSIZE;
}
msgq->size = size;
msgq->read_index = 0;
msgq->write_index = 0;
}
EXPORT_SYMBOL(msgq_init);
/**
* msgq_queue_message - Queues a message in the queue
* @msgq: pointer to the client message queue
* @message: Message buffer to copy from
*
* This function returns 0 if no error has occurred. -ENOSPC will
* be returned if no space is available in the queue for the
* entire message. On -ENOSPC, it may be possible the
* queue size was capped at init time to MSGQ_MAX_QUEUE_WSIZE if an
* unreasonable size was specified.
*
*
*/
int32_t msgq_queue_message(msgq_t *msgq, const msgq_message_t *message)
{
int32_t ret = 0;
if (msgq && message) {
int32_t ri = msgq->read_index;
int32_t wi = msgq->write_index;
bool wrap = ri <= wi;
int32_t *start = msgq->queue;
int32_t *end = &msgq->queue[msgq->size];
int32_t *first = &msgq->queue[wi];
int32_t *last = &msgq->queue[ri];
int32_t qremainder = wrap ? end - first : last - first;
int32_t qsize = wrap ? qremainder + (last - start) : qremainder;
int32_t msize = &message->payload[message->size] -
(int32_t *)message;
if (qsize <= msize) {
/* don't allow read == write */
ret = -ENOSPC;
} else if (msize < qremainder) {
msgq_wmemcpy(first, message, msize);
msgq->write_index = wi + MSGQ_MESSAGE_HEADER_WSIZE +
message->size;
} else {
/* message wrapped */
msgq_wmemcpy(first, message, qremainder);
msgq_wmemcpy(msgq->queue, (int32_t *)message +
qremainder, msize - qremainder);
msgq->write_index = wi + MSGQ_MESSAGE_HEADER_WSIZE +
message->size - msgq->size;
}
} else {
pr_err("NULL: msgq %p message %p\n", msgq, message);
ret = -EFAULT; /* Bad Address */
}
return ret;
}
EXPORT_SYMBOL(msgq_queue_message);
/**
* msgq_dequeue_message - Dequeues a message from the queue
* @msgq: pointer to the client message queue
* @message: Message buffer to copy to or
* NULL to discard the current message
*
* This function returns 0 if no error has occurred.
* msgq_message_t::size will be set to the size of the message
* in words. -ENOSPC will be returned if the buffer is too small
* for the queued message. -ENOMSG will be returned if there is no
* message in the queue.
*
*
*/
int32_t msgq_dequeue_message(msgq_t *msgq, msgq_message_t *message)
{
int32_t ret = 0;
int32_t ri;
int32_t wi;
msgq_message_t *msg;
if (!msgq) {
pr_err("NULL: msgq %p\n", msgq);
return -EFAULT; /* Bad Address */
}
ri = msgq->read_index;
wi = msgq->write_index;
msg = (msgq_message_t *)&msgq->queue[msgq->read_index];
if (ri == wi) {
/* empty queue */
if (message)
message->size = 0;
ret = -ENOMSG;
} else if (!message) {
/* no input buffer, discard top message */
ri += MSGQ_MESSAGE_HEADER_WSIZE + msg->size;
msgq->read_index = ri < msgq->size ? ri : ri - msgq->size;
} else if (message->size < msg->size) {
/* return buffer too small */
message->size = msg->size;
ret = -ENOSPC;
} else {
/* copy message to the output buffer */
int32_t msize = MSGQ_MESSAGE_HEADER_WSIZE + msg->size;
int32_t *first = &msgq->queue[msgq->read_index];
int32_t *end = &msgq->queue[msgq->size];
int32_t qremainder = end - first;
if (msize < qremainder) {
msgq_wmemcpy(message, first, msize);
msgq->read_index = ri + MSGQ_MESSAGE_HEADER_WSIZE +
msg->size;
} else {
/* message wrapped */
msgq_wmemcpy(message, first, qremainder);
msgq_wmemcpy((int32_t *)message + qremainder,
msgq->queue, msize - qremainder);
msgq->read_index = ri + MSGQ_MESSAGE_HEADER_WSIZE +
msg->size - msgq->size;
}
}
return ret;
}
EXPORT_SYMBOL(msgq_dequeue_message);
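
A short producer/consumer sketch of the API above (illustrative only): it assumes msgq_t and msgq_message_t from linux/tegra_nvadsp.h expose just the size/payload and queue/index fields used by this file, and the backing storage sizes and payload values are made up.

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/tegra_nvadsp.h>

static int example_msgq_roundtrip(void)
{
	/* Word-aligned backing storage: queue header plus 64 payload words. */
	static int32_t q_store[sizeof(msgq_t) / sizeof(int32_t) + 64];
	static int32_t tx_store[sizeof(msgq_message_t) / sizeof(int32_t) + 4];
	static int32_t rx_store[sizeof(msgq_message_t) / sizeof(int32_t) + 4];
	msgq_t *q = (msgq_t *)q_store;
	msgq_message_t *tx = (msgq_message_t *)tx_store;
	msgq_message_t *rx = (msgq_message_t *)rx_store;
	int32_t ret;

	msgq_init(q, 64);		/* 64-word queue, capped if too large */

	tx->size = 2;			/* payload length in words */
	tx->payload[0] = 0xfeed;	/* hypothetical payload */
	tx->payload[1] = 0xbeef;
	ret = msgq_queue_message(q, tx);
	if (ret)
		return ret;		/* -ENOSPC if the queue is full */

	rx->size = 4;			/* capacity of the receive buffer */
	ret = msgq_dequeue_message(q, rx);
	if (ret)
		return ret;		/* -ENOMSG if the queue is empty */

	pr_info("dequeued %d words, payload[0]=0x%x\n",
		rx->size, rx->payload[0]);
	return 0;
}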


@@ -0,0 +1,39 @@
/*
* nvadsp_arb_sema.c
*
* ADSP Arbitrated Semaphores
*
* Copyright (C) 2014 NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/tegra_nvadsp.h>
nvadsp_arb_sema_t *nvadsp_arb_sema_init(uint8_t nvadsp_arb_sema_id)
{
return NULL;
}
status_t nvadsp_arb_sema_destroy(nvadsp_arb_sema_t *sema)
{
return -ENOENT;
}
status_t nvadsp_arb_sema_acquire(nvadsp_arb_sema_t *sema)
{
return -ENOENT;
}
status_t nvadsp_arb_sema_release(nvadsp_arb_sema_t *sema)
{
return -ENOENT;
}


@@ -0,0 +1,67 @@
/*
* nvadsp_dram.c
*
* DRAM Sharing with ADSP
*
* Copyright (C) 2014 NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/tegra_nvadsp.h>
nvadsp_iova_addr_t
nvadsp_dram_map_single(struct device *nvadsp_dev,
void *cpu_addr, size_t size,
nvadsp_data_direction_t direction)
{
return DMA_ERROR_CODE;
}
void
nvadsp_dram_unmap_single(struct device *nvadsp_dev,
nvadsp_iova_addr_t iova_addr, size_t size,
nvadsp_data_direction_t direction)
{
return;
}
nvadsp_iova_addr_t
nvadsp_dram_map_page(struct device *nvadsp_dev,
struct page *page, unsigned long offset, size_t size,
nvadsp_data_direction_t direction)
{
return DMA_ERROR_CODE;
}
void
nvadsp_dram_unmap_page(struct device *nvadsp_dev,
nvadsp_iova_addr_t iova_addr, size_t size,
nvadsp_data_direction_t direction)
{
return;
}
void
nvadsp_dram_sync_single_for_cpu(struct device *nvadsp_dev,
nvadsp_iova_addr_t iova_addr, size_t size,
nvadsp_data_direction_t direction)
{
return;
}
void
nvadsp_dram_sync_single_for_device(struct device *nvadsp_dev,
nvadsp_iova_addr_t iova_addr, size_t size,
nvadsp_data_direction_t direction)
{
return;
}


@@ -0,0 +1,41 @@
/*
* nvadsp_shared_sema.c
*
* ADSP Shared Semaphores
*
* Copyright (C) 2014 NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/tegra_nvadsp.h>
nvadsp_shared_sema_t *
nvadsp_shared_sema_init(uint8_t nvadsp_shared_sema_id)
{
return NULL;
}
status_t nvadsp_shared_sema_destroy(nvadsp_shared_sema_t *sema)
{
return -ENOENT;
}
status_t nvadsp_shared_sema_acquire(nvadsp_shared_sema_t *sema)
{
return -ENOENT;
}
status_t nvadsp_shared_sema_release(nvadsp_shared_sema_t *sema)
{
return -ENOENT;
}


@@ -0,0 +1,20 @@
/*
* Copyright (C) 2016, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "dev.h"
int nvadsp_os_init(struct platform_device *pdev)
{
return 0;
}


@@ -0,0 +1,21 @@
/*
* Copyright (C) 2015, NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __TEGRA_NVADSP_OS_T21X_H
#define __TEGRA_NVADSP_OS_T21X_H
#include <linux/irqchip/tegra-agic.h>
#define ADSP_WDT_INT INT_ADSP_WDT
#endif /* __TEGRA_NVADSP_OS_T21X_H */


(File diff suppressed because it is too large.)


@@ -0,0 +1,160 @@
/*
* os.h
*
* A header file containing data structures shared with ADSP OS
*
* Copyright (C) 2014-2016 NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __TEGRA_NVADSP_OS_H
#define __TEGRA_NVADSP_OS_H
#include <linux/firmware.h>
#include "adsp_shared_struct.h"
#if defined(CONFIG_ARCH_TEGRA_21x_SOC)
#include "os-t21x.h"
#else
#include "os-t18x.h"
#endif /* CONFIG_ARCH_TEGRA_21x_SOC */
#define CONFIG_ADSP_DRAM_LOG_WITH_TAG 1
/* enable profiling of app load/init/start */
#define RECORD_STATS 0
#define SYM_NAME_SZ 128
#define AMC_EVP_RESET_VEC_0 0x700
#define AMC_EVP_UNDEF_VEC_0 0x704
#define AMC_EVP_SWI_VEC_0 0x708
#define AMC_EVP_PREFETCH_ABORT_VEC_0 0x70c
#define AMC_EVP_DATA_ABORT_VEC_0 0x710
#define AMC_EVP_RSVD_VEC_0 0x714
#define AMC_EVP_IRQ_VEC_0 0x718
#define AMC_EVP_FIQ_VEC_0 0x71c
#define AMC_EVP_RESET_ADDR_0 0x720
#define AMC_EVP_UNDEF_ADDR_0 0x724
#define AMC_EVP_SWI_ADDR_0 0x728
#define AMC_EVP_PREFETCH_ABORT_ADDR_0 0x72c
#define AMC_EVP_DATA_ABORT_ADDR_0 0x730
#define AMC_EVP_RSVD_ADDR_0 0x734
#define AMC_EVP_IRQ_ADDR_0 0x738
#define AMC_EVP_FIQ_ADDR_0 0x73c
#define AMC_EVP_SIZE (AMC_EVP_FIQ_ADDR_0 - AMC_EVP_RESET_VEC_0 + 4)
#define AMC_EVP_WSIZE (AMC_EVP_SIZE >> 2)
#define OS_LOAD_TIMEOUT 5000 /* ms */
#define ADSP_COM_MBOX_ID 2
#define MIN_ADSP_FREQ 38400000lu /* in Hz */
enum adsp_os_cmd {
ADSP_OS_BOOT_COMPLETE,
ADSP_OS_SUSPEND,
ADSP_OS_RESUME,
};
#if RECORD_STATS
#define RECORD_STAT(x) \
(x = ktime_to_ns(ktime_get()) - x)
#define EQUATE_STAT(x, y) \
(x = y)
#define RECORD_TIMESTAMP(x) \
(x = nvadsp_get_timestamp_counter())
#else
#define RECORD_STAT(x)
#define EQUATE_STAT(x, y)
#define RECORD_TIMESTAMP(x)
#endif
/**
* struct global_sym_info - Global Symbol information required by app loader.
* @name: Name of the symbol
* @addr: Address of the symbol
* @info: Type and binding attributes
*/
struct global_sym_info {
char name[SYM_NAME_SZ];
uint32_t addr;
unsigned char info;
};
struct adsp_module {
const char *name;
void *handle;
void *module_ptr;
uint32_t adsp_module_ptr;
size_t size;
const struct app_mem_size mem_size;
bool dynamic;
};
struct app_load_stats {
s64 ns_time_load;
s64 ns_time_service_parse;
s64 ns_time_module_load;
s64 ns_time_req_firmware;
s64 ns_time_layout;
s64 ns_time_native_load;
s64 ns_time_load_mbox_send_time;
s64 ns_time_load_wait_time;
s64 ns_time_native_load_complete;
u64 ns_time_adsp_map;
u64 ns_time_adsp_app_load;
u64 ns_time_adsp_send_status;
u64 adsp_receive_timestamp;
u64 host_send_timestamp;
u64 host_receive_timestamp;
};
struct app_init_stats {
s64 ns_time_app_init;
s64 ns_time_app_alloc;
s64 ns_time_instance_memory;
s64 ns_time_native_call;
u64 ns_time_adsp_app_init;
u64 ns_time_adsp_mem_instance_map;
u64 ns_time_adsp_init_call;
u64 ns_time_adsp_send_status;
u64 adsp_receive_timestamp;
};
struct app_start_stats {
s64 ns_time_app_start;
s64 ns_time_native_call;
s64 ns_time_adsp_app_start;
u64 ns_time_app_thread_creation;
u64 ns_time_app_thread_detach;
u64 ns_time_app_thread_resume;
u64 ns_time_adsp_send_status;
u64 adsp_receive_timestamp;
};
int nvadsp_os_probe(struct platform_device *);
int nvadsp_os_init(struct platform_device *pdev);
int nvadsp_app_module_probe(struct platform_device *);
int adsp_add_load_mappings(phys_addr_t, void *, int);
struct elf32_shdr *nvadsp_get_section(const struct firmware *, char *);
struct global_sym_info *find_global_symbol(const char *);
void update_nvadsp_app_shared_ptr(void *);
struct adsp_module *load_adsp_dynamic_module(const char *, const char *,
struct device *);
struct adsp_module *load_adsp_static_module(const char *,
struct adsp_shared_app *, struct device *);
void unload_adsp_module(struct adsp_module *);
int allocate_memory_from_adsp(void **, unsigned int);
bool is_adsp_dram_addr(u64);
int load_adsp_static_apps(void);
#endif /* __TEGRA_NVADSP_OS_H */