From 70941decf9f5a5dea6c583779bf8539f23a38dfb Mon Sep 17 00:00:00 2001 From: Adeel Raza Date: Tue, 23 Jun 2020 10:50:36 -0700 Subject: [PATCH] tegra: hwpm: add SOC HWPM driver Add a driver for programming the Tegra SOC HWPM path. SOC HWPM allows performance monitoring of various Tegra IPs. The profiling tests cases are configured through IOCTLs sent by a userspace profiling app. The IOCTLs provide the following features: - IP discovery and reservation - Buffer management - Whitelist query - Register read/write ops Bug 200702306 Bug 3305495 Change-Id: I65003b126e01bd03d856767c55aa2424bcfd11fb Signed-off-by: Adeel Raza Reviewed-on: https://git-master.nvidia.com/r/c/linux-t23x/+/2515148 Reviewed-by: mobile promotions GVS: Gerrit_Virtual_Submit Tested-by: mobile promotions --- Makefile | 11 + tegra-soc-hwpm-debugfs.c | 52 + tegra-soc-hwpm-hw.h | 377 ++++++++ tegra-soc-hwpm-io.c | 1950 ++++++++++++++++++++++++++++++++++++++ tegra-soc-hwpm-io.h | 136 +++ tegra-soc-hwpm-ioctl.c | 1679 ++++++++++++++++++++++++++++++++ tegra-soc-hwpm-log.c | 53 ++ tegra-soc-hwpm-log.h | 39 + tegra-soc-hwpm.c | 230 +++++ tegra-soc-hwpm.h | 109 +++ 10 files changed, 4636 insertions(+) create mode 100644 Makefile create mode 100644 tegra-soc-hwpm-debugfs.c create mode 100644 tegra-soc-hwpm-hw.h create mode 100644 tegra-soc-hwpm-io.c create mode 100644 tegra-soc-hwpm-io.h create mode 100644 tegra-soc-hwpm-ioctl.c create mode 100644 tegra-soc-hwpm-log.c create mode 100644 tegra-soc-hwpm-log.h create mode 100644 tegra-soc-hwpm.c create mode 100644 tegra-soc-hwpm.h diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..d53ef26 --- /dev/null +++ b/Makefile @@ -0,0 +1,11 @@ +# +# Tegra SOC HWPM +# + +GCOV_PROFILE := y + +obj-y += tegra-soc-hwpm.o +obj-y += tegra-soc-hwpm-io.o +obj-y += tegra-soc-hwpm-ioctl.o +obj-y += tegra-soc-hwpm-log.o +obj-$(CONFIG_DEBUG_FS) += tegra-soc-hwpm-debugfs.o diff --git a/tegra-soc-hwpm-debugfs.c b/tegra-soc-hwpm-debugfs.c new file mode 100644 index 0000000..fb33d93 --- /dev/null +++ b/tegra-soc-hwpm-debugfs.c @@ -0,0 +1,52 @@ +/* + * tegra-soc-hwpm-debugfs.c: + * This file adds debugfs nodes for the Tegra SOC HWPM driver. + * + * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include "tegra-soc-hwpm.h" + +/* FIXME: This is a placeholder for now. We can add debugfs nodes as needed. 
*/ +void tegra_soc_hwpm_debugfs_init(struct tegra_soc_hwpm *hwpm) +{ + if (!hwpm) { + tegra_soc_hwpm_err("Invalid hwpm struct"); + return; + } + + hwpm->debugfs_root = debugfs_create_dir(TEGRA_SOC_HWPM_MODULE_NAME, NULL); + if (!hwpm->debugfs_root) { + tegra_soc_hwpm_err("Failed to create debugfs root directory"); + goto fail; + } + + return; + +fail: + debugfs_remove_recursive(hwpm->debugfs_root); + hwpm->debugfs_root = NULL; +} + +void tegra_soc_hwpm_debugfs_deinit(struct tegra_soc_hwpm *hwpm) +{ + if (!hwpm) { + tegra_soc_hwpm_err("Invalid hwpm struct"); + return; + } + + debugfs_remove_recursive(hwpm->debugfs_root); + hwpm->debugfs_root = NULL; +} diff --git a/tegra-soc-hwpm-hw.h b/tegra-soc-hwpm-hw.h new file mode 100644 index 0000000..524385f --- /dev/null +++ b/tegra-soc-hwpm-hw.h @@ -0,0 +1,377 @@ +/* + * tegra-soc-hwpm-hw.h: + * This header contains HW aperture and register info for the Tegra SOC HWPM + * driver. + * + * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef TEGRA_SOC_HWPM_HW_H +#define TEGRA_SOC_HWPM_HW_H + +#include + +/* FIXME: Move enum to DT include file? */ +enum tegra_soc_hwpm_dt_aperture { + TEGRA_SOC_HWPM_INVALID_DT = -1, + + /* PERFMONs */ + TEGRA_SOC_HWPM_VI0_PERFMON_DT = 0, + TEGRA_SOC_HWPM_FIRST_PERFMON_DT = TEGRA_SOC_HWPM_VI0_PERFMON_DT, + TEGRA_SOC_HWPM_VI1_PERFMON_DT = TEGRA_SOC_HWPM_FIRST_PERFMON_DT + 1, + TEGRA_SOC_HWPM_ISP0_PERFMON_DT, + TEGRA_SOC_HWPM_VICA0_PERFMON_DT, + TEGRA_SOC_HWPM_OFAA0_PERFMON_DT, + TEGRA_SOC_HWPM_PVAV0_PERFMON_DT, + TEGRA_SOC_HWPM_PVAV1_PERFMON_DT, + TEGRA_SOC_HWPM_PVAC0_PERFMON_DT, + TEGRA_SOC_HWPM_NVDLAB0_PERFMON_DT, + TEGRA_SOC_HWPM_NVDLAB1_PERFMON_DT, + TEGRA_SOC_HWPM_NVDISPLAY0_PERFMON_DT, + TEGRA_SOC_HWPM_SYS0_PERFMON_DT, + TEGRA_SOC_HWPM_MGBE0_PERFMON_DT, + TEGRA_SOC_HWPM_MGBE1_PERFMON_DT, + TEGRA_SOC_HWPM_MGBE2_PERFMON_DT, + TEGRA_SOC_HWPM_MGBE3_PERFMON_DT, + TEGRA_SOC_HWPM_SCF0_PERFMON_DT, + TEGRA_SOC_HWPM_NVDECA0_PERFMON_DT, + TEGRA_SOC_HWPM_NVENCA0_PERFMON_DT, + TEGRA_SOC_HWPM_MSSNVLHSH0_PERFMON_DT, + TEGRA_SOC_HWPM_PCIE0_PERFMON_DT, + TEGRA_SOC_HWPM_PCIE1_PERFMON_DT, + TEGRA_SOC_HWPM_PCIE2_PERFMON_DT, + TEGRA_SOC_HWPM_PCIE3_PERFMON_DT, + TEGRA_SOC_HWPM_PCIE4_PERFMON_DT, + TEGRA_SOC_HWPM_PCIE5_PERFMON_DT, + TEGRA_SOC_HWPM_PCIE6_PERFMON_DT, + TEGRA_SOC_HWPM_PCIE7_PERFMON_DT, + TEGRA_SOC_HWPM_PCIE8_PERFMON_DT, + TEGRA_SOC_HWPM_PCIE9_PERFMON_DT, + TEGRA_SOC_HWPM_PCIE10_PERFMON_DT, + TEGRA_SOC_HWPM_MSSCHANNELPARTA0_PERFMON_DT, + TEGRA_SOC_HWPM_MSSCHANNELPARTA1_PERFMON_DT, + TEGRA_SOC_HWPM_MSSCHANNELPARTA2_PERFMON_DT, + TEGRA_SOC_HWPM_MSSCHANNELPARTA3_PERFMON_DT, + TEGRA_SOC_HWPM_MSSCHANNELPARTB0_PERFMON_DT, + TEGRA_SOC_HWPM_MSSCHANNELPARTB1_PERFMON_DT, + TEGRA_SOC_HWPM_MSSCHANNELPARTB2_PERFMON_DT, + TEGRA_SOC_HWPM_MSSCHANNELPARTB3_PERFMON_DT, + TEGRA_SOC_HWPM_MSSCHANNELPARTC0_PERFMON_DT, + TEGRA_SOC_HWPM_MSSCHANNELPARTC1_PERFMON_DT, + TEGRA_SOC_HWPM_MSSCHANNELPARTC2_PERFMON_DT, + TEGRA_SOC_HWPM_MSSCHANNELPARTC3_PERFMON_DT, + 
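Each PERFMON/PMA/RTR entry in this enum doubles as an index into the MMIO ranges the driver picks up from the device tree (see the aperture-ranges comment in tegra-soc-hwpm-io.c further down). The probe-side mapping code is not part of this section; the following is only a sketch, assuming the apertures are exposed as indexed "reg" entries on the HWPM platform device, with a hypothetical helper name:

#include <linux/io.h>
#include <linux/platform_device.h>

/* Sketch only: map one aperture by its DT index. */
static void __iomem *hwpm_map_dt_aperture(struct platform_device *pdev,
					  enum tegra_soc_hwpm_dt_aperture idx)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, idx);
	if (!res)
		return NULL;

	return devm_ioremap(&pdev->dev, res->start, resource_size(res));
}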
TEGRA_SOC_HWPM_MSSCHANNELPARTD0_PERFMON_DT, + TEGRA_SOC_HWPM_MSSCHANNELPARTD1_PERFMON_DT, + TEGRA_SOC_HWPM_MSSCHANNELPARTD2_PERFMON_DT, + TEGRA_SOC_HWPM_MSSCHANNELPARTD3_PERFMON_DT, + TEGRA_SOC_HWPM_MSSHUB0_PERFMON_DT, + TEGRA_SOC_HWPM_MSSHUB1_PERFMON_DT, + TEGRA_SOC_HWPM_MSSMCFCLIENT0_PERFMON_DT, + TEGRA_SOC_HWPM_MSSMCFMEM0_PERFMON_DT, + TEGRA_SOC_HWPM_MSSMCFMEM1_PERFMON_DT, + TEGRA_SOC_HWPM_LAST_PERFMON_DT = TEGRA_SOC_HWPM_MSSMCFMEM1_PERFMON_DT, + + /* PMA */ + TEGRA_SOC_HWPM_PMA_DT = TEGRA_SOC_HWPM_LAST_PERFMON_DT + 1, + + /* RTR */ + TEGRA_SOC_HWPM_RTR_DT, + + TEGRA_SOC_HWPM_NUM_DT_APERTURES +}; +#define IS_PERFMON(idx) (((idx) >= TEGRA_SOC_HWPM_FIRST_PERFMON_DT) && \ + ((idx) <= TEGRA_SOC_HWPM_LAST_PERFMON_DT)) + +/* RPG_PM Aperture */ +/* FIXME: Use __SIZE_1 for handling per PERFMON registers? */ +#define NV_ADDRESS_MAP_RPG_PM_BASE 0x0f100000 +#define NV_ADDRESS_MAP_RPG_PM_LIMIT 0x0f149fff +#define NV_PERF_PMMSYS_PERDOMAIN_OFFSET 0x1000 +#define PERFMON_BASE(ip_idx) (NV_ADDRESS_MAP_RPG_PM_BASE + \ + ((u32)(ip_idx)) * NV_PERF_PMMSYS_PERDOMAIN_OFFSET) +#define PERFMON_LIMIT(ip_idx) (PERFMON_BASE((ip_idx) + 1) - 1) +#define NV_PERF_PMMSYS_CONTROL 0x9C +#define NV_PERF_PMMSYS_CONTROL_MODE_SHIFT 0 +#define NV_PERF_PMMSYS_CONTROL_MODE_MASK 0x00000007 +#define NV_PERF_PMMSYS_CONTROL_MODE_DISABLE 0x00000000 +#define NV_PERF_PMMSYS_CONTROL_MODE_A 0x00000001 +#define NV_PERF_PMMSYS_CONTROL_MODE_B 0x00000002 +#define NV_PERF_PMMSYS_CONTROL_MODE_C 0x00000003 +#define NV_PERF_PMMSYS_CONTROL_MODE_E 0x00000005 +#define NV_PERF_PMMSYS_CONTROL_MODE_NULL 0x00000007 +#define NV_PERF_PMMSYS_SYS0_ENGINESTATUS 0xC8 +#define NV_PERF_PMMSYS_SYS0_ENGINESTATUS_ENABLE_SHIFT 8 +#define NV_PERF_PMMSYS_SYS0_ENGINESTATUS_ENABLE_MASK 0x00000100 +#define NV_PERF_PMMSYS_SYS0_ENGINESTATUS_ENABLE_MASKED 0x0 +#define NV_PERF_PMMSYS_SYS0_ENGINESTATUS_ENABLE_OUT 0x1 + +/* PMA Aperture */ +/* FIXME: Add __SIZE_1 logic for channels? 
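Worked example for the RPG_PM layout above: each perfmon domain occupies one NV_PERF_PMMSYS_PERDOMAIN_OFFSET (4 KB) slice, so for ISP0 (enum index 2) PERFMON_BASE(TEGRA_SOC_HWPM_ISP0_PERFMON_DT) evaluates to 0x0f100000 + 2 * 0x1000 = 0x0f102000 and the matching PERFMON_LIMIT() to 0x0f102fff. Field updates through the _SHIFT/_MASK pairs are ordinary read-modify-write sequences; the snippet below is a sketch only and assumes perfmon_va is an ioremapped pointer to one such slice:

#include <linux/io.h>

/* Sketch only: switch one perfmon to MODE_B with a read-modify-write. */
static void hwpm_perfmon_set_mode_b(void __iomem *perfmon_va)
{
	u32 reg = readl(perfmon_va + NV_PERF_PMMSYS_CONTROL);

	reg &= ~NV_PERF_PMMSYS_CONTROL_MODE_MASK;
	reg |= (NV_PERF_PMMSYS_CONTROL_MODE_B <<
		NV_PERF_PMMSYS_CONTROL_MODE_SHIFT) &
	       NV_PERF_PMMSYS_CONTROL_MODE_MASK;
	writel(reg, perfmon_va + NV_PERF_PMMSYS_CONTROL);
}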
*/ +#define NV_ADDRESS_MAP_PMA_BASE 0x0f14a000 +#define NV_ADDRESS_MAP_PMA_LIMIT 0x0f14bfff +#define NV_PERF_PMASYS_CG2 0x44 +#define NV_PERF_PMASYS_CG2_SLCG_SHIFT 0 +/* FIXME: Use standard format for masks */ +#define NV_PERF_PMASYS_CG2_SLCG_MASK 0x1 +#define NV_PERF_PMASYS_CG2_SLCG_ENABLED 0x00000000 +#define NV_PERF_PMASYS_CG2_SLCG_DISABLED 0x00000001 +#define NV_PERF_PMASYS_CONTROLB 0x70 +#define NV_PERF_PMASYS_CONTROLB_COALESCE_TIMEOUT_CYCLES_SHIFT 4 +#define NV_PERF_PMASYS_CONTROLB_COALESCE_TIMEOUT_CYCLES_MASK 0x00000070 +#define NV_PERF_PMASYS_CONTROLB_COALESCE_TIMEOUT_CYCLES__PROD 0x00000004 +#define NV_PERF_PMASYS_CHANNEL_STATUS_SECURE(i) (0x610+(i)*0x180) +#define NV_PERF_PMASYS_CHANNEL_STATUS_SECURE_CH0 NV_PERF_PMASYS_CHANNEL_STATUS_SECURE(0) +#define NV_PERF_PMASYS_CHANNEL_STATUS_SECURE_MEMBUF_STATUS_SHIFT 0 +#define NV_PERF_PMASYS_CHANNEL_STATUS_SECURE_MEMBUF_STATUS_MASK 0x00000001 +#define NV_PERF_PMASYS_CHANNEL_STATUS_SECURE_MEMBUF_STATUS_INIT 0x00000000 +#define NV_PERF_PMASYS_CHANNEL_STATUS_SECURE_MEMBUF_STATUS_OVERFLOWED 0x00000001 +#define NV_PERF_PMASYS_CHANNEL_CONTROL_USER(i) (0x620+(i)*0x180) +#define NV_PERF_PMASYS_CHANNEL_CONTROL_USER_CH0 NV_PERF_PMASYS_CHANNEL_CONTROL_USER(0) +#define NV_PERF_PMASYS_CHANNEL_CONTROL_USER_STREAM_SHIFT 0 +#define NV_PERF_PMASYS_CHANNEL_CONTROL_USER_STREAM_MASK 0x00000001 +#define NV_PERF_PMASYS_CHANNEL_CONTROL_USER_STREAM_DISABLE 0x00000000 +#define NV_PERF_PMASYS_CHANNEL_CONTROL_USER_STREAM_ENABLE 0x00000001 +#define NV_PERF_PMASYS_CHANNEL_CONTROL_USER_UPDATE_BYTES_SHIFT 31 +#define NV_PERF_PMASYS_CHANNEL_CONTROL_USER_UPDATE_BYTES_MASK 0x80000000 +#define NV_PERF_PMASYS_CHANNEL_CONTROL_USER_UPDATE_BYTES_DOIT 0x00000001 +#define NV_PERF_PMASYS_CHANNEL_MEM_BUMP(i) (0x624+(i)*4) +#define NV_PERF_PMASYS_CHANNEL_MEM_BUMP_CH0 NV_PERF_PMASYS_CHANNEL_MEM_BUMP(0) +#define NV_PERF_PMASYS_CHANNEL_MEM_BLOCK(i) (0x638+(i)*4) +#define NV_PERF_PMASYS_CHANNEL_MEM_BLOCK_CH0 NV_PERF_PMASYS_CHANNEL_MEM_BLOCK(0) +#define NV_PERF_PMASYS_CHANNEL_MEM_BLOCK_PTR_SHIFT 0 +#define NV_PERF_PMASYS_CHANNEL_MEM_BLOCK_PTR_MASK 0x3fffffff +#define NV_PERF_PMASYS_CHANNEL_MEM_BLOCK_BASE_SHIFT 0 +#define NV_PERF_PMASYS_CHANNEL_MEM_BLOCK_BASE_MASK 0xfffffff +#define NV_PERF_PMASYS_CHANNEL_MEM_BLOCK_TARGET_SHIFT 28 +#define NV_PERF_PMASYS_CHANNEL_MEM_BLOCK_TARGET_MASK 0x30000000 +#define NV_PERF_PMASYS_CHANNEL_MEM_BLOCK_TARGET_LFB 0x00000000 +#define NV_PERF_PMASYS_CHANNEL_MEM_BLOCK_TARGET_SYS_COH 0x00000002 +#define NV_PERF_PMASYS_CHANNEL_MEM_BLOCK_TARGET_SYS_NCOH 0x00000003 +#define NV_PERF_PMASYS_CHANNEL_MEM_BLOCK_VALID_SHIFT 31 +#define NV_PERF_PMASYS_CHANNEL_MEM_BLOCK_VALID_MASK 0x80000000 +#define NV_PERF_PMASYS_CHANNEL_MEM_BLOCK_VALID_FALSE 0x00000000 +#define NV_PERF_PMASYS_CHANNEL_MEM_BLOCK_VALID_TRUE 0x00000001 +#define NV_PERF_PMASYS_CHANNEL_CONFIG_USER(i) (0x640+(i)*0x180) +#define NV_PERF_PMASYS_CHANNEL_CONFIG_USER_CH0 NV_PERF_PMASYS_CHANNEL_CONFIG_USER(0) +#define NV_PERF_PMASYS_CHANNEL_CONFIG_USER_COALESCE_TIMEOUT_CYCLES_SHIFT 4 +#define NV_PERF_PMASYS_CHANNEL_CONFIG_USER_COALESCE_TIMEOUT_CYCLES_MASK 0x00000070 +#define NV_PERF_PMASYS_CHANNEL_CONFIG_USER_COALESCE_TIMEOUT_CYCLES__PROD 0x00000004 +#define NV_PERF_PMASYS_CHANNEL_OUTBASE(i) (0x644+(i)*4) +#define NV_PERF_PMASYS_CHANNEL_OUTBASE_CH0 NV_PERF_PMASYS_CHANNEL_OUTBASE(0) +#define NV_PERF_PMASYS_CHANNEL_OUTBASE_PTR_SHIFT 5 +#define NV_PERF_PMASYS_CHANNEL_OUTBASE_PTR_MASK 0xffffffe0 +#define NV_PERF_PMASYS_CHANNEL_OUTBASEUPPER(i) (0x648+(i)*4) +#define NV_PERF_PMASYS_CHANNEL_OUTBASEUPPER_CH0 
NV_PERF_PMASYS_CHANNEL_OUTBASEUPPER(0) +#define NV_PERF_PMASYS_CHANNEL_OUTBASEUPPER_PTR_SHIFT 0 +#define NV_PERF_PMASYS_CHANNEL_OUTBASEUPPER_PTR_MASK 0x000000ff +#define NV_PERF_PMASYS_CHANNEL_OUTSIZE(i) (0x64C+(i)*4) +#define NV_PERF_PMASYS_CHANNEL_OUTSIZE_CH0 NV_PERF_PMASYS_CHANNEL_OUTSIZE(0) +#define NV_PERF_PMASYS_CHANNEL_OUTSIZE_NUMBYTES_SHIFT 5 +#define NV_PERF_PMASYS_CHANNEL_OUTSIZE_NUMBYTES_MASK 0xffffffe0 +#define NV_PERF_PMASYS_CHANNEL_MEM_HEAD(i) (0x650+(i)*4) +#define NV_PERF_PMASYS_CHANNEL_MEM_HEAD_CH0 NV_PERF_PMASYS_CHANNEL_MEM_HEAD(0) +#define NV_PERF_PMASYS_CHANNEL_MEM_BYTES_ADDR(i) (0x658+(i)*4) +#define NV_PERF_PMASYS_CHANNEL_MEM_BYTES_ADDR_CH0 NV_PERF_PMASYS_CHANNEL_MEM_BYTES_ADDR(0) +#define NV_PERF_PMASYS_CHANNEL_MEM_BYTES_ADDR_PTR_SHIFT 2 +#define NV_PERF_PMASYS_CHANNEL_MEM_BYTES_ADDR_PTR_MASK 0xfffffffc +#define NV_PERF_PMASYS_SYS_TRIGGER_START_MASK 0x66C +#define NV_PERF_PMASYS_SYS_TRIGGER_START_MASKB 0x670 +#define NV_PERF_PMASYS_SYS_TRIGGER_STOP_MASK 0x684 +#define NV_PERF_PMASYS_SYS_TRIGGER_STOP_MASKB 0x688 +#define NV_PERF_PMASYS_TRIGGER_CONFIG_USER(i) (0x694+(i)*0x180) +#define NV_PERF_PMASYS_TRIGGER_CONFIG_USER_CH0 NV_PERF_PMASYS_TRIGGER_CONFIG_USER(0) +#define NV_PERF_PMASYS_TRIGGER_CONFIG_USER_PMA_PULSE_SHIFT 0 +#define NV_PERF_PMASYS_TRIGGER_CONFIG_USER_PMA_PULSE_MASK 0x00000001 +#define NV_PERF_PMASYS_TRIGGER_CONFIG_USER_PMA_PULSE_DISABLE 0x00000000 +#define NV_PERF_PMASYS_TRIGGER_CONFIG_USER_PMA_PULSE_ENABLE 0x00000001 +#define NV_PERF_PMASYS_TRIGGER_CONFIG_USER_RECORD_STREAM_SHIFT 6 +#define NV_PERF_PMASYS_TRIGGER_CONFIG_USER_RECORD_STREAM_MASK 0x00000040 +#define NV_PERF_PMASYS_TRIGGER_CONFIG_USER_RECORD_STREAM_DISABLE 0x00000000 +#define NV_PERF_PMASYS_TRIGGER_CONFIG_USER_RECORD_STREAM_ENABLE 0x00000001 +#define NV_PERF_PMASYS_ENGINESTATUS 0x75C +#define NV_PERF_PMASYS_ENGINESTATUS_STATUS_SHIFT 0 +#define NV_PERF_PMASYS_ENGINESTATUS_STATUS_MASK 0x00000007 +#define NV_PERF_PMASYS_ENGINESTATUS_STATUS_EMPTY 0x00000000 +#define NV_PERF_PMASYS_ENGINESTATUS_STATUS_ACTIVE 0x00000001 +#define NV_PERF_PMASYS_ENGINESTATUS_STATUS_PAUSED 0x00000002 +#define NV_PERF_PMASYS_ENGINESTATUS_STATUS_QUIESCENT 0x00000003 +#define NV_PERF_PMASYS_ENGINESTATUS_STATUS_STALLED 0x00000005 +#define NV_PERF_PMASYS_ENGINESTATUS_STATUS_FAULTED 0x00000006 +#define NV_PERF_PMASYS_ENGINESTATUS_STATUS_HALTED 0x00000007 +#define NV_PERF_PMASYS_ENGINESTATUS_RBUFEMPTY_SHIFT 4 +#define NV_PERF_PMASYS_ENGINESTATUS_RBUFEMPTY_MASK 0x00000010 +#define NV_PERF_PMASYS_ENGINESTATUS_RBUFEMPTY_EMPTY 0x00000001 +#define NV_PERF_PMASYS_ENGINESTATUS_MBU_STATUS_SHIFT 5 +#define NV_PERF_PMASYS_ENGINESTATUS_MBU_STATUS_MASK 0x00000060 +#define NV_PERF_PMASYS_ENGINESTATUS_MBU_STATUS_IDLE 0x00000000 +#define NV_PERF_PMASYS_ENGINESTATUS_MBU_STATUS_BUSY 0x00000001 +#define NV_PERF_PMASYS_ENGINESTATUS_MBU_STATUS_PENDING 0x00000002 + +/* RTR Aperture */ +#define NV_ADDRESS_MAP_RTR_BASE 0x0f14d000 +#define NV_ADDRESS_MAP_RTR_LIMIT 0x0f14dfff +#define NV_PERF_PMMSYS_SYS0ROUTER_ENGINESTATUS 0x10 +#define NV_PERF_PMMSYS_SYS0ROUTER_ENGINESTATUS_STATUS_SHIFT 0 +#define NV_PERF_PMMSYS_SYS0ROUTER_ENGINESTATUS_STATUS_MASK 0x00000007 +#define NV_PERF_PMMSYS_SYS0ROUTER_ENGINESTATUS_STATUS_EMPTY 0x00000000 +#define NV_PERF_PMMSYS_SYS0ROUTER_ENGINESTATUS_STATUS_ACTIVE 0x00000001 +#define NV_PERF_PMMSYS_SYS0ROUTER_ENGINESTATUS_STATUS_PAUSED 0x00000002 +#define NV_PERF_PMMSYS_SYS0ROUTER_ENGINESTATUS_STATUS_QUIESCENT 0x00000003 +#define NV_PERF_PMMSYS_SYS0ROUTER_ENGINESTATUS_STATUS_STALLED 0x00000005 +#define 
NV_PERF_PMMSYS_SYS0ROUTER_ENGINESTATUS_STATUS_FAULTED 0x00000006 +#define NV_PERF_PMMSYS_SYS0ROUTER_ENGINESTATUS_STATUS_HALTED 0x00000007 +#define NV_PERF_PMMSYS_SYS0ROUTER_ENGINESTATUS_ENABLE_SHIFT 8 +#define NV_PERF_PMMSYS_SYS0ROUTER_ENGINESTATUS_ENABLE_MASK 0x00000100 +#define NV_PERF_PMMSYS_SYS0ROUTER_ENGINESTATUS_ENABLE_MASKED 0x0 +#define NV_PERF_PMMSYS_SYS0ROUTER_ENGINESTATUS_ENABLE_OUT 0x1 +#define NV_PERF_PMMSYS_SYS0ROUTER_PERFMONSTATUS 0x14 +#define NV_PERF_PMMSYS_SYS0ROUTER_PERFMONSTATUS_MERGED_SHIFT 0 +#define NV_PERF_PMMSYS_SYS0ROUTER_PERFMONSTATUS_MERGED_MASK 0x00000007 +#define NV_PERF_PMMSYS_SYS0ROUTER_PERFMONSTATUS_MERGED_EMPTY 0x00000000 +#define NV_PERF_PMMSYS_SYS0ROUTER_CG2 0x18 +#define NV_PERF_PMMSYS_SYS0ROUTER_CG2_SLCG_SHIFT 0 +#define NV_PERF_PMMSYS_SYS0ROUTER_CG2_SLCG_MASK 0x3 +#define NV_PERF_PMMSYS_SYS0ROUTER_CG2_SLCG_ENABLED 0x00000000 +#define NV_PERF_PMMSYS_SYS0ROUTER_CG2_SLCG_DISABLED 0x00000003 + +/* Display Aperture */ +#define NV_ADDRESS_MAP_DISP_BASE 0x13800000 +#define NV_ADDRESS_MAP_DISP_LIMIT 0x138effff + +/* VI Apertures */ +#define NV_ADDRESS_MAP_VI_THI_BASE 0x15f00000 +#define NV_ADDRESS_MAP_VI_THI_LIMIT 0x15ffffff +#define NV_ADDRESS_MAP_VI2_THI_BASE 0x14f00000 +#define NV_ADDRESS_MAP_VI2_THI_LIMIT 0x14ffffff + +/* VIC Aperture */ +#define NV_ADDRESS_MAP_VIC_BASE 0x15340000 +#define NV_ADDRESS_MAP_VIC_LIMIT 0x1537ffff + +/* NVDEC Aperture */ +#define NV_ADDRESS_MAP_NVDEC_BASE 0x15480000 +#define NV_ADDRESS_MAP_NVDEC_LIMIT 0x154bffff + +/* NVENC Aperture */ +#define NV_ADDRESS_MAP_NVENC_BASE 0x154c0000 +#define NV_ADDRESS_MAP_NVENC_LIMIT 0x154fffff + +/* OFA Aperture */ +#define NV_ADDRESS_MAP_OFA_BASE 0x15a50000 +#define NV_ADDRESS_MAP_OFA_LIMIT 0x15a5ffff + +/* ISP Aperture */ +#define NV_ADDRESS_MAP_ISP_THI_BASE 0x14b00000 +#define NV_ADDRESS_MAP_ISP_THI_LIMIT 0x14bfffff + +/* PCIE Apertures */ +#define NV_ADDRESS_MAP_PCIE_C0_CTL_BASE 0x14180000 +#define NV_ADDRESS_MAP_PCIE_C0_CTL_LIMIT 0x1419ffff +#define NV_ADDRESS_MAP_PCIE_C1_CTL_BASE 0x14100000 +#define NV_ADDRESS_MAP_PCIE_C1_CTL_LIMIT 0x1411ffff +#define NV_ADDRESS_MAP_PCIE_C2_CTL_BASE 0x14120000 +#define NV_ADDRESS_MAP_PCIE_C2_CTL_LIMIT 0x1413ffff +#define NV_ADDRESS_MAP_PCIE_C3_CTL_BASE 0x14140000 +#define NV_ADDRESS_MAP_PCIE_C3_CTL_LIMIT 0x1415ffff +#define NV_ADDRESS_MAP_PCIE_C4_CTL_BASE 0x14160000 +#define NV_ADDRESS_MAP_PCIE_C4_CTL_LIMIT 0x1417ffff +#define NV_ADDRESS_MAP_PCIE_C5_CTL_BASE 0x141a0000 +#define NV_ADDRESS_MAP_PCIE_C5_CTL_LIMIT 0x141bffff +#define NV_ADDRESS_MAP_PCIE_C6_CTL_BASE 0x141c0000 +#define NV_ADDRESS_MAP_PCIE_C6_CTL_LIMIT 0x141dffff +#define NV_ADDRESS_MAP_PCIE_C7_CTL_BASE 0x141e0000 +#define NV_ADDRESS_MAP_PCIE_C7_CTL_LIMIT 0x141fffff +#define NV_ADDRESS_MAP_PCIE_C8_CTL_BASE 0x140a0000 +#define NV_ADDRESS_MAP_PCIE_C8_CTL_LIMIT 0x140bffff +#define NV_ADDRESS_MAP_PCIE_C9_CTL_BASE 0x140c0000 +#define NV_ADDRESS_MAP_PCIE_C9_CTL_LIMIT 0x140dffff +#define NV_ADDRESS_MAP_PCIE_C10_CTL_BASE 0x140e0000 +#define NV_ADDRESS_MAP_PCIE_C10_CTL_LIMIT 0x140fffff + +/* PVA Aperture */ +#define NV_ADDRESS_MAP_PVA0_PM_BASE 0x16200000 +#define NV_ADDRESS_MAP_PVA0_PM_LIMIT 0x1620ffff + +/* NVDLA Apertures */ +#define NV_ADDRESS_MAP_NVDLA0_BASE 0x15880000 +#define NV_ADDRESS_MAP_NVDLA0_LIMIT 0x158bffff +#define NV_ADDRESS_MAP_NVDLA1_BASE 0x158c0000 +#define NV_ADDRESS_MAP_NVDLA1_LIMIT 0x158fffff + +/* MGBE Apertures */ +#define NV_ADDRESS_MAP_MGBE0_BASE 0x06800000 +#define NV_ADDRESS_MAP_MGBE0_LIMIT 0x068fffff +#define NV_ADDRESS_MAP_MGBE1_BASE 0x06900000 +#define 
NV_ADDRESS_MAP_MGBE1_LIMIT 0x069fffff +#define NV_ADDRESS_MAP_MGBE2_BASE 0x06a00000 +#define NV_ADDRESS_MAP_MGBE2_LIMIT 0x06afffff +#define NV_ADDRESS_MAP_MGBE3_BASE 0x06b00000 +#define NV_ADDRESS_MAP_MGBE3_LIMIT 0x06bfffff + +/* MC Apertures */ +#define NV_ADDRESS_MAP_MCB_BASE 0x02c10000 +#define NV_ADDRESS_MAP_MCB_LIMIT 0x02c1ffff +#define NV_ADDRESS_MAP_MC0_BASE 0x02c20000 +#define NV_ADDRESS_MAP_MC0_LIMIT 0x02c2ffff +#define NV_ADDRESS_MAP_MC1_BASE 0x02c30000 +#define NV_ADDRESS_MAP_MC1_LIMIT 0x02c3ffff +#define NV_ADDRESS_MAP_MC2_BASE 0x02c40000 +#define NV_ADDRESS_MAP_MC2_LIMIT 0x02c4ffff +#define NV_ADDRESS_MAP_MC3_BASE 0x02c50000 +#define NV_ADDRESS_MAP_MC3_LIMIT 0x02c5ffff +#define NV_ADDRESS_MAP_MC4_BASE 0x02b80000 +#define NV_ADDRESS_MAP_MC4_LIMIT 0x02b8ffff +#define NV_ADDRESS_MAP_MC5_BASE 0x02b90000 +#define NV_ADDRESS_MAP_MC5_LIMIT 0x02b9ffff +#define NV_ADDRESS_MAP_MC6_BASE 0x02ba0000 +#define NV_ADDRESS_MAP_MC6_LIMIT 0x02baffff +#define NV_ADDRESS_MAP_MC7_BASE 0x02bb0000 +#define NV_ADDRESS_MAP_MC7_LIMIT 0x02bbffff +#define NV_ADDRESS_MAP_MC8_BASE 0x01700000 +#define NV_ADDRESS_MAP_MC8_LIMIT 0x0170ffff +#define NV_ADDRESS_MAP_MC9_BASE 0x01710000 +#define NV_ADDRESS_MAP_MC9_LIMIT 0x0171ffff +#define NV_ADDRESS_MAP_MC10_BASE 0x01720000 +#define NV_ADDRESS_MAP_MC10_LIMIT 0x0172ffff +#define NV_ADDRESS_MAP_MC11_BASE 0x01730000 +#define NV_ADDRESS_MAP_MC11_LIMIT 0x0173ffff +#define NV_ADDRESS_MAP_MC12_BASE 0x01740000 +#define NV_ADDRESS_MAP_MC12_LIMIT 0x0174ffff +#define NV_ADDRESS_MAP_MC13_BASE 0x01750000 +#define NV_ADDRESS_MAP_MC13_LIMIT 0x0175ffff +#define NV_ADDRESS_MAP_MC14_BASE 0x01760000 +#define NV_ADDRESS_MAP_MC14_LIMIT 0x0176ffff +#define NV_ADDRESS_MAP_MC15_BASE 0x01770000 +#define NV_ADDRESS_MAP_MC15_LIMIT 0x0177ffff + +/* MSSNVLINK Apertures */ +#define NV_ADDRESS_MAP_MSS_NVLINK_1_BASE 0x01f20000 +#define NV_ADDRESS_MAP_MSS_NVLINK_1_LIMIT 0x01f3ffff +#define NV_ADDRESS_MAP_MSS_NVLINK_2_BASE 0x01f40000 +#define NV_ADDRESS_MAP_MSS_NVLINK_2_LIMIT 0x01f5ffff +#define NV_ADDRESS_MAP_MSS_NVLINK_3_BASE 0x01f60000 +#define NV_ADDRESS_MAP_MSS_NVLINK_3_LIMIT 0x01f7ffff +#define NV_ADDRESS_MAP_MSS_NVLINK_4_BASE 0x01f80000 +#define NV_ADDRESS_MAP_MSS_NVLINK_4_LIMIT 0x01f9ffff +#define NV_ADDRESS_MAP_MSS_NVLINK_5_BASE 0x01fa0000 +#define NV_ADDRESS_MAP_MSS_NVLINK_5_LIMIT 0x01fbffff +#define NV_ADDRESS_MAP_MSS_NVLINK_6_BASE 0x01fc0000 +#define NV_ADDRESS_MAP_MSS_NVLINK_6_LIMIT 0x01fdffff +#define NV_ADDRESS_MAP_MSS_NVLINK_7_BASE 0x01fe0000 +#define NV_ADDRESS_MAP_MSS_NVLINK_7_LIMIT 0x01ffffff +#define NV_ADDRESS_MAP_MSS_NVLINK_8_BASE 0x01e00000 +#define NV_ADDRESS_MAP_MSS_NVLINK_8_LIMIT 0x01e1ffff + +#endif /* TEGRA_SOC_HWPM_HW_H */ diff --git a/tegra-soc-hwpm-io.c b/tegra-soc-hwpm-io.c new file mode 100644 index 0000000..579ae64 --- /dev/null +++ b/tegra-soc-hwpm-io.c @@ -0,0 +1,1950 @@ +/* + * tegra-soc-hwpm-io.c: + * This file contians register read/write functions for the Tegra SOC HWPM + * driver. + * + * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include "tegra-soc-hwpm-io.h" + +/* FIXME: Auto-generate whitelists */ +struct whitelist perfmon_wlist[] = { + {.reg = 0x0, .zero_in_init = true,}, + {.reg = 0x4, .zero_in_init = true,}, + {.reg = 0x8, .zero_in_init = true,}, + {.reg = 0xc, .zero_in_init = true,}, + {.reg = 0x10, .zero_in_init = true,}, + {.reg = 0x14, .zero_in_init = true,}, + {.reg = 0x20, .zero_in_init = true,}, + {.reg = 0x24, .zero_in_init = true,}, + {.reg = 0x28, .zero_in_init = true,}, + {.reg = 0x2c, .zero_in_init = true,}, + {.reg = 0x30, .zero_in_init = true,}, + {.reg = 0x34, .zero_in_init = true,}, + {.reg = 0x40, .zero_in_init = true,}, + {.reg = 0x44, .zero_in_init = true,}, + {.reg = 0x48, .zero_in_init = true,}, + {.reg = 0x4c, .zero_in_init = true,}, + {.reg = 0x50, .zero_in_init = true,}, + {.reg = 0x54, .zero_in_init = true,}, + {.reg = 0x58, .zero_in_init = true,}, + {.reg = 0x5c, .zero_in_init = true,}, + {.reg = 0x60, .zero_in_init = true,}, + {.reg = 0x64, .zero_in_init = true,}, + {.reg = 0x68, .zero_in_init = true,}, + {.reg = 0x6c, .zero_in_init = true,}, + {.reg = 0x70, .zero_in_init = true,}, + {.reg = 0x74, .zero_in_init = true,}, + {.reg = 0x78, .zero_in_init = true,}, + {.reg = 0x7c, .zero_in_init = true,}, + {.reg = 0x80, .zero_in_init = true,}, + {.reg = 0x84, .zero_in_init = true,}, + {.reg = 0x88, .zero_in_init = true,}, + {.reg = 0x8c, .zero_in_init = true,}, + {.reg = 0x90, .zero_in_init = true,}, + {.reg = 0x98, .zero_in_init = true,}, + {.reg = 0x9c, .zero_in_init = true,}, + {.reg = 0xa0, .zero_in_init = true,}, + {.reg = 0xa4, .zero_in_init = true,}, + {.reg = 0xa8, .zero_in_init = true,}, + {.reg = 0xac, .zero_in_init = true,}, + {.reg = 0xb0, .zero_in_init = true,}, + {.reg = 0xb4, .zero_in_init = true,}, + {.reg = 0xb8, .zero_in_init = true,}, + {.reg = 0xbc, .zero_in_init = true,}, + {.reg = 0xc0, .zero_in_init = true,}, + {.reg = 0xc4, .zero_in_init = true,}, + {.reg = 0xc8, .zero_in_init = true,}, + {.reg = 0xcc, .zero_in_init = true,}, + {.reg = 0xd0, .zero_in_init = true,}, + {.reg = 0xd4, .zero_in_init = true,}, + {.reg = 0xd8, .zero_in_init = true,}, + {.reg = 0xdc, .zero_in_init = true,}, + {.reg = 0xe0, .zero_in_init = true,}, + {.reg = 0xe4, .zero_in_init = true,}, + {.reg = 0xe8, .zero_in_init = true,}, + {.reg = 0xec, .zero_in_init = true,}, + {.reg = 0xf8, .zero_in_init = true,}, + {.reg = 0xfc, .zero_in_init = true,}, + {.reg = 0x100, .zero_in_init = true,}, + {.reg = 0x108, .zero_in_init = true,}, + {.reg = 0x110, .zero_in_init = true,}, + {.reg = 0x114, .zero_in_init = true,}, + {.reg = 0x118, .zero_in_init = true,}, + {.reg = 0x11c, .zero_in_init = true,}, + {.reg = 0x120, .zero_in_init = true,}, + {.reg = 0x124, .zero_in_init = true,}, + {.reg = 0x128, .zero_in_init = true,}, + {.reg = 0x130, .zero_in_init = true,}, +}; + +struct whitelist vi_thi_wlist[] = { + {.reg = (0x1088 << 2), .zero_in_init = false,}, + {.reg = (0x3a00 << 2), .zero_in_init = false,}, + {.reg = (0x3a01 << 2), .zero_in_init = false,}, + {.reg = (0x3a02 << 2), .zero_in_init = true,}, + {.reg = (0x3a03 << 2), .zero_in_init = true,}, + {.reg = (0x3a04 << 2), .zero_in_init = true,}, + {.reg = (0x3a05 << 2), .zero_in_init = true,}, + {.reg = (0x3a06 << 2), .zero_in_init = true,}, +}; +struct whitelist vi2_thi_wlist[] = { + {.reg = (0x1088 << 2), .zero_in_init = false,}, + {.reg = (0x3a00 << 2), .zero_in_init = false,}, + {.reg = (0x3a01 << 2), .zero_in_init = false,}, + 
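The whitelist arrays in this file appear to back the commit's "whitelist query" and "register read/write ops" features: an access to an aperture would only be honoured if its offset is listed, and entries flagged zero_in_init presumably get cleared when the aperture is initialised. The driver's actual check is defined elsewhere; a minimal lookup sketch with a hypothetical helper name:

#include <linux/types.h>

/* Sketch only: return true if a register offset appears in an allowlist. */
static bool wlist_contains(const struct whitelist *wlist, size_t count,
			   u64 offset)
{
	size_t i;

	for (i = 0; i < count; i++) {
		if (wlist[i].reg == offset)
			return true;
	}

	return false;
}

For example, wlist_contains(vi_thi_wlist, ARRAY_SIZE(vi_thi_wlist), 0x1088 << 2) returns true, while an unlisted offset would be rejected.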
{.reg = (0x3a02 << 2), .zero_in_init = true,}, + {.reg = (0x3a03 << 2), .zero_in_init = true,}, + {.reg = (0x3a04 << 2), .zero_in_init = true,}, + {.reg = (0x3a05 << 2), .zero_in_init = true,}, + {.reg = (0x3a06 << 2), .zero_in_init = true,}, +}; +/* + * Aperture Ranges (start_pa/end_pa): + * - start_pa and end_pa is 0 for PERFMON, PMA, and RTR apertures. These + * ranges will be extracted from the device tree. + * - IP apertures are not listed in the device tree because we don't map them. + * Therefore, start_pa and end_pa for IP apertures are hardcoded here. IP + * apertures are listed here because we need to track their whitelists. + */ +struct hwpm_resource_aperture vi_map[] = { + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_VI0_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_VI1_PERFMON_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_VI_THI_BASE, + .end_pa = NV_ADDRESS_MAP_VI_THI_LIMIT, + .fake_registers = NULL, + .wlist = vi_thi_wlist, + .wlist_size = ARRAY_SIZE(vi_thi_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_VI2_THI_BASE, + .end_pa = NV_ADDRESS_MAP_VI2_THI_LIMIT, + .fake_registers = NULL, + .wlist = vi2_thi_wlist, + .wlist_size = ARRAY_SIZE(vi2_thi_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, +}; + +struct whitelist isp_thi_wlist[] = { + {.reg = (0x1088 << 2), .zero_in_init = false,}, + {.reg = (0x2470 << 2), .zero_in_init = false,}, + {.reg = (0x2471 << 2), .zero_in_init = false,}, + {.reg = (0x2472 << 2), .zero_in_init = true,}, + {.reg = (0x2473 << 2), .zero_in_init = true,}, + {.reg = (0x2474 << 2), .zero_in_init = true,}, + {.reg = (0x2475 << 2), .zero_in_init = true,}, + {.reg = (0x2476 << 2), .zero_in_init = true,}, +}; +struct hwpm_resource_aperture isp_map[] = { + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_ISP0_PERFMON_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_ISP_THI_BASE, + .end_pa = NV_ADDRESS_MAP_ISP_THI_LIMIT, + .fake_registers = NULL, + .wlist = isp_thi_wlist, + .wlist_size = ARRAY_SIZE(isp_thi_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, +}; + +struct whitelist vic_wlist[] = { + {.reg = 0x1088, .zero_in_init = false,}, + {.reg = 0x10a8, .zero_in_init = false,}, + {.reg = 0x1c00, .zero_in_init = true,}, + {.reg = 0x1c04, .zero_in_init = true,}, + {.reg = 0x1c08, .zero_in_init = true,}, + {.reg = 0x1c0c, .zero_in_init = true,}, + {.reg = 0x1c10, .zero_in_init = true,}, + {.reg = 0x1c14, .zero_in_init = false,}, + {.reg = 0x1c18, .zero_in_init = false,}, +}; +struct hwpm_resource_aperture vic_map[] = { + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_VICA0_PERFMON_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_VIC_BASE, + .end_pa = NV_ADDRESS_MAP_VIC_LIMIT, + .fake_registers = NULL, + .wlist = vic_wlist, + .wlist_size = ARRAY_SIZE(vic_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, +}; + +struct whitelist ofa_wlist[] = { + {.reg = 0x1088, .zero_in_init = false,}, + {.reg = 0x3308, 
.zero_in_init = true,}, + {.reg = 0x330c, .zero_in_init = true,}, + {.reg = 0x3310, .zero_in_init = true,}, + {.reg = 0x3314, .zero_in_init = true,}, + {.reg = 0x3318, .zero_in_init = false,}, + {.reg = 0x331c, .zero_in_init = false,}, +}; +struct hwpm_resource_aperture ofa_map[] = { + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_OFAA0_PERFMON_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_OFA_BASE, + .end_pa = NV_ADDRESS_MAP_OFA_LIMIT, + .fake_registers = NULL, + .wlist = ofa_wlist, + .wlist_size = ARRAY_SIZE(ofa_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, +}; + +struct whitelist pva0_pm_wlist[] = { + {.reg = 0x8000, .zero_in_init = false,}, + {.reg = 0x8004, .zero_in_init = false,}, + {.reg = 0x8008, .zero_in_init = false,}, + {.reg = 0x800c, .zero_in_init = true,}, + {.reg = 0x8010, .zero_in_init = true,}, + {.reg = 0x8014, .zero_in_init = true,}, + {.reg = 0x8018, .zero_in_init = true,}, + {.reg = 0x801c, .zero_in_init = true,}, + {.reg = 0x8020, .zero_in_init = true,}, +}; +/* FIXME: Any missing apertures? */ +struct hwpm_resource_aperture pva_map[] = { + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_PVAV0_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_PVAV1_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .fake_registers = NULL, + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_PVAC0_PERFMON_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_PVA0_PM_BASE, + .end_pa = NV_ADDRESS_MAP_PVA0_PM_LIMIT, + .fake_registers = NULL, + .wlist = pva0_pm_wlist, + .wlist_size = ARRAY_SIZE(pva0_pm_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, +}; + +struct whitelist nvdla_wlist[] = { + {.reg = 0x1088, .zero_in_init = false,}, + {.reg = 0x1a000, .zero_in_init = false,}, + {.reg = 0x1a004, .zero_in_init = false,}, + {.reg = 0x1a008, .zero_in_init = true,}, + {.reg = 0x1a01c, .zero_in_init = true,}, + {.reg = 0x1a030, .zero_in_init = true,}, + {.reg = 0x1a044, .zero_in_init = true,}, + {.reg = 0x1a058, .zero_in_init = true,}, + {.reg = 0x1a06c, .zero_in_init = true,}, +}; +struct hwpm_resource_aperture nvdla_map[] = { + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_NVDLAB0_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .fake_registers = NULL, + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_NVDLAB1_PERFMON_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_NVDLA0_BASE, + .end_pa = NV_ADDRESS_MAP_NVDLA0_LIMIT, + .fake_registers = NULL, + .wlist = nvdla_wlist, + .wlist_size = ARRAY_SIZE(nvdla_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_NVDLA1_BASE, + .end_pa = NV_ADDRESS_MAP_NVDLA1_LIMIT, + .fake_registers = NULL, + .wlist = nvdla_wlist, + .wlist_size = ARRAY_SIZE(nvdla_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, +}; + +struct whitelist mgbe_wlist[] = { + {.reg = 0x8020, .zero_in_init = 
true,}, + {.reg = 0x8024, .zero_in_init = false,}, +}; +struct hwpm_resource_aperture mgbe_map[] = { + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MGBE0_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .fake_registers = NULL, + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MGBE1_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MGBE2_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MGBE3_PERFMON_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MGBE0_BASE, + .end_pa = NV_ADDRESS_MAP_MGBE0_LIMIT, + .fake_registers = NULL, + .wlist = mgbe_wlist, + .wlist_size = ARRAY_SIZE(mgbe_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MGBE1_BASE, + .end_pa = NV_ADDRESS_MAP_MGBE1_LIMIT, + .fake_registers = NULL, + .wlist = mgbe_wlist, + .wlist_size = ARRAY_SIZE(mgbe_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MGBE2_BASE, + .end_pa = NV_ADDRESS_MAP_MGBE2_LIMIT, + .fake_registers = NULL, + .wlist = mgbe_wlist, + .wlist_size = ARRAY_SIZE(mgbe_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MGBE3_BASE, + .end_pa = NV_ADDRESS_MAP_MGBE3_LIMIT, + .fake_registers = NULL, + .wlist = mgbe_wlist, + .wlist_size = ARRAY_SIZE(mgbe_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, +}; + +/* FIXME: Any missing apertures? 
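Note on these resource maps: because PERFMON/PMA/RTR entries keep start_pa/end_pa at 0 until the device tree ranges are filled in, only the hardcoded IP apertures can be matched purely by physical address at this stage. A lookup over one of the maps might look like the sketch below (the helper name and the u64 address type are assumptions, not the driver's actual code):

#include <linux/types.h>

/* Sketch only: find the aperture in a map that covers a physical address. */
static struct hwpm_resource_aperture *
find_hwpm_aperture(struct hwpm_resource_aperture *map, size_t count, u64 pa)
{
	size_t i;

	for (i = 0; i < count; i++) {
		if (map[i].start_pa && pa >= map[i].start_pa &&
		    pa <= map[i].end_pa)
			return &map[i];
	}

	return NULL;
}

For instance, find_hwpm_aperture(mgbe_map, ARRAY_SIZE(mgbe_map), NV_ADDRESS_MAP_MGBE2_BASE + 0x8020) resolves to the MGBE2 IP entry above.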
*/ +struct hwpm_resource_aperture scf_map[] = { + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_SCF0_PERFMON_DT, + }, +}; + +struct whitelist nvdec_wlist[] = { + {.reg = 0x1088, .zero_in_init = false,}, + {.reg = 0x1b48, .zero_in_init = false,}, + {.reg = 0x1b4c, .zero_in_init = false,}, + {.reg = 0x1b50, .zero_in_init = true,}, + {.reg = 0x1b54, .zero_in_init = true,}, + {.reg = 0x1b58, .zero_in_init = true,}, + {.reg = 0x1b5c, .zero_in_init = true,}, +}; +struct hwpm_resource_aperture nvdec_map[] = { + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_NVDECA0_PERFMON_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_NVDEC_BASE, + .end_pa = NV_ADDRESS_MAP_NVDEC_LIMIT, + .fake_registers = NULL, + .wlist = nvdec_wlist, + .wlist_size = ARRAY_SIZE(nvdec_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, +}; + +struct whitelist nvenc_wlist[] = { + {.reg = 0x1088, .zero_in_init = false,}, + {.reg = 0x212c, .zero_in_init = false,}, + {.reg = 0x2130, .zero_in_init = false,}, + {.reg = 0x2134, .zero_in_init = true,}, +}; +struct hwpm_resource_aperture nvenc_map[] = { + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_NVENCA0_PERFMON_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_NVENC_BASE, + .end_pa = NV_ADDRESS_MAP_NVENC_LIMIT, + .fake_registers = NULL, + .wlist = nvenc_wlist, + .wlist_size = ARRAY_SIZE(nvenc_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, +}; + +struct whitelist pcie_ctl_wlist[] = { + {.reg = 0x174, .zero_in_init = true,}, + {.reg = 0x178, .zero_in_init = false,}, +}; +struct hwpm_resource_aperture pcie_map[] = { + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_PCIE0_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_PCIE1_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_PCIE2_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_PCIE3_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_PCIE4_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_PCIE5_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_PCIE6_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = 
false, + .dt_aperture = TEGRA_SOC_HWPM_PCIE7_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_PCIE8_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_PCIE9_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_PCIE10_PERFMON_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_PCIE_C0_CTL_BASE, + .end_pa = NV_ADDRESS_MAP_PCIE_C0_CTL_LIMIT, + .fake_registers = NULL, + .wlist = pcie_ctl_wlist, + .wlist_size = ARRAY_SIZE(pcie_ctl_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_PCIE_C1_CTL_BASE, + .end_pa = NV_ADDRESS_MAP_PCIE_C1_CTL_LIMIT, + .fake_registers = NULL, + .wlist = pcie_ctl_wlist, + .wlist_size = ARRAY_SIZE(pcie_ctl_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_PCIE_C2_CTL_BASE, + .end_pa = NV_ADDRESS_MAP_PCIE_C2_CTL_LIMIT, + .fake_registers = NULL, + .wlist = pcie_ctl_wlist, + .wlist_size = ARRAY_SIZE(pcie_ctl_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_PCIE_C3_CTL_BASE, + .end_pa = NV_ADDRESS_MAP_PCIE_C3_CTL_LIMIT, + .fake_registers = NULL, + .wlist = pcie_ctl_wlist, + .wlist_size = ARRAY_SIZE(pcie_ctl_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_PCIE_C4_CTL_BASE, + .end_pa = NV_ADDRESS_MAP_PCIE_C4_CTL_LIMIT, + .fake_registers = NULL, + .wlist = pcie_ctl_wlist, + .wlist_size = ARRAY_SIZE(pcie_ctl_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_PCIE_C5_CTL_BASE, + .end_pa = NV_ADDRESS_MAP_PCIE_C5_CTL_LIMIT, + .fake_registers = NULL, + .wlist = pcie_ctl_wlist, + .wlist_size = ARRAY_SIZE(pcie_ctl_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_PCIE_C6_CTL_BASE, + .end_pa = NV_ADDRESS_MAP_PCIE_C6_CTL_LIMIT, + .fake_registers = NULL, + .wlist = pcie_ctl_wlist, + .wlist_size = ARRAY_SIZE(pcie_ctl_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_PCIE_C7_CTL_BASE, + .end_pa = NV_ADDRESS_MAP_PCIE_C7_CTL_LIMIT, + .fake_registers = NULL, + .wlist = pcie_ctl_wlist, + .wlist_size = ARRAY_SIZE(pcie_ctl_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_PCIE_C8_CTL_BASE, + .end_pa = NV_ADDRESS_MAP_PCIE_C8_CTL_LIMIT, + .fake_registers = NULL, + .wlist = pcie_ctl_wlist, + .wlist_size = ARRAY_SIZE(pcie_ctl_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_PCIE_C9_CTL_BASE, + .end_pa = NV_ADDRESS_MAP_PCIE_C9_CTL_LIMIT, + .fake_registers = NULL, + .wlist = pcie_ctl_wlist, + .wlist_size = ARRAY_SIZE(pcie_ctl_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_PCIE_C10_CTL_BASE, + .end_pa = NV_ADDRESS_MAP_PCIE_C10_CTL_LIMIT, + .fake_registers = NULL, + .wlist = pcie_ctl_wlist, + .wlist_size = ARRAY_SIZE(pcie_ctl_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, +}; + +struct 
whitelist disp_wlist[] = { + {.reg = 0x1e118, .zero_in_init = true,}, + {.reg = 0x1e120, .zero_in_init = true,}, + {.reg = 0x1e124, .zero_in_init = false,}, +}; +struct hwpm_resource_aperture display_map[] = { + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_NVDISPLAY0_PERFMON_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_DISP_BASE, + .end_pa = NV_ADDRESS_MAP_DISP_LIMIT, + .fake_registers = NULL, + .wlist = disp_wlist, + .wlist_size = ARRAY_SIZE(disp_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, +}; + +/* + * Normally there is a 1-to-1 mapping between an MMIO aperture and a + * hwpm_resource_aperture struct. But MC MMIO apertures are used in multiple + * hwpm_resource_aperture structs. Therefore, we have to share the fake register + * arrays between these hwpm_resource_aperture structs. This is why we have to + * define the fake register arrays globally. For all other 1-to-1 mapping + * apertures the fake register arrays are directly embedded inside the + * hwpm_resource_aperture structs. + */ +u32 *mc_fake_regs[16] = {NULL}; +/* FIXME: Any missing registers? */ +struct whitelist mc_res_mss_channel_wlist[] = { + {.reg = 0x814, .zero_in_init = true,}, +}; +struct hwpm_resource_aperture mss_channel_map[] = { + { + .start_pa = NV_ADDRESS_MAP_MC0_BASE, + .end_pa = NV_ADDRESS_MAP_MC0_LIMIT, + .fake_registers = NULL, + .wlist = mc_res_mss_channel_wlist, + .wlist_size = ARRAY_SIZE(mc_res_mss_channel_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC1_BASE, + .end_pa = NV_ADDRESS_MAP_MC1_LIMIT, + .fake_registers = NULL, + .wlist = mc_res_mss_channel_wlist, + .wlist_size = ARRAY_SIZE(mc_res_mss_channel_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC2_BASE, + .end_pa = NV_ADDRESS_MAP_MC2_LIMIT, + .fake_registers = NULL, + .wlist = mc_res_mss_channel_wlist, + .wlist_size = ARRAY_SIZE(mc_res_mss_channel_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC3_BASE, + .end_pa = NV_ADDRESS_MAP_MC3_LIMIT, + .fake_registers = NULL, + .wlist = mc_res_mss_channel_wlist, + .wlist_size = ARRAY_SIZE(mc_res_mss_channel_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC4_BASE, + .end_pa = NV_ADDRESS_MAP_MC4_LIMIT, + .fake_registers = NULL, + .wlist = mc_res_mss_channel_wlist, + .wlist_size = ARRAY_SIZE(mc_res_mss_channel_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC5_BASE, + .end_pa = NV_ADDRESS_MAP_MC5_LIMIT, + .fake_registers = NULL, + .wlist = mc_res_mss_channel_wlist, + .wlist_size = ARRAY_SIZE(mc_res_mss_channel_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC6_BASE, + .end_pa = NV_ADDRESS_MAP_MC6_LIMIT, + .fake_registers = NULL, + .wlist = mc_res_mss_channel_wlist, + .wlist_size = ARRAY_SIZE(mc_res_mss_channel_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC7_BASE, + .end_pa = NV_ADDRESS_MAP_MC7_LIMIT, + .fake_registers = NULL, + .wlist = mc_res_mss_channel_wlist, + .wlist_size = ARRAY_SIZE(mc_res_mss_channel_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC8_BASE, + .end_pa = 
NV_ADDRESS_MAP_MC8_LIMIT, + .fake_registers = NULL, + .wlist = mc_res_mss_channel_wlist, + .wlist_size = ARRAY_SIZE(mc_res_mss_channel_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC9_BASE, + .end_pa = NV_ADDRESS_MAP_MC9_LIMIT, + .fake_registers = NULL, + .wlist = mc_res_mss_channel_wlist, + .wlist_size = ARRAY_SIZE(mc_res_mss_channel_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC10_BASE, + .end_pa = NV_ADDRESS_MAP_MC10_LIMIT, + .fake_registers = NULL, + .wlist = mc_res_mss_channel_wlist, + .wlist_size = ARRAY_SIZE(mc_res_mss_channel_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC11_BASE, + .end_pa = NV_ADDRESS_MAP_MC11_LIMIT, + .fake_registers = NULL, + .wlist = mc_res_mss_channel_wlist, + .wlist_size = ARRAY_SIZE(mc_res_mss_channel_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC12_BASE, + .end_pa = NV_ADDRESS_MAP_MC12_LIMIT, + .fake_registers = NULL, + .wlist = mc_res_mss_channel_wlist, + .wlist_size = ARRAY_SIZE(mc_res_mss_channel_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC13_BASE, + .end_pa = NV_ADDRESS_MAP_MC13_LIMIT, + .fake_registers = NULL, + .wlist = mc_res_mss_channel_wlist, + .wlist_size = ARRAY_SIZE(mc_res_mss_channel_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC14_BASE, + .end_pa = NV_ADDRESS_MAP_MC14_LIMIT, + .fake_registers = NULL, + .wlist = mc_res_mss_channel_wlist, + .wlist_size = ARRAY_SIZE(mc_res_mss_channel_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC15_BASE, + .end_pa = NV_ADDRESS_MAP_MC15_LIMIT, + .fake_registers = NULL, + .wlist = mc_res_mss_channel_wlist, + .wlist_size = ARRAY_SIZE(mc_res_mss_channel_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MSSCHANNELPARTA0_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MSSCHANNELPARTA1_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MSSCHANNELPARTA2_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MSSCHANNELPARTA3_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MSSCHANNELPARTB0_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MSSCHANNELPARTB1_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = 
TEGRA_SOC_HWPM_MSSCHANNELPARTB2_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MSSCHANNELPARTB3_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MSSCHANNELPARTC0_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MSSCHANNELPARTC1_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MSSCHANNELPARTC2_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MSSCHANNELPARTC3_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MSSCHANNELPARTD0_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MSSCHANNELPARTD1_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MSSCHANNELPARTD2_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MSSCHANNELPARTD3_PERFMON_DT, + }, +}; + +/* FIXME: Any missing registers? 
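The MC MMIO apertures used by mss_channel_map above are used again by the ISO/NISO hub and MCF maps below, which is why mc_fake_regs was declared as a global array earlier in this file instead of embedding per-entry storage. One way the sharing could be wired up is sketched here; the lazy allocation and the one-word-per-register sizing are assumptions, not taken from this patch:

#include <linux/slab.h>

/* Sketch only: allocate the fake registers for one MC aperture and share them. */
static u32 *hwpm_get_mc_fake_regs(int mc_idx, u64 start_pa, u64 end_pa)
{
	if (!mc_fake_regs[mc_idx])
		mc_fake_regs[mc_idx] = kcalloc((end_pa + 1 - start_pa) /
					       sizeof(u32), sizeof(u32),
					       GFP_KERNEL);

	return mc_fake_regs[mc_idx];
}

Both the MC0 entry above and the MC0 entry of mss_iso_niso_hub_map below would then be handed the same hwpm_get_mc_fake_regs(0, NV_ADDRESS_MAP_MC0_BASE, NV_ADDRESS_MAP_MC0_LIMIT) pointer.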
*/ +struct whitelist mss_nvlink_wlist[] = { + {.reg = 0xa30, .zero_in_init = true,}, +}; +struct hwpm_resource_aperture mss_gpu_hub_map[] = { + { + .start_pa = NV_ADDRESS_MAP_MSS_NVLINK_1_BASE, + .end_pa = NV_ADDRESS_MAP_MSS_NVLINK_1_LIMIT, + .fake_registers = NULL, + .wlist = mss_nvlink_wlist, + .wlist_size = ARRAY_SIZE(mss_nvlink_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MSS_NVLINK_2_BASE, + .end_pa = NV_ADDRESS_MAP_MSS_NVLINK_2_LIMIT, + .fake_registers = NULL, + .wlist = mss_nvlink_wlist, + .wlist_size = ARRAY_SIZE(mss_nvlink_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MSS_NVLINK_3_BASE, + .end_pa = NV_ADDRESS_MAP_MSS_NVLINK_3_LIMIT, + .fake_registers = NULL, + .wlist = mss_nvlink_wlist, + .wlist_size = ARRAY_SIZE(mss_nvlink_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MSS_NVLINK_4_BASE, + .end_pa = NV_ADDRESS_MAP_MSS_NVLINK_4_LIMIT, + .fake_registers = NULL, + .wlist = mss_nvlink_wlist, + .wlist_size = ARRAY_SIZE(mss_nvlink_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MSS_NVLINK_5_BASE, + .end_pa = NV_ADDRESS_MAP_MSS_NVLINK_5_LIMIT, + .fake_registers = NULL, + .wlist = mss_nvlink_wlist, + .wlist_size = ARRAY_SIZE(mss_nvlink_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MSS_NVLINK_6_BASE, + .end_pa = NV_ADDRESS_MAP_MSS_NVLINK_6_LIMIT, + .fake_registers = NULL, + .wlist = mss_nvlink_wlist, + .wlist_size = ARRAY_SIZE(mss_nvlink_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MSS_NVLINK_7_BASE, + .end_pa = NV_ADDRESS_MAP_MSS_NVLINK_7_LIMIT, + .fake_registers = NULL, + .wlist = mss_nvlink_wlist, + .wlist_size = ARRAY_SIZE(mss_nvlink_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MSS_NVLINK_8_BASE, + .end_pa = NV_ADDRESS_MAP_MSS_NVLINK_8_LIMIT, + .fake_registers = NULL, + .wlist = mss_nvlink_wlist, + .wlist_size = ARRAY_SIZE(mss_nvlink_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MSSNVLHSH0_PERFMON_DT, + }, +}; + +/* FIXME: Any missing registers? */ +struct whitelist mc0to7_res_mss_iso_niso_hub_wlist[] = { + {.reg = 0x818, .zero_in_init = true,}, + {.reg = 0x81c, .zero_in_init = true,}, +}; +/* FIXME: Any missing registers? 
*/ +struct whitelist mc8_res_mss_iso_niso_hub_wlist[] = { + {.reg = 0x828, .zero_in_init = true,}, +}; +struct hwpm_resource_aperture mss_iso_niso_hub_map[] = { + { + .start_pa = NV_ADDRESS_MAP_MC0_BASE, + .end_pa = NV_ADDRESS_MAP_MC0_LIMIT, + .fake_registers = NULL, + .wlist = mc0to7_res_mss_iso_niso_hub_wlist, + .wlist_size = ARRAY_SIZE(mc0to7_res_mss_iso_niso_hub_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC1_BASE, + .end_pa = NV_ADDRESS_MAP_MC1_LIMIT, + .fake_registers = NULL, + .wlist = mc0to7_res_mss_iso_niso_hub_wlist, + .wlist_size = ARRAY_SIZE(mc0to7_res_mss_iso_niso_hub_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC2_BASE, + .end_pa = NV_ADDRESS_MAP_MC2_LIMIT, + .fake_registers = NULL, + .wlist = mc0to7_res_mss_iso_niso_hub_wlist, + .wlist_size = ARRAY_SIZE(mc0to7_res_mss_iso_niso_hub_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC3_BASE, + .end_pa = NV_ADDRESS_MAP_MC3_LIMIT, + .fake_registers = NULL, + .wlist = mc0to7_res_mss_iso_niso_hub_wlist, + .wlist_size = ARRAY_SIZE(mc0to7_res_mss_iso_niso_hub_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC4_BASE, + .end_pa = NV_ADDRESS_MAP_MC4_LIMIT, + .fake_registers = NULL, + .wlist = mc0to7_res_mss_iso_niso_hub_wlist, + .wlist_size = ARRAY_SIZE(mc0to7_res_mss_iso_niso_hub_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC5_BASE, + .end_pa = NV_ADDRESS_MAP_MC5_LIMIT, + .fake_registers = NULL, + .wlist = mc0to7_res_mss_iso_niso_hub_wlist, + .wlist_size = ARRAY_SIZE(mc0to7_res_mss_iso_niso_hub_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC6_BASE, + .end_pa = NV_ADDRESS_MAP_MC6_LIMIT, + .fake_registers = NULL, + .wlist = mc0to7_res_mss_iso_niso_hub_wlist, + .wlist_size = ARRAY_SIZE(mc0to7_res_mss_iso_niso_hub_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC7_BASE, + .end_pa = NV_ADDRESS_MAP_MC7_LIMIT, + .fake_registers = NULL, + .wlist = mc0to7_res_mss_iso_niso_hub_wlist, + .wlist_size = ARRAY_SIZE(mc0to7_res_mss_iso_niso_hub_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC8_BASE, + .end_pa = NV_ADDRESS_MAP_MC8_LIMIT, + .fake_registers = NULL, + .wlist = mc8_res_mss_iso_niso_hub_wlist, + .wlist_size = ARRAY_SIZE(mc8_res_mss_iso_niso_hub_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MSSHUB0_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MSSHUB1_PERFMON_DT, + }, +}; + +/* FIXME: Any missing registers? */ +struct whitelist mcb_res_mss_mcf_wlist[] = { + {.reg = 0x800, .zero_in_init = true,}, + {.reg = 0x820, .zero_in_init = true,}, + {.reg = 0x80c, .zero_in_init = true,}, + {.reg = 0x824, .zero_in_init = true,}, +}; +/* FIXME: Any missing registers? 
*/ +struct whitelist mc0to1_res_mss_mcf_wlist[] = { + {.reg = 0x810, .zero_in_init = true,}, + {.reg = 0x808, .zero_in_init = true,}, + {.reg = 0x804, .zero_in_init = true,}, +}; +/* FIXME: Any missing registers? */ +struct whitelist mc2to7_res_mss_mcf_wlist[] = { + {.reg = 0x810, .zero_in_init = true,}, +}; +struct hwpm_resource_aperture mss_mcf_map[] = { + { + .start_pa = NV_ADDRESS_MAP_MC0_BASE, + .end_pa = NV_ADDRESS_MAP_MC0_LIMIT, + .fake_registers = NULL, + .wlist = mc0to1_res_mss_mcf_wlist, + .wlist_size = ARRAY_SIZE(mc0to1_res_mss_mcf_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC1_BASE, + .end_pa = NV_ADDRESS_MAP_MC1_LIMIT, + .fake_registers = NULL, + .wlist = mc0to1_res_mss_mcf_wlist, + .wlist_size = ARRAY_SIZE(mc0to1_res_mss_mcf_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC2_BASE, + .end_pa = NV_ADDRESS_MAP_MC2_LIMIT, + .fake_registers = NULL, + .wlist = mc2to7_res_mss_mcf_wlist, + .wlist_size = ARRAY_SIZE(mc2to7_res_mss_mcf_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC3_BASE, + .end_pa = NV_ADDRESS_MAP_MC3_LIMIT, + .fake_registers = NULL, + .wlist = mc2to7_res_mss_mcf_wlist, + .wlist_size = ARRAY_SIZE(mc2to7_res_mss_mcf_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC4_BASE, + .end_pa = NV_ADDRESS_MAP_MC4_LIMIT, + .fake_registers = NULL, + .wlist = mc2to7_res_mss_mcf_wlist, + .wlist_size = ARRAY_SIZE(mc2to7_res_mss_mcf_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC5_BASE, + .end_pa = NV_ADDRESS_MAP_MC5_LIMIT, + .fake_registers = NULL, + .wlist = mc2to7_res_mss_mcf_wlist, + .wlist_size = ARRAY_SIZE(mc2to7_res_mss_mcf_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC6_BASE, + .end_pa = NV_ADDRESS_MAP_MC6_LIMIT, + .fake_registers = NULL, + .wlist = mc2to7_res_mss_mcf_wlist, + .wlist_size = ARRAY_SIZE(mc2to7_res_mss_mcf_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MC7_BASE, + .end_pa = NV_ADDRESS_MAP_MC7_LIMIT, + .fake_registers = NULL, + .wlist = mc2to7_res_mss_mcf_wlist, + .wlist_size = ARRAY_SIZE(mc2to7_res_mss_mcf_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = NV_ADDRESS_MAP_MCB_BASE, + .end_pa = NV_ADDRESS_MAP_MCB_LIMIT, + .fake_registers = NULL, + .wlist = mcb_res_mss_mcf_wlist, + .wlist_size = ARRAY_SIZE(mcb_res_mss_mcf_wlist), + .is_ip = true, + .dt_aperture = TEGRA_SOC_HWPM_INVALID_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MSSMCFCLIENT0_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MSSMCFMEM0_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_MSSMCFMEM1_PERFMON_DT, + }, +}; + +/* + * Normally there is a 1-to-1 mapping between an MMIO aperture and a + * hwpm_resource_aperture struct. 
But the PMA MMIO aperture is used in + * multiple hwpm_resource_aperture structs. Therefore, we have to share the fake + * register array between these hwpm_resource_aperture structs. This is why we + * have to define the fake register array globally. For all other 1-to-1 + * mapping apertures the fake register arrays are directly embedded inside the + * hwpm_resource_aperture structs. + */ +u32 *pma_fake_regs = NULL; +struct whitelist pma_res_pma_wlist[] = { + {.reg = 0x628, .zero_in_init = true,}, +}; +struct hwpm_resource_aperture pma_map[] = { + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = perfmon_wlist, + .wlist_size = ARRAY_SIZE(perfmon_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_SYS0_PERFMON_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = pma_res_pma_wlist, + .wlist_size = ARRAY_SIZE(pma_res_pma_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_PMA_DT, + }, +}; + +struct whitelist pma_res_cmd_slice_rtr_wlist[] = { + {.reg = 0x0, .zero_in_init = false,}, + {.reg = 0x8, .zero_in_init = false,}, + {.reg = 0xc, .zero_in_init = false,}, + {.reg = 0x10, .zero_in_init = false,}, + {.reg = 0x14, .zero_in_init = false,}, + {.reg = 0x3c, .zero_in_init = false,}, + {.reg = 0x44, .zero_in_init = false,}, + {.reg = 0x70, .zero_in_init = false,}, + {.reg = 0x8c, .zero_in_init = false,}, + {.reg = 0x600, .zero_in_init = false,}, + {.reg = 0x604, .zero_in_init = false,}, + {.reg = 0x608, .zero_in_init = false,}, + {.reg = 0x60c, .zero_in_init = false,}, + {.reg = 0x610, .zero_in_init = false,}, + {.reg = 0x618, .zero_in_init = false,}, + {.reg = 0x61c, .zero_in_init = false,}, + {.reg = 0x620, .zero_in_init = false,}, + {.reg = 0x624, .zero_in_init = false,}, + {.reg = 0x62c, .zero_in_init = false,}, + {.reg = 0x630, .zero_in_init = false,}, + {.reg = 0x634, .zero_in_init = false,}, + {.reg = 0x638, .zero_in_init = false,}, + {.reg = 0x63c, .zero_in_init = false,}, + {.reg = 0x640, .zero_in_init = false,}, + {.reg = 0x644, .zero_in_init = false,}, + {.reg = 0x648, .zero_in_init = false,}, + {.reg = 0x64c, .zero_in_init = false,}, + {.reg = 0x650, .zero_in_init = false,}, + {.reg = 0x654, .zero_in_init = false,}, + {.reg = 0x658, .zero_in_init = false,}, + {.reg = 0x65c, .zero_in_init = false,}, + {.reg = 0x660, .zero_in_init = false,}, + {.reg = 0x664, .zero_in_init = false,}, + {.reg = 0x668, .zero_in_init = false,}, + {.reg = 0x66c, .zero_in_init = false,}, + {.reg = 0x670, .zero_in_init = false,}, + {.reg = 0x674, .zero_in_init = false,}, + {.reg = 0x678, .zero_in_init = false,}, + {.reg = 0x67c, .zero_in_init = false,}, + {.reg = 0x680, .zero_in_init = false,}, + {.reg = 0x684, .zero_in_init = false,}, + {.reg = 0x688, .zero_in_init = false,}, + {.reg = 0x68c, .zero_in_init = false,}, + {.reg = 0x690, .zero_in_init = false,}, + {.reg = 0x694, .zero_in_init = false,}, + {.reg = 0x698, .zero_in_init = false,}, + {.reg = 0x69c, .zero_in_init = false,}, + {.reg = 0x6a0, .zero_in_init = false,}, + {.reg = 0x6a4, .zero_in_init = false,}, + {.reg = 0x6a8, .zero_in_init = false,}, + {.reg = 0x6ac, .zero_in_init = false,}, + {.reg = 0x6b0, .zero_in_init = false,}, + {.reg = 0x6b4, .zero_in_init = false,}, + {.reg = 0x6b8, .zero_in_init = false,}, + {.reg = 0x6bc, .zero_in_init = false,}, + {.reg = 0x6c0, .zero_in_init = false,}, + {.reg = 0x6c4, .zero_in_init = false,}, + {.reg = 0x6c8, .zero_in_init = false,}, + {.reg = 0x6cc, .zero_in_init = false,}, + {.reg = 0x6d0, .zero_in_init = false,}, + {.reg = 0x6d4, 
.zero_in_init = false,}, + {.reg = 0x6d8, .zero_in_init = false,}, + {.reg = 0x6dc, .zero_in_init = false,}, + {.reg = 0x6e0, .zero_in_init = false,}, + {.reg = 0x6e4, .zero_in_init = false,}, + {.reg = 0x6e8, .zero_in_init = false,}, + {.reg = 0x6ec, .zero_in_init = false,}, + {.reg = 0x6f0, .zero_in_init = false,}, + {.reg = 0x6f4, .zero_in_init = false,}, + {.reg = 0x6f8, .zero_in_init = false,}, + {.reg = 0x6fc, .zero_in_init = false,}, + {.reg = 0x700, .zero_in_init = false,}, + {.reg = 0x704, .zero_in_init = false,}, + {.reg = 0x708, .zero_in_init = false,}, + {.reg = 0x70c, .zero_in_init = false,}, + {.reg = 0x710, .zero_in_init = false,}, + {.reg = 0x714, .zero_in_init = false,}, + {.reg = 0x718, .zero_in_init = false,}, + {.reg = 0x71c, .zero_in_init = false,}, + {.reg = 0x720, .zero_in_init = false,}, + {.reg = 0x724, .zero_in_init = false,}, + {.reg = 0x728, .zero_in_init = false,}, + {.reg = 0x72c, .zero_in_init = false,}, + {.reg = 0x730, .zero_in_init = false,}, + {.reg = 0x734, .zero_in_init = false,}, + {.reg = 0x75c, .zero_in_init = false,}, +}; +struct whitelist rtr_wlist[] = { + {.reg = 0x0, .zero_in_init = false,}, + {.reg = 0x8, .zero_in_init = false,}, + {.reg = 0xc, .zero_in_init = false,}, + {.reg = 0x10, .zero_in_init = false,}, + {.reg = 0x14, .zero_in_init = false,}, + {.reg = 0x18, .zero_in_init = false,}, + {.reg = 0x150, .zero_in_init = false,}, + {.reg = 0x154, .zero_in_init = false,}, +}; +struct hwpm_resource_aperture cmd_slice_rtr_map[] = { + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = pma_res_cmd_slice_rtr_wlist, + .wlist_size = ARRAY_SIZE(pma_res_cmd_slice_rtr_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_PMA_DT, + }, + { + .start_pa = 0, + .end_pa = 0, + .fake_registers = NULL, + .wlist = rtr_wlist, + .wlist_size = ARRAY_SIZE(rtr_wlist), + .is_ip = false, + .dt_aperture = TEGRA_SOC_HWPM_RTR_DT, + }, +}; + +struct hwpm_resource hwpm_resources[TERGA_SOC_HWPM_NUM_RESOURCES] = { + [TEGRA_SOC_HWPM_RESOURCE_VI] = { + .reserved = false, + .map_size = ARRAY_SIZE(vi_map), + .map = vi_map, + }, + [TEGRA_SOC_HWPM_RESOURCE_ISP] = { + .reserved = false, + .map_size = ARRAY_SIZE(isp_map), + .map = isp_map, + }, + [TEGRA_SOC_HWPM_RESOURCE_VIC] = { + .reserved = false, + .map_size = ARRAY_SIZE(vic_map), + .map = vic_map, + }, + [TEGRA_SOC_HWPM_RESOURCE_OFA] = { + .reserved = false, + .map_size = ARRAY_SIZE(ofa_map), + .map = ofa_map, + }, + [TEGRA_SOC_HWPM_RESOURCE_PVA] = { + .reserved = false, + .map_size = ARRAY_SIZE(pva_map), + .map = pva_map, + }, + [TEGRA_SOC_HWPM_RESOURCE_NVDLA] = { + .reserved = false, + .map_size = ARRAY_SIZE(nvdla_map), + .map = nvdla_map, + }, + [TEGRA_SOC_HWPM_RESOURCE_MGBE] = { + .reserved = false, + .map_size = ARRAY_SIZE(mgbe_map), + .map = mgbe_map, + }, + [TEGRA_SOC_HWPM_RESOURCE_SCF] = { + .reserved = false, + .map_size = ARRAY_SIZE(scf_map), + .map = scf_map, + }, + [TEGRA_SOC_HWPM_RESOURCE_NVDEC] = { + .reserved = false, + .map_size = ARRAY_SIZE(nvdec_map), + .map = nvdec_map, + }, + [TEGRA_SOC_HWPM_RESOURCE_NVENC] = { + .reserved = false, + .map_size = ARRAY_SIZE(nvenc_map), + .map = nvenc_map, + }, + [TEGRA_SOC_HWPM_RESOURCE_PCIE] = { + .reserved = false, + .map_size = ARRAY_SIZE(pcie_map), + .map = pcie_map, + }, + [TEGRA_SOC_HWPM_RESOURCE_DISPLAY] = { + .reserved = false, + .map_size = ARRAY_SIZE(display_map), + .map = display_map, + }, + [TEGRA_SOC_HWPM_RESOURCE_MSS_CHANNEL] = { + .reserved = false, + .map_size = ARRAY_SIZE(mss_channel_map), + .map = mss_channel_map, + }, + 
[TEGRA_SOC_HWPM_RESOURCE_MSS_GPU_HUB] = { + .reserved = false, + .map_size = ARRAY_SIZE(mss_gpu_hub_map), + .map = mss_gpu_hub_map, + }, + [TEGRA_SOC_HWPM_RESOURCE_MSS_ISO_NISO_HUBS] = { + .reserved = false, + .map_size = ARRAY_SIZE(mss_iso_niso_hub_map), + .map = mss_iso_niso_hub_map, + }, + [TEGRA_SOC_HWPM_RESOURCE_MSS_MCF] = { + .reserved = false, + .map_size = ARRAY_SIZE(mss_mcf_map), + .map = mss_mcf_map, + }, + [TEGRA_SOC_HWPM_RESOURCE_PMA] = { + .reserved = false, + .map_size = ARRAY_SIZE(pma_map), + .map = pma_map, + }, + [TEGRA_SOC_HWPM_RESOURCE_CMD_SLICE_RTR] = { + .reserved = false, + .map_size = ARRAY_SIZE(cmd_slice_rtr_map), + .map = cmd_slice_rtr_map, + }, +}; + +static bool whitelist_check(struct hwpm_resource_aperture *aperture, + u64 phys_addr) +{ + u32 idx = 0; + + if (!aperture) { + tegra_soc_hwpm_err("Aperture is NULL"); + return false; + } + if (!aperture->wlist) { + tegra_soc_hwpm_err("NULL whitelist in aperture(0x%llx - 0x%llx)", + aperture->start_pa, aperture->end_pa); + return false; + } + + for (idx = 0; idx < aperture->wlist_size; idx++) { + if (phys_addr == aperture->start_pa + aperture->wlist[idx].reg) + return true; + } + + return false; +} + +/* + * Find an aperture in which phys_addr lies. If check_reservation is true, then + * we also have to do a whitelist check. + */ +struct hwpm_resource_aperture *find_hwpm_aperture(struct tegra_soc_hwpm *hwpm, + u64 phys_addr, + bool check_reservation) +{ + struct hwpm_resource_aperture *aperture = NULL; + int res_idx = 0; + int aprt_idx = 0; + + for (res_idx = 0; res_idx < TERGA_SOC_HWPM_NUM_RESOURCES; res_idx++) { + if (check_reservation && !hwpm_resources[res_idx].reserved) + continue; + + for (aprt_idx = 0; + aprt_idx < hwpm_resources[res_idx].map_size; + aprt_idx++) { + aperture = &(hwpm_resources[res_idx].map[aprt_idx]); + if ((phys_addr >= aperture->start_pa) && + (phys_addr <= aperture->end_pa)) { + if (!check_reservation) { + /* + * This is the driver's internal usecase. + * The driver doesn't need to check + * reservation before reading/writing + * registers. + */ + tegra_soc_hwpm_dbg("Found aperture:" + " phys_addr(0x%llx)," + " aperture(0x%llx - 0x%llx)", + phys_addr, + aperture->start_pa, + aperture->end_pa); + return aperture; + } else if (whitelist_check(aperture, phys_addr)) { + /* + * This is the IOCTL (EXEC_REG_OPS) use case. + * + * If whitelist check passes, then we've found + * the right aperture. If the check fails, we + * continue checking the remaining apertures. In + * some cases (ex: PMA and MC), apertures in + * different resources may have the + * same start/end range but the + * whitelist may differ. Therefore, we have to + * check all apertures. 
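+ * For example, the MC0 IP aperture appears in both the
+ * MSS_ISO_NISO_HUBS map (whitelist 0x818/0x81c) and the
+ * MSS_MCF map (whitelist 0x804/0x808/0x810), so an address
+ * that misses one resource's whitelist can still match the
+ * whitelist of another resource's aperture.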
+ */ + tegra_soc_hwpm_dbg("Found aperture:" + " phys_addr(0x%llx)," + " aperture(0x%llx - 0x%llx)", + phys_addr, + aperture->start_pa, + aperture->end_pa); + return aperture; + } + } + } + } + + tegra_soc_hwpm_err("Unable to find aperture: phys(0x%llx)", phys_addr); + return NULL; +} + +static u32 fake_readl(struct tegra_soc_hwpm *hwpm, u64 phys_addr) +{ + u32 reg_val = 0; + struct hwpm_resource_aperture *aperture = NULL; + + if (!hwpm->fake_registers_enabled) { + tegra_soc_hwpm_err("Fake registers are disabled!"); + return 0; + } + + aperture = find_hwpm_aperture(hwpm, phys_addr, false); + if (!aperture) { + tegra_soc_hwpm_err("Invalid reg op address(0x%llx)", phys_addr); + return 0; + } + + reg_val = aperture->fake_registers[(phys_addr - aperture->start_pa)/4]; + return reg_val; +} + +static void fake_writel(struct tegra_soc_hwpm *hwpm, + u64 phys_addr, + u32 val) +{ + struct hwpm_resource_aperture *aperture = NULL; + + if (!hwpm->fake_registers_enabled) { + tegra_soc_hwpm_err("Fake registers are disabled!"); + return; + } + + aperture = find_hwpm_aperture(hwpm, phys_addr, false); + if (!aperture) { + tegra_soc_hwpm_err("Invalid reg op address(0x%llx)", phys_addr); + return; + } + + aperture->fake_registers[(phys_addr - aperture->start_pa)/4] = val; +} + +/* Read a HWPM (PERFMON, PMA, or RTR) register */ +u32 hwpm_readl(struct tegra_soc_hwpm *hwpm, + enum tegra_soc_hwpm_dt_aperture dt_aperture, + u32 reg) +{ + if ((dt_aperture < 0) || + (dt_aperture >= TEGRA_SOC_HWPM_NUM_DT_APERTURES)) { + tegra_soc_hwpm_err("Invalid dt aperture(%d)", dt_aperture); + return 0; + } + + tegra_soc_hwpm_dbg("reg read: dt_aperture(%d), reg(0x%x)", + dt_aperture, reg); + + if (hwpm->fake_registers_enabled) { + u64 base_pa = 0; + + if (IS_PERFMON(dt_aperture)) + base_pa = PERFMON_BASE(dt_aperture); + else if (dt_aperture == TEGRA_SOC_HWPM_PMA_DT) + base_pa = NV_ADDRESS_MAP_PMA_BASE; + else + base_pa = NV_ADDRESS_MAP_RTR_BASE; + + return fake_readl(hwpm, base_pa + reg); + } else { + return readl(hwpm->dt_apertures[dt_aperture] + reg); + } +} + +/* Write a HWPM (PERFMON, PMA, or RTR) register */ +void hwpm_writel(struct tegra_soc_hwpm *hwpm, + enum tegra_soc_hwpm_dt_aperture dt_aperture, + u32 reg, + u32 val) +{ + if ((dt_aperture < 0) || + (dt_aperture >= TEGRA_SOC_HWPM_NUM_DT_APERTURES)) { + tegra_soc_hwpm_err("Invalid dt aperture(%d)", dt_aperture); + return; + } + + tegra_soc_hwpm_dbg("reg write: dt_aperture(%d), reg(0x%x), val(0x%x)", + dt_aperture, reg, val); + + if (hwpm->fake_registers_enabled) { + u64 base_pa = 0; + + if (IS_PERFMON(dt_aperture)) + base_pa = PERFMON_BASE(dt_aperture); + else if (dt_aperture == TEGRA_SOC_HWPM_PMA_DT) + base_pa = NV_ADDRESS_MAP_PMA_BASE; + else + base_pa = NV_ADDRESS_MAP_RTR_BASE; + + fake_writel(hwpm, base_pa + reg, val); + } else { + writel(val, hwpm->dt_apertures[dt_aperture] + reg); + } +} + +/* + * FIXME: Remove all non-HWPM register reads from the driver. + * Replace them with inter-driver APIs? + */ +u32 ip_readl(struct tegra_soc_hwpm *hwpm, u64 phys_addr) +{ + tegra_soc_hwpm_dbg("reg read: phys_addr(0x%llx)", phys_addr); + + if (hwpm->fake_registers_enabled) { + return fake_readl(hwpm, phys_addr); + } else { + void __iomem *ptr = NULL; + u32 val = 0; + + ptr = ioremap(phys_addr, 0x4); + if (!ptr) { + tegra_soc_hwpm_err("Failed to map register(0x%llx)", + phys_addr); + return 0; + } + val = __raw_readl(ptr); + iounmap(ptr); + return val; + } +} + +/* + * FIXME: Remove all non-HWPM register writes from the driver. + * Replace them with inter-driver APIs? 
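+ * Until then, each IP register access below ioremap()s only the 4-byte
+ * register being touched and iounmap()s it again immediately after the
+ * access.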
+ */ +void ip_writel(struct tegra_soc_hwpm *hwpm, u64 phys_addr, u32 val) +{ + tegra_soc_hwpm_dbg("reg write: phys_addr(0x%llx), val(0x%x)", + phys_addr, val); + + if (hwpm->fake_registers_enabled) { + fake_writel(hwpm, phys_addr, val); + } else { + void __iomem *ptr = NULL; + + ptr = ioremap(phys_addr, 0x4); + if (!ptr) { + tegra_soc_hwpm_err("Failed to map register(0x%llx)", + phys_addr); + return; + } + __raw_writel(val, ptr); + iounmap(ptr); + } +} + +/* + * Read a register from the EXEC_REG_OPS IOCTL. It is assumed that the whitelist + * check has been done before calling this function. + */ +u32 ioctl_readl(struct tegra_soc_hwpm *hwpm, + struct hwpm_resource_aperture *aperture, + u64 addr) +{ + u32 reg_val = 0; + + if (!aperture) { + tegra_soc_hwpm_err("aperture is NULL"); + return 0; + } + + if (aperture->is_ip) { + reg_val = ip_readl(hwpm, addr); + } else { + reg_val = hwpm_readl(hwpm, + aperture->dt_aperture, + addr - aperture->start_pa); + } + return reg_val; +} + +/* + * Write a register from the EXEC_REG_OPS IOCTL. It is assumed that the + * whitelist check has been done before calling this function. + */ +void ioctl_writel(struct tegra_soc_hwpm *hwpm, + struct hwpm_resource_aperture *aperture, + u64 addr, + u32 val) +{ + if (!aperture) { + tegra_soc_hwpm_err("aperture is NULL"); + return; + } + + if (aperture->is_ip) { + ip_writel(hwpm, addr, val); + } else { + hwpm_writel(hwpm, + aperture->dt_aperture, + addr - aperture->start_pa, + val); + } +} + +/* Read Modify Write register operation */ +int reg_rmw(struct tegra_soc_hwpm *hwpm, + struct hwpm_resource_aperture *aperture, + enum tegra_soc_hwpm_dt_aperture dt_aperture, + u64 addr, + u32 field_mask, + u32 field_val, + bool is_ioctl, + bool is_ip) +{ + u32 reg_val = 0; + + if (is_ioctl) { + if (!aperture) { + tegra_soc_hwpm_err("aperture is NULL"); + return -EIO; + } + } + if (!is_ip) { + if ((dt_aperture < 0) || + (dt_aperture > TEGRA_SOC_HWPM_NUM_DT_APERTURES)) { + tegra_soc_hwpm_err("Invalid dt_aperture(%d)", + dt_aperture); + return -EIO; + } + } + + /* Read current register value */ + if (is_ioctl) + reg_val = ioctl_readl(hwpm, aperture, addr); + else if (is_ip) + reg_val = ip_readl(hwpm, addr); + else + reg_val = hwpm_readl(hwpm, dt_aperture, addr); + + /* Clear and write masked bits */ + reg_val &= ~field_mask; + reg_val |= field_val & field_mask; + + /* Write modified value to register */ + if (is_ioctl) + ioctl_writel(hwpm, aperture, addr, reg_val); + else if (is_ip) + ip_writel(hwpm, addr, reg_val); + else + hwpm_writel(hwpm, dt_aperture, addr, reg_val); + + return 0; +} diff --git a/tegra-soc-hwpm-io.h b/tegra-soc-hwpm-io.h new file mode 100644 index 0000000..2bb9345 --- /dev/null +++ b/tegra-soc-hwpm-io.h @@ -0,0 +1,136 @@ +/* + * tegra-soc-hwpm-io.h: + * This header defines register read/write APIs for the Tegra SOC HWPM driver. + * + * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TEGRA_SOC_HWPM_IO_H +#define TEGRA_SOC_HWPM_IO_H + +#include "tegra-soc-hwpm.h" + +/* Mask and shift field_val so it can be written to a register */ +#define HWPM_REG_F(field, field_val) \ + (((field_val) << field##_SHIFT) & field##_MASK) + +/* Extract a field's value from a register */ +#define HWPM_REG_V(field, reg_val) \ + (((reg_val) & field##_MASK) >> field##_SHIFT) + +/* + * Check if field_val is set in reg_val. field_val is already masked and + * shifted to the correct location. + */ +#define HWPM_REG_CHECK(reg_val, field_mask, field_val) \ + (((reg_val) & (field_mask)) == ((field_val) & (field_mask))) + +/* Mask and shift field_val. Then check if field_val is set in reg_val. */ +#define HWPM_REG_CHECK_F(reg_val, field, field_val) \ + (((reg_val) & field##_MASK) == HWPM_REG_F(field, (field_val))) + +struct whitelist { + u64 reg; + bool zero_in_init; +}; + +struct hwpm_resource_aperture { + /* + * If false, this is a HWPM aperture (PERFRMON, PMA or RTR). Else this + * is a non-HWPM aperture (ex: VIC). + */ + bool is_ip; + + /* + * If is_ip == false, specify dt_aperture for readl/writel operations. + * If is_ip == true, dt_aperture == TEGRA_SOC_HWPM_INVALID_DT. + */ + enum tegra_soc_hwpm_dt_aperture dt_aperture; + + /* Physical aperture */ + u64 start_pa; + u64 end_pa; + + /* Whitelist */ + struct whitelist *wlist; + u64 wlist_size; + + /* Fake registers for VDK which doesn't have a SOC HWPM fmodel */ + u32 *fake_registers; +}; + +struct hwpm_resource { + bool reserved; + u32 map_size; + struct hwpm_resource_aperture *map; +}; + +/* Externs */ +extern struct hwpm_resource hwpm_resources[TERGA_SOC_HWPM_NUM_RESOURCES]; +extern u32 *pma_fake_regs; +extern u32 *mc_fake_regs[16]; +extern struct hwpm_resource_aperture mss_channel_map[]; +extern struct hwpm_resource_aperture mss_iso_niso_hub_map[]; +extern struct hwpm_resource_aperture mss_mcf_map[]; +extern struct hwpm_resource_aperture pma_map[]; +extern struct hwpm_resource_aperture cmd_slice_rtr_map[]; + +struct hwpm_resource_aperture *find_hwpm_aperture(struct tegra_soc_hwpm *hwpm, + u64 phys_addr, + bool check_reservation); +u32 hwpm_readl(struct tegra_soc_hwpm *hwpm, + enum tegra_soc_hwpm_dt_aperture dt_aperture, + u32 reg); +void hwpm_writel(struct tegra_soc_hwpm *hwpm, + enum tegra_soc_hwpm_dt_aperture dt_aperture, + u32 reg, + u32 val); +u32 ip_readl(struct tegra_soc_hwpm *hwpm, u64 phys_addr); +void ip_writel(struct tegra_soc_hwpm *hwpm, u64 phys_addr, u32 val); +u32 ioctl_readl(struct tegra_soc_hwpm *hwpm, + struct hwpm_resource_aperture *aperture, + u64 addr); +void ioctl_writel(struct tegra_soc_hwpm *hwpm, + struct hwpm_resource_aperture *aperture, + u64 addr, + u32 val); +int reg_rmw(struct tegra_soc_hwpm *hwpm, + struct hwpm_resource_aperture *aperture, + enum tegra_soc_hwpm_dt_aperture dt_aperture, + u64 addr, + u32 field_mask, + u32 field_val, + bool is_ioctl, + bool is_ip); +#define DRIVER_REG_RMW(hwpm, dt_aperture, reg, field, field_val, is_ip) \ + reg_rmw(hwpm, \ + NULL, \ + dt_aperture, \ + reg, \ + field##_MASK, \ + HWPM_REG_F(field, field_val), \ + false, \ + is_ip) +#define IOCTL_REG_RMW(hwpm, aperture, addr, field_mask, field_val) \ + reg_rmw(hwpm, \ + aperture, \ + aperture->dt_aperture, \ + addr, \ + field_mask, \ + field_val, \ + true, \ + aperture->is_ip) + +#endif /* TEGRA_SOC_HWPM_IO_H */ diff --git a/tegra-soc-hwpm-ioctl.c b/tegra-soc-hwpm-ioctl.c new file mode 100644 index 0000000..6cdf103 --- /dev/null +++ b/tegra-soc-hwpm-ioctl.c @@ -0,0 +1,1679 @@ +/* + * tegra-soc-hwpm-ioctl.c: + * This 
file adds IOCTL handlers for the Tegra SOC HWPM driver. + * + * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +/* FIXME: Is this include needed for struct resource? */ +#if 0 +#include +#endif +#include +#include + +#include + +#include "tegra-soc-hwpm.h" +#include "tegra-soc-hwpm-io.h" + +struct tegra_soc_hwpm_ioctl { + const char *const name; + const size_t struct_size; + int (*handler)(struct tegra_soc_hwpm *, void *); +}; + +static int device_info_ioctl(struct tegra_soc_hwpm *hwpm, + void *ioctl_struct); +static int timer_relation_ioctl(struct tegra_soc_hwpm *hwpm, + void *ioctl_struct); +static int reserve_resource_ioctl(struct tegra_soc_hwpm *hwpm, + void *ioctl_struct); +static int alloc_pma_stream_ioctl(struct tegra_soc_hwpm *hwpm, + void *ioctl_struct); +static int bind_ioctl(struct tegra_soc_hwpm *hwpm, + void *ioctl_struct); +static int query_whitelist_ioctl(struct tegra_soc_hwpm *hwpm, + void *ioctl_struct); +static int exec_reg_ops_ioctl(struct tegra_soc_hwpm *hwpm, + void *ioctl_struct); +static int update_get_put_ioctl(struct tegra_soc_hwpm *hwpm, + void *ioctl_struct); + +static const struct tegra_soc_hwpm_ioctl ioctls[] = { + [TEGRA_SOC_HWPM_IOCTL_DEVICE_INFO] = { + .name = "device_info", + .struct_size = sizeof(struct tegra_soc_hwpm_device_info), + .handler = device_info_ioctl, + }, + [TEGRA_SOC_HWPM_IOCTL_GET_GPU_CPU_TIME_CORRELATION_INFO] = { + .name = "timer_relation", + .struct_size = sizeof(struct tegra_soc_hwpm_timer_relation), + .handler = timer_relation_ioctl, + }, + [TEGRA_SOC_HWPM_IOCTL_RESERVE_RESOURCE] = { + .name = "reserve_resource", + .struct_size = sizeof(struct tegra_soc_hwpm_reserve_resource), + .handler = reserve_resource_ioctl, + }, + [TEGRA_SOC_HWPM_IOCTL_ALLOC_PMA_STREAM] = { + .name = "alloc_pma_stream", + .struct_size = sizeof(struct tegra_soc_hwpm_alloc_pma_stream), + .handler = alloc_pma_stream_ioctl, + }, + [TEGRA_SOC_HWPM_IOCTL_BIND] = { + .name = "bind", + .struct_size = 0, + .handler = bind_ioctl, + }, + [TEGRA_SOC_HWPM_IOCTL_QUERY_WHITELIST] = { + .name = "query_whitelist", + .struct_size = sizeof(struct tegra_soc_hwpm_query_whitelist), + .handler = query_whitelist_ioctl, + }, + [TEGRA_SOC_HWPM_IOCTL_EXEC_REG_OPS] = { + .name = "exec_reg_ops", + .struct_size = sizeof(struct tegra_soc_hwpm_exec_reg_ops), + .handler = exec_reg_ops_ioctl, + }, + [TEGRA_SOC_HWPM_IOCTL_UPDATE_GET_PUT] = { + .name = "update_get_put", + .struct_size = sizeof(struct tegra_soc_hwpm_update_get_put), + .handler = update_get_put_ioctl, + }, +}; + +static int device_info_ioctl(struct tegra_soc_hwpm *hwpm, + void *ioctl_struct) +{ +/* FIXME: Implement IOCTL */ +#if 0 + struct tegra_soc_hwpm_device_info *device_info = + (struct tegra_soc_hwpm_device_info *)ioctl_struct; +#endif + + tegra_soc_hwpm_err("The DEVICE_INFO IOCTL is currently not implemented"); + return -ENXIO; +} + +static int timer_relation_ioctl(struct tegra_soc_hwpm *hwpm, 
+ void *ioctl_struct) +{ +/* FIXME: Implement IOCTL */ +#if 0 + struct tegra_soc_hwpm_timer_relation *timer_relation = + (struct tegra_soc_hwpm_timer_relation *)ioctl_struct; +#endif + + tegra_soc_hwpm_err("The GET_GPU_CPU_TIME_CORRELATION_INFO IOCTL is" + " currently not implemented"); + return -ENXIO; + +} + +static u32 **get_mc_fake_regs(struct tegra_soc_hwpm *hwpm, + struct hwpm_resource_aperture *aperture) +{ + if (!hwpm->fake_registers_enabled) + return NULL; + if (!aperture) { + tegra_soc_hwpm_err("aperture is NULL"); + return NULL; + } + + switch (aperture->start_pa) { + case NV_ADDRESS_MAP_MC0_BASE: + return &mc_fake_regs[0]; + case NV_ADDRESS_MAP_MC1_BASE: + return &mc_fake_regs[1]; + case NV_ADDRESS_MAP_MC2_BASE: + return &mc_fake_regs[2]; + case NV_ADDRESS_MAP_MC3_BASE: + return &mc_fake_regs[3]; + case NV_ADDRESS_MAP_MC4_BASE: + return &mc_fake_regs[4]; + case NV_ADDRESS_MAP_MC5_BASE: + return &mc_fake_regs[5]; + case NV_ADDRESS_MAP_MC6_BASE: + return &mc_fake_regs[6]; + case NV_ADDRESS_MAP_MC7_BASE: + return &mc_fake_regs[7]; + case NV_ADDRESS_MAP_MC8_BASE: + return &mc_fake_regs[8]; + case NV_ADDRESS_MAP_MC9_BASE: + return &mc_fake_regs[9]; + case NV_ADDRESS_MAP_MC10_BASE: + return &mc_fake_regs[10]; + case NV_ADDRESS_MAP_MC11_BASE: + return &mc_fake_regs[11]; + case NV_ADDRESS_MAP_MC12_BASE: + return &mc_fake_regs[12]; + case NV_ADDRESS_MAP_MC13_BASE: + return &mc_fake_regs[13]; + case NV_ADDRESS_MAP_MC14_BASE: + return &mc_fake_regs[14]; + case NV_ADDRESS_MAP_MC15_BASE: + return &mc_fake_regs[15]; + default: + return NULL; + } +} + +static void set_mc_fake_regs(struct tegra_soc_hwpm *hwpm, + struct hwpm_resource_aperture *aperture, + bool set_null) +{ + u32 *fake_regs = NULL; + + if (!aperture) { + tegra_soc_hwpm_err("aperture is NULL"); + return; + } + + switch (aperture->start_pa) { + case NV_ADDRESS_MAP_MC0_BASE: + fake_regs = (!hwpm->fake_registers_enabled || set_null) ? + NULL : mc_fake_regs[0]; + mss_channel_map[0].fake_registers = fake_regs; + mss_iso_niso_hub_map[0].fake_registers = fake_regs; + mss_mcf_map[0].fake_registers = fake_regs; + break; + case NV_ADDRESS_MAP_MC1_BASE: + fake_regs = (!hwpm->fake_registers_enabled || set_null) ? + NULL : mc_fake_regs[1]; + mss_channel_map[1].fake_registers = fake_regs; + mss_iso_niso_hub_map[1].fake_registers = fake_regs; + mss_mcf_map[1].fake_registers = fake_regs; + break; + case NV_ADDRESS_MAP_MC2_BASE: + fake_regs = (!hwpm->fake_registers_enabled || set_null) ? + NULL : mc_fake_regs[2]; + mss_channel_map[2].fake_registers = fake_regs; + mss_iso_niso_hub_map[2].fake_registers = fake_regs; + mss_mcf_map[2].fake_registers = fake_regs; + break; + case NV_ADDRESS_MAP_MC3_BASE: + fake_regs = (!hwpm->fake_registers_enabled || set_null) ? + NULL : mc_fake_regs[3]; + mss_channel_map[3].fake_registers = fake_regs; + mss_iso_niso_hub_map[3].fake_registers = fake_regs; + mss_mcf_map[3].fake_registers = fake_regs; + break; + case NV_ADDRESS_MAP_MC4_BASE: + fake_regs = (!hwpm->fake_registers_enabled || set_null) ? + NULL : mc_fake_regs[4]; + mss_channel_map[4].fake_registers = fake_regs; + mss_iso_niso_hub_map[4].fake_registers = fake_regs; + mss_mcf_map[4].fake_registers = fake_regs; + break; + case NV_ADDRESS_MAP_MC5_BASE: + fake_regs = (!hwpm->fake_registers_enabled || set_null) ? 
+ NULL : mc_fake_regs[5]; + mss_channel_map[5].fake_registers = fake_regs; + mss_iso_niso_hub_map[5].fake_registers = fake_regs; + mss_mcf_map[5].fake_registers = fake_regs; + break; + case NV_ADDRESS_MAP_MC6_BASE: + fake_regs = (!hwpm->fake_registers_enabled || set_null) ? + NULL : mc_fake_regs[6]; + mss_channel_map[6].fake_registers = fake_regs; + mss_iso_niso_hub_map[6].fake_registers = fake_regs; + mss_mcf_map[6].fake_registers = fake_regs; + break; + case NV_ADDRESS_MAP_MC7_BASE: + fake_regs = (!hwpm->fake_registers_enabled || set_null) ? + NULL : mc_fake_regs[7]; + mss_channel_map[7].fake_registers = fake_regs; + mss_iso_niso_hub_map[7].fake_registers = fake_regs; + mss_mcf_map[7].fake_registers = fake_regs; + break; + case NV_ADDRESS_MAP_MC8_BASE: + fake_regs = (!hwpm->fake_registers_enabled || set_null) ? + NULL : mc_fake_regs[8]; + mss_channel_map[8].fake_registers = fake_regs; + mss_iso_niso_hub_map[8].fake_registers = fake_regs; + break; + case NV_ADDRESS_MAP_MC9_BASE: + fake_regs = (!hwpm->fake_registers_enabled || set_null) ? + NULL : mc_fake_regs[9]; + mss_channel_map[9].fake_registers = fake_regs; + break; + case NV_ADDRESS_MAP_MC10_BASE: + fake_regs = (!hwpm->fake_registers_enabled || set_null) ? + NULL : mc_fake_regs[10]; + mss_channel_map[10].fake_registers = fake_regs; + break; + case NV_ADDRESS_MAP_MC11_BASE: + fake_regs = (!hwpm->fake_registers_enabled || set_null) ? + NULL : mc_fake_regs[11]; + mss_channel_map[11].fake_registers = fake_regs; + break; + case NV_ADDRESS_MAP_MC12_BASE: + fake_regs = (!hwpm->fake_registers_enabled || set_null) ? + NULL : mc_fake_regs[12]; + mss_channel_map[12].fake_registers = fake_regs; + break; + case NV_ADDRESS_MAP_MC13_BASE: + fake_regs = (!hwpm->fake_registers_enabled || set_null) ? + NULL : mc_fake_regs[13]; + mss_channel_map[13].fake_registers = fake_regs; + break; + case NV_ADDRESS_MAP_MC14_BASE: + fake_regs = (!hwpm->fake_registers_enabled || set_null) ? + NULL : mc_fake_regs[14]; + mss_channel_map[14].fake_registers = fake_regs; + break; + case NV_ADDRESS_MAP_MC15_BASE: + fake_regs = (!hwpm->fake_registers_enabled || set_null) ? 
+ NULL : mc_fake_regs[15]; + mss_channel_map[15].fake_registers = fake_regs; + break; + default: + break; + } +} + +static int reserve_resource_ioctl(struct tegra_soc_hwpm *hwpm, + void *ioctl_struct) +{ + int ret = 0; + struct tegra_soc_hwpm_reserve_resource *reserve_resource = + (struct tegra_soc_hwpm_reserve_resource *)ioctl_struct; + u32 resource = reserve_resource->resource; + struct hwpm_resource_aperture *aperture = NULL; + int aprt_idx = 0; + + if (hwpm->bind_completed) { + tegra_soc_hwpm_err("The RESERVE_RESOURCE IOCTL can only be" + " called before the BIND IOCTL."); + return -EPERM; + } + + /* + * FIXME: Tell IPs which are being profiled to power up IP and + * disable power management + */ + /* Map reserved apertures and allocate fake register arrays if needed */ + for (aprt_idx = 0; + aprt_idx < hwpm_resources[resource].map_size; + aprt_idx++) { + aperture = &(hwpm_resources[resource].map[aprt_idx]); + if ((aperture->dt_aperture == TEGRA_SOC_HWPM_PMA_DT) || + (aperture->dt_aperture == TEGRA_SOC_HWPM_RTR_DT)) { + /* PMA and RTR apertures are handled in open(fd) */ + continue; + } else if (IS_PERFMON(aperture->dt_aperture)) { + struct resource *res = NULL; + u64 num_regs = 0; + + tegra_soc_hwpm_dbg("Found PERFMON(0x%llx - 0x%llx)", + aperture->start_pa, aperture->end_pa); + + hwpm->dt_apertures[aperture->dt_aperture] = + of_iomap(hwpm->np, aperture->dt_aperture); + if (!hwpm->dt_apertures[aperture->dt_aperture]) { + tegra_soc_hwpm_err("Couldn't map PERFMON(%d)", + aperture->dt_aperture); + ret = -ENOMEM; + goto fail; + } + + res = platform_get_resource(hwpm->pdev, + IORESOURCE_MEM, + aperture->dt_aperture); + if ((!res) || (res->start == 0) || (res->end == 0)) { + tegra_soc_hwpm_err("Invalid resource for PERFMON(%d)", + aperture->dt_aperture); + ret = -ENOMEM; + goto fail; + } + aperture->start_pa = res->start; + aperture->end_pa = res->end; + + if (hwpm->fake_registers_enabled) { + num_regs = (aperture->end_pa + 1 - aperture->start_pa) / + sizeof(*aperture->fake_registers); + aperture->fake_registers = + (u32 *)kzalloc(sizeof(*aperture->fake_registers) * + num_regs, + GFP_KERNEL); + if (!aperture->fake_registers) { + tegra_soc_hwpm_err("Aperture(0x%llx - 0x%llx):" + " Couldn't allocate memory for fake" + " registers", + aperture->start_pa, + aperture->end_pa); + ret = -ENOMEM; + goto fail; + } + } + } else { /* IP apertures */ + if (hwpm->fake_registers_enabled) { + u64 num_regs = 0; + u32 **fake_regs = get_mc_fake_regs(hwpm, aperture); + if (!fake_regs) + fake_regs = &aperture->fake_registers; + + num_regs = (aperture->end_pa + 1 - aperture->start_pa) / + sizeof(*(*fake_regs)); + *fake_regs = + (u32 *)kzalloc(sizeof(*(*fake_regs)) * num_regs, + GFP_KERNEL); + if (!(*fake_regs)) { + tegra_soc_hwpm_err("Aperture(0x%llx - 0x%llx):" + " Couldn't allocate memory for fake" + " registers", + aperture->start_pa, + aperture->end_pa); + ret = -ENOMEM; + goto fail; + } + + set_mc_fake_regs(hwpm, aperture, false); + } + } + } + + hwpm_resources[resource].reserved = true; + goto success; + +fail: + for (aprt_idx = 0; + aprt_idx < hwpm_resources[resource].map_size; + aprt_idx++) { + aperture = &(hwpm_resources[resource].map[aprt_idx]); + if ((aperture->dt_aperture == TEGRA_SOC_HWPM_PMA_DT) || + (aperture->dt_aperture == TEGRA_SOC_HWPM_RTR_DT)) { + /* PMA and RTR apertures are handled in open(fd) */ + continue; + } else if (IS_PERFMON(aperture->dt_aperture)) { + if (hwpm->dt_apertures[aperture->dt_aperture]) { + iounmap(hwpm->dt_apertures[aperture->dt_aperture]); + 
hwpm->dt_apertures[aperture->dt_aperture] = NULL; + } + + aperture->start_pa = 0; + aperture->end_pa = 0; + + if (aperture->fake_registers) { + kfree(aperture->fake_registers); + aperture->fake_registers = NULL; + } + } else { /* IP apertures */ + if (aperture->fake_registers) { + kfree(aperture->fake_registers); + aperture->fake_registers = NULL; + set_mc_fake_regs(hwpm, aperture, true); + } + } + } + + hwpm_resources[resource].reserved = false; + +success: + return ret; + +} + +static int alloc_pma_stream_ioctl(struct tegra_soc_hwpm *hwpm, + void *ioctl_struct) +{ + int ret = 0; + u32 reg_val = 0; + u32 outbase_lo = 0; + u32 outbase_hi = 0; + u32 outsize = 0; + u32 mem_bytes_addr = 0; + struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream = + (struct tegra_soc_hwpm_alloc_pma_stream *)ioctl_struct; + + if (hwpm->bind_completed) { + tegra_soc_hwpm_err("The ALLOC_PMA_STREAM IOCTL can only be" + " called before the BIND IOCTL."); + return -EPERM; + } + + if (alloc_pma_stream->stream_buf_size == 0) { + tegra_soc_hwpm_err("stream_buf_size is 0"); + return -EINVAL; + } + if (alloc_pma_stream->stream_buf_fd == 0) { + tegra_soc_hwpm_err("Invalid stream_buf_fd"); + return -EINVAL; + } + if (alloc_pma_stream->mem_bytes_buf_fd == 0) { + tegra_soc_hwpm_err("Invalid mem_bytes_buf_fd"); + return -EINVAL; + } + + /* Memory map stream buffer */ + hwpm->stream_dma_buf = dma_buf_get(alloc_pma_stream->stream_buf_fd); + if (IS_ERR(hwpm->stream_dma_buf)) { + tegra_soc_hwpm_err("Unable to get stream dma_buf"); + ret = PTR_ERR(hwpm->stream_dma_buf); + goto fail; + } + hwpm->stream_attach = dma_buf_attach(hwpm->stream_dma_buf, hwpm->dev); + if (IS_ERR(hwpm->stream_attach)) { + tegra_soc_hwpm_err("Unable to attach stream dma_buf"); + ret = PTR_ERR(hwpm->stream_attach); + goto fail; + } + hwpm->stream_sgt = dma_buf_map_attachment(hwpm->stream_attach, + DMA_FROM_DEVICE); + if (IS_ERR(hwpm->stream_sgt)) { + tegra_soc_hwpm_err("Unable to map stream attachment"); + ret = PTR_ERR(hwpm->stream_sgt); + goto fail; + } + alloc_pma_stream->stream_buf_pma_va = + sg_dma_address(hwpm->stream_sgt->sgl); + if (alloc_pma_stream->stream_buf_pma_va == 0) { + tegra_soc_hwpm_err("Invalid stream buffer SMMU IOVA"); + ret = -ENXIO; + goto fail; + } + tegra_soc_hwpm_dbg("stream_buf_pma_va = 0x%llx", + alloc_pma_stream->stream_buf_pma_va); + + /* Memory map mem bytes buffer */ + hwpm->mem_bytes_dma_buf = + dma_buf_get(alloc_pma_stream->mem_bytes_buf_fd); + if (IS_ERR(hwpm->mem_bytes_dma_buf)) { + tegra_soc_hwpm_err("Unable to get mem bytes dma_buf"); + ret = PTR_ERR(hwpm->mem_bytes_dma_buf); + goto fail; + } + hwpm->mem_bytes_attach = dma_buf_attach(hwpm->mem_bytes_dma_buf, + hwpm->dev); + if (IS_ERR(hwpm->mem_bytes_attach)) { + tegra_soc_hwpm_err("Unable to attach mem bytes dma_buf"); + ret = PTR_ERR(hwpm->mem_bytes_attach); + goto fail; + } + hwpm->mem_bytes_sgt = dma_buf_map_attachment(hwpm->mem_bytes_attach, + DMA_FROM_DEVICE); + if (IS_ERR(hwpm->mem_bytes_sgt)) { + tegra_soc_hwpm_err("Unable to map mem bytes attachment"); + ret = PTR_ERR(hwpm->mem_bytes_sgt); + goto fail; + } + hwpm->mem_bytes_kernel = dma_buf_vmap(hwpm->mem_bytes_dma_buf); + if (!hwpm->mem_bytes_kernel) { + tegra_soc_hwpm_err("Unable to map mem_bytes buffer into kernel VA space"); + ret = -ENOMEM; + goto fail; + } + memset(hwpm->mem_bytes_kernel, 0, 32); + + outbase_lo = alloc_pma_stream->stream_buf_pma_va & 0xffffffffULL; + outbase_lo >>= NV_PERF_PMASYS_CHANNEL_OUTBASE_PTR_SHIFT; + reg_val = HWPM_REG_F(NV_PERF_PMASYS_CHANNEL_OUTBASE_PTR, + outbase_lo); + 
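+ /*
+ * Program the PMA streamout target: the low 32 bits of the stream
+ * buffer IOVA are written to OUTBASE and bits 39:32 to OUTBASEUPPER,
+ * the buffer size goes to OUTSIZE, and the mem_bytes buffer IOVA is
+ * written to MEM_BYTES_ADDR before MEM_BLOCK is marked valid.
+ */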
hwpm_writel(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_CHANNEL_OUTBASE_CH0, + reg_val); + tegra_soc_hwpm_dbg("OUTBASE = 0x%x", reg_val); + + outbase_hi = (alloc_pma_stream->stream_buf_pma_va >> 32) & 0xff; + outbase_hi >>= NV_PERF_PMASYS_CHANNEL_OUTBASEUPPER_PTR_SHIFT; + reg_val = HWPM_REG_F(NV_PERF_PMASYS_CHANNEL_OUTBASEUPPER_PTR, + outbase_hi); + hwpm_writel(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_CHANNEL_OUTBASEUPPER_CH0, + reg_val); + tegra_soc_hwpm_dbg("OUTBASEUPPER = 0x%x", reg_val); + + outsize = alloc_pma_stream->stream_buf_size >> + NV_PERF_PMASYS_CHANNEL_OUTSIZE_NUMBYTES_SHIFT; + reg_val = HWPM_REG_F(NV_PERF_PMASYS_CHANNEL_OUTSIZE_NUMBYTES, + outsize); + hwpm_writel(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_CHANNEL_OUTSIZE_CH0, + reg_val); + tegra_soc_hwpm_dbg("OUTSIZE = 0x%x", reg_val); + + mem_bytes_addr = sg_dma_address(hwpm->mem_bytes_sgt->sgl) & 0xffffffffULL; + mem_bytes_addr >>= NV_PERF_PMASYS_CHANNEL_MEM_BYTES_ADDR_PTR_SHIFT; + reg_val = HWPM_REG_F(NV_PERF_PMASYS_CHANNEL_MEM_BYTES_ADDR_PTR, + mem_bytes_addr); + hwpm_writel(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_CHANNEL_MEM_BYTES_ADDR_CH0, + reg_val); + tegra_soc_hwpm_dbg("MEM_BYTES_ADDR = 0x%x", reg_val); + + reg_val = HWPM_REG_F(NV_PERF_PMASYS_CHANNEL_MEM_BLOCK_VALID, + NV_PERF_PMASYS_CHANNEL_MEM_BLOCK_VALID_TRUE); + hwpm_writel(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_CHANNEL_MEM_BLOCK_CH0, + reg_val); + + return 0; + +fail: + reg_val = HWPM_REG_F(NV_PERF_PMASYS_CHANNEL_MEM_BLOCK_VALID, + NV_PERF_PMASYS_CHANNEL_MEM_BLOCK_VALID_FALSE); + hwpm_writel(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_CHANNEL_MEM_BLOCK_CH0, + reg_val); + hwpm_writel(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_CHANNEL_OUTBASE_CH0, + 0); + hwpm_writel(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_CHANNEL_OUTBASEUPPER_CH0, + 0); + hwpm_writel(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_CHANNEL_OUTSIZE_CH0, + 0); + hwpm_writel(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_CHANNEL_MEM_BYTES_ADDR_CH0, + 0); + + alloc_pma_stream->stream_buf_pma_va = 0; + + if (hwpm->stream_sgt && (!IS_ERR(hwpm->stream_sgt))) { + dma_buf_unmap_attachment(hwpm->stream_attach, + hwpm->stream_sgt, + DMA_FROM_DEVICE); + } + hwpm->stream_sgt = NULL; + + if (hwpm->stream_attach && (!IS_ERR(hwpm->stream_attach))) { + dma_buf_detach(hwpm->stream_dma_buf, hwpm->stream_attach); + } + hwpm->stream_attach = NULL; + + if (hwpm->stream_dma_buf && (!IS_ERR(hwpm->stream_dma_buf))) { + dma_buf_put(hwpm->stream_dma_buf); + } + hwpm->stream_dma_buf = NULL; + + if (hwpm->mem_bytes_kernel) { + dma_buf_vunmap(hwpm->mem_bytes_dma_buf, + hwpm->mem_bytes_kernel); + hwpm->mem_bytes_kernel = NULL; + } + if (hwpm->mem_bytes_sgt && (!IS_ERR(hwpm->mem_bytes_sgt))) { + dma_buf_unmap_attachment(hwpm->mem_bytes_attach, + hwpm->mem_bytes_sgt, + DMA_FROM_DEVICE); + } + hwpm->mem_bytes_sgt = NULL; + + if (hwpm->mem_bytes_attach && (!IS_ERR(hwpm->mem_bytes_attach))) { + dma_buf_detach(hwpm->mem_bytes_dma_buf, hwpm->mem_bytes_attach); + } + hwpm->mem_bytes_attach = NULL; + + if (hwpm->mem_bytes_dma_buf && (!IS_ERR(hwpm->mem_bytes_dma_buf))) { + dma_buf_put(hwpm->mem_bytes_dma_buf); + } + hwpm->mem_bytes_dma_buf = NULL; + + return ret; +} + +static int bind_ioctl(struct tegra_soc_hwpm *hwpm, + void *ioctl_struct) +{ + int ret = 0; + int res_idx = 0; + int aprt_idx = 0; + u32 wlist_idx = 0; + struct hwpm_resource_aperture *aperture = NULL; + + for (res_idx = 0; res_idx < TERGA_SOC_HWPM_NUM_RESOURCES; res_idx++) { + if (!hwpm_resources[res_idx].reserved) + 
continue; + tegra_soc_hwpm_dbg("Found reserved IP(%d)", res_idx); + + for (aprt_idx = 0; + aprt_idx < hwpm_resources[res_idx].map_size; + aprt_idx++) { + aperture = &(hwpm_resources[res_idx].map[aprt_idx]); + + /* Zero out necessary registers */ + if (aperture->wlist) { + for (wlist_idx = 0; + wlist_idx < aperture->wlist_size; + wlist_idx++) { + if (aperture->wlist[wlist_idx].zero_in_init) { + ioctl_writel(hwpm, + aperture, + aperture->start_pa + + aperture->wlist[wlist_idx].reg, + 0); + } + } + } else { + tegra_soc_hwpm_err("NULL whitelist in aperture(0x%llx - 0x%llx)", + aperture->start_pa, + aperture->end_pa); + } + + /* + * Enable reporting of PERFMON status to + * NV_PERF_PMMSYS_SYS0ROUTER_PERFMONSTATUS_MERGED + */ + if (IS_PERFMON(aperture->dt_aperture)) { + tegra_soc_hwpm_dbg("Found PERFMON(0x%llx - 0x%llx)", + aperture->start_pa, + aperture->end_pa); + ret = DRIVER_REG_RMW(hwpm, + aperture->dt_aperture, + NV_PERF_PMMSYS_SYS0_ENGINESTATUS, + NV_PERF_PMMSYS_SYS0_ENGINESTATUS_ENABLE, + NV_PERF_PMMSYS_SYS0_ENGINESTATUS_ENABLE_OUT, + false); + if (ret < 0) { + tegra_soc_hwpm_err("Unable to set PMM ENGINESTATUS_ENABLE" + " for PERFMON(0x%llx - 0x%llx)", + aperture->start_pa, + aperture->end_pa); + return -EIO; + } + } + } + } + + hwpm->bind_completed = true; + return 0; +} + +static int query_whitelist_ioctl(struct tegra_soc_hwpm *hwpm, + void *ioctl_struct) +{ + int ret = 0; + int res_idx = 0; + int aprt_idx = 0; + struct hwpm_resource_aperture *aperture = NULL; + struct tegra_soc_hwpm_query_whitelist *query_whitelist = + (struct tegra_soc_hwpm_query_whitelist *)ioctl_struct; + + if (!hwpm->bind_completed) { + tegra_soc_hwpm_err("The QUERY_WHITELIST IOCTL can only be called" + " after the BIND IOCTL."); + return -EPERM; + } + + if (!query_whitelist->whitelist) { /* Return whitelist_size */ + if (hwpm->full_wlist_size >= 0) { + query_whitelist->whitelist_size = hwpm->full_wlist_size; + return 0; + } + + hwpm->full_wlist_size = 0; + for (res_idx = 0; res_idx < TERGA_SOC_HWPM_NUM_RESOURCES; res_idx++) { + if (!(hwpm_resources[res_idx].reserved)) + continue; + tegra_soc_hwpm_dbg("Found reserved IP(%d)", res_idx); + + for (aprt_idx = 0; + aprt_idx < hwpm_resources[res_idx].map_size; + aprt_idx++) { + aperture = &(hwpm_resources[res_idx].map[aprt_idx]); + if (aperture->wlist) { + hwpm->full_wlist_size += aperture->wlist_size; + } else { + tegra_soc_hwpm_err("NULL whitelist in aperture(0x%llx - 0x%llx)", + aperture->start_pa, + aperture->end_pa); + } + + } + } + + query_whitelist->whitelist_size = hwpm->full_wlist_size; + } else { /* Fill in whitelist array */ + unsigned long user_va = + (unsigned long)(query_whitelist->whitelist); + unsigned long offset = user_va & ~PAGE_MASK; + u64 wlist_buf_size = 0; + u64 num_pages = 0; + long pinned_pages = 0; + struct page **pages = NULL; + long page_idx = 0; + void *full_wlist = NULL; + u64 *full_wlist_u64 = NULL; + u32 full_wlist_idx = 0; + u32 aprt_wlist_idx = 0; + + if (hwpm->full_wlist_size < 0) { + tegra_soc_hwpm_err("Invalid whitelist size"); + return -EINVAL; + } + wlist_buf_size = hwpm->full_wlist_size * + sizeof(*(query_whitelist->whitelist)); + + /* Memory map user buffer into kernel address space */ + num_pages = DIV_ROUND_UP(offset + wlist_buf_size, PAGE_SIZE); + pages = (struct page **)kzalloc(sizeof(*pages) * num_pages, + GFP_KERNEL); + if (!pages) { + tegra_soc_hwpm_err("Couldn't allocate memory for pages array"); + ret = -ENOMEM; + goto wlist_unmap; + } + pinned_pages = get_user_pages(user_va & PAGE_MASK, + num_pages, + 0, + pages, + 
NULL); + if (pinned_pages != num_pages) { + tegra_soc_hwpm_err("Requested %llu pages / Got %ld pages", + num_pages, pinned_pages); + ret = -ENOMEM; + goto wlist_unmap; + } + full_wlist = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL); + if (!full_wlist) { + tegra_soc_hwpm_err("Couldn't map whitelist buffer into" + " kernel address space"); + ret = -ENOMEM; + goto wlist_unmap; + } + full_wlist_u64 = (u64 *)(full_wlist + offset); + + /* Fill in whitelist buffer */ + for (res_idx = 0, full_wlist_idx = 0; + res_idx < TERGA_SOC_HWPM_NUM_RESOURCES; + res_idx++) { + if (!(hwpm_resources[res_idx].reserved)) + continue; + tegra_soc_hwpm_dbg("Found reserved IP(%d)", res_idx); + + for (aprt_idx = 0; + aprt_idx < hwpm_resources[res_idx].map_size; + aprt_idx++) { + aperture = &(hwpm_resources[res_idx].map[aprt_idx]); + if (aperture->wlist) { + for (aprt_wlist_idx = 0; + aprt_wlist_idx < aperture->wlist_size; + aprt_wlist_idx++, full_wlist_idx++) { + full_wlist_u64[full_wlist_idx] = + aperture->start_pa + + aperture->wlist[aprt_wlist_idx].reg; + } + } else { + tegra_soc_hwpm_err("NULL whitelist in aperture(0x%llx - 0x%llx)", + aperture->start_pa, + aperture->end_pa); + } + } + } + +wlist_unmap: + if (full_wlist) + vunmap(full_wlist); + if (pinned_pages > 0) { + for (page_idx = 0; page_idx < pinned_pages; page_idx++) { + set_page_dirty(pages[page_idx]); + put_page(pages[page_idx]); + } + } + if (pages) + kfree(pages); + } + + + return ret; +} + +static int exec_reg_ops_ioctl(struct tegra_soc_hwpm *hwpm, + void *ioctl_struct) +{ + int ret = 0; + struct tegra_soc_hwpm_exec_reg_ops *exec_reg_ops = + (struct tegra_soc_hwpm_exec_reg_ops *)ioctl_struct; + struct hwpm_resource_aperture *aperture = NULL; + int op_idx = 0; + struct tegra_soc_hwpm_reg_op *reg_op = NULL; + + if (!hwpm->bind_completed) { + tegra_soc_hwpm_err("The EXEC_REG_OPS IOCTL can only be called" + " after the BIND IOCTL."); + return -EPERM; + } + switch (exec_reg_ops->mode) { + case TEGRA_SOC_HWPM_REG_OP_MODE_FAIL_ON_FIRST: + case TEGRA_SOC_HWPM_REG_OP_MODE_CONT_ON_ERR: + break; + + default: + tegra_soc_hwpm_err("Invalid reg ops mode(%u)", + exec_reg_ops->mode); + return -EINVAL; + } + + for (op_idx = 0; op_idx < exec_reg_ops->op_count; op_idx++) { +#define REG_OP_FAIL(op_status, msg, ...) 
\ + do { \ + tegra_soc_hwpm_err(msg, ##__VA_ARGS__); \ + reg_op->status = \ + TEGRA_SOC_HWPM_REG_OP_STATUS_ ## op_status; \ + exec_reg_ops->b_all_reg_ops_passed = false; \ + if (exec_reg_ops->mode == \ + TEGRA_SOC_HWPM_REG_OP_MODE_FAIL_ON_FIRST) { \ + return -EINVAL; \ + } \ + } while (0) + + tegra_soc_hwpm_dbg("reg op: idx(%d), phys(0x%llx), cmd(%u)", + op_idx, reg_op->phys_addr, reg_op->cmd); + reg_op = &(exec_reg_ops->ops[op_idx]); + + /* The whitelist check is done here */ + aperture = find_hwpm_aperture(hwpm, reg_op->phys_addr, true); + if (!aperture) { + REG_OP_FAIL(INSUFFICIENT_PERMISSIONS, + "Invalid register address(0x%llx)", + reg_op->phys_addr); + continue; + } + + switch (reg_op->cmd) { + case TEGRA_SOC_HWPM_REG_OP_CMD_RD32: + reg_op->reg_val_lo = ioctl_readl(hwpm, + aperture, + reg_op->phys_addr); + reg_op->status = TEGRA_SOC_HWPM_REG_OP_STATUS_SUCCESS; + break; + + case TEGRA_SOC_HWPM_REG_OP_CMD_RD64: + reg_op->reg_val_lo = ioctl_readl(hwpm, + aperture, + reg_op->phys_addr); + reg_op->reg_val_hi = ioctl_readl(hwpm, + aperture, + reg_op->phys_addr + 4); + reg_op->status = TEGRA_SOC_HWPM_REG_OP_STATUS_SUCCESS; + break; + + /* Read Modify Write operation */ + case TEGRA_SOC_HWPM_REG_OP_CMD_WR32: + ret = IOCTL_REG_RMW(hwpm, + aperture, + reg_op->phys_addr, + reg_op->mask_lo, + reg_op->reg_val_lo); + if (ret < 0) { + REG_OP_FAIL(INVALID, + "WR32 REGOP failed for register(0x%llx)", + reg_op->phys_addr); + } else { + reg_op->status = TEGRA_SOC_HWPM_REG_OP_STATUS_SUCCESS; + } + break; + + /* Read Modify Write operation */ + case TEGRA_SOC_HWPM_REG_OP_CMD_WR64: + /* Lower 32 bits */ + ret = IOCTL_REG_RMW(hwpm, + aperture, + reg_op->phys_addr, + reg_op->mask_lo, + reg_op->reg_val_lo); + if (ret < 0) { + REG_OP_FAIL(INVALID, + "WR64 REGOP failed for register(0x%llx)", + reg_op->phys_addr); + continue; + } + + /* Upper 32 bits */ + ret = IOCTL_REG_RMW(hwpm, + aperture, + reg_op->phys_addr + 4, + reg_op->mask_hi, + reg_op->reg_val_hi); + if (ret < 0) { + REG_OP_FAIL(INVALID, + "WR64 REGOP failed for register(0x%llx)", + reg_op->phys_addr + 4); + } else { + reg_op->status = TEGRA_SOC_HWPM_REG_OP_STATUS_SUCCESS; + } + + break; + + default: + REG_OP_FAIL(INVALID_CMD, + "Invalid reg op command(%u)", + reg_op->cmd); + break; + } + + } + + exec_reg_ops->b_all_reg_ops_passed = true; + return 0; +} + +static int update_get_put_ioctl(struct tegra_soc_hwpm *hwpm, + void *ioctl_struct) +{ + int ret = 0; + u32 reg_val = 0; + u32 field_val = 0; + u32 *mem_bytes_kernel_u32 = NULL; + struct tegra_soc_hwpm_update_get_put *update_get_put = + (struct tegra_soc_hwpm_update_get_put *)ioctl_struct; + + if (!hwpm->bind_completed) { + tegra_soc_hwpm_err("The UPDATE_GET_PUT IOCTL can only be called" + " after the BIND IOCTL."); + return -EPERM; + } + if (!hwpm->mem_bytes_kernel) { + tegra_soc_hwpm_err("mem_bytes buffer is not mapped in the driver"); + return -ENXIO; + } + + /* Update SW get pointer */ + hwpm_writel(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_CHANNEL_MEM_BUMP_CH0, + update_get_put->mem_bump); + + /* Stream MEM_BYTES value to MEM_BYTES buffer */ + if (update_get_put->b_stream_mem_bytes) { + mem_bytes_kernel_u32 = (u32 *)(hwpm->mem_bytes_kernel); + *mem_bytes_kernel_u32 = TEGRA_SOC_HWPM_MEM_BYTES_INVALID; + ret = DRIVER_REG_RMW(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_CHANNEL_CONTROL_USER_CH0, + NV_PERF_PMASYS_CHANNEL_CONTROL_USER_UPDATE_BYTES, + NV_PERF_PMASYS_CHANNEL_CONTROL_USER_UPDATE_BYTES_DOIT, + false); + if (ret < 0) { + tegra_soc_hwpm_err("Failed to stream mem_bytes to buffer"); 
+ return -EIO; + } + } + + /* Read HW put pointer */ + if (update_get_put->b_read_mem_head) { + update_get_put->mem_head = + hwpm_readl(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_CHANNEL_MEM_HEAD_CH0); + tegra_soc_hwpm_dbg("MEM_HEAD = 0x%llx", + update_get_put->mem_head); + } + + /* Check overflow error status */ + if (update_get_put->b_check_overflow) { + reg_val = hwpm_readl(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_CHANNEL_STATUS_SECURE_CH0); + field_val = + HWPM_REG_V(NV_PERF_PMASYS_CHANNEL_STATUS_SECURE_MEMBUF_STATUS, + reg_val); + update_get_put->b_overflowed = + (field_val == + NV_PERF_PMASYS_CHANNEL_STATUS_SECURE_MEMBUF_STATUS_OVERFLOWED); + tegra_soc_hwpm_dbg("OVERFLOWED = %u", + update_get_put->b_overflowed); + } + + return 0; +} + +static long tegra_soc_hwpm_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + int ret = 0; + enum tegra_soc_hwpm_ioctl_num ioctl_num = _IOC_NR(cmd); + u32 ioc_dir = _IOC_DIR(cmd); + u32 arg_size = _IOC_SIZE(cmd); + struct tegra_soc_hwpm *hwpm = NULL; + void *arg_copy = NULL; + + if (!file) { + tegra_soc_hwpm_err("Invalid file"); + ret = -ENODEV; + goto fail; + } + if ((_IOC_TYPE(cmd) != TEGRA_SOC_HWPM_IOC_MAGIC) || + (ioctl_num < 0) || + (ioctl_num >= TERGA_SOC_HWPM_NUM_IOCTLS)) { + tegra_soc_hwpm_err("Unsupported IOCTL call"); + ret = -EINVAL; + goto fail; + } + if (arg_size != ioctls[ioctl_num].struct_size) { + tegra_soc_hwpm_err("Invalid userspace struct"); + ret = -EINVAL; + goto fail; + } + + hwpm = file->private_data; + if (!hwpm) { + tegra_soc_hwpm_err("Invalid hwpm struct"); + ret = -ENODEV; + goto fail; + } + + /* Only allocate a buffer if the IOCTL needs a buffer */ + if (!(ioc_dir & _IOC_NONE)) { + arg_copy = kzalloc(arg_size, GFP_KERNEL); + if (!arg_copy) { + tegra_soc_hwpm_err("Can't allocate memory for kernel struct"); + ret = -ENOMEM; + goto fail; + } + } + + if (ioc_dir & _IOC_WRITE) { + if (copy_from_user(arg_copy, (void __user *)arg, arg_size)) { + tegra_soc_hwpm_err("Failed to copy data from userspace" + " struct into kernel struct"); + ret = -EFAULT; + goto fail; + } + } + + /* + * We don't goto fail here because even if the IOCTL fails, we have to + * call copy_to_user() to pass back any valid output params to + * userspace. 
+ */ + ret = ioctls[ioctl_num].handler(hwpm, arg_copy); + + if (ioc_dir & _IOC_READ) { + if (copy_to_user((void __user *)arg, arg_copy, arg_size)) { + tegra_soc_hwpm_err("Failed to copy data from kernel" + " struct into userspace struct"); + ret = -EFAULT; + goto fail; + } + } + + if (ret < 0) + goto fail; + + tegra_soc_hwpm_dbg("The %s IOCTL completed successfully!", + ioctls[ioctl_num].name); + goto cleanup; + +fail: + tegra_soc_hwpm_err("The %s IOCTL failed(%d)!", + ioctls[ioctl_num].name, ret); +cleanup: + if (arg_copy) + kfree(arg_copy); + + return ret; +} + +static int tegra_soc_hwpm_open(struct inode *inode, struct file *filp) +{ + int ret = 0; + unsigned int minor = iminor(inode); + struct tegra_soc_hwpm *hwpm = NULL; + struct resource *res = NULL; + u64 num_regs = 0; + + if (!inode) { + tegra_soc_hwpm_err("Invalid inode"); + return -EINVAL; + } + if (!filp) { + tegra_soc_hwpm_err("Invalid file"); + return -EINVAL; + } + if (minor > 0) { + tegra_soc_hwpm_err("Incorrect minor number"); + return -EBADFD; + } + + hwpm = container_of(inode->i_cdev, struct tegra_soc_hwpm, cdev); + if (!hwpm) { + tegra_soc_hwpm_err("Invalid hwpm struct"); + return -EINVAL; + } + filp->private_data = hwpm; + + /* FIXME: Enable clock and reset programming */ +#if 0 + ret = reset_control_assert(hwpm->hwpm_rst); + if (ret < 0) { + tegra_soc_hwpm_err("hwpm reset assert failed"); + ret = -ENODEV; + goto fail; + } + ret = reset_control_assert(hwpm->la_rst); + if (ret < 0) { + tegra_soc_hwpm_err("la reset assert failed"); + ret = -ENODEV; + goto fail; + } + ret = clk_prepare_enable(hwpm->la_clk); + if (ret < 0) { + tegra_soc_hwpm_err("la clock enable failed"); + ret = -ENODEV; + goto fail; + } + ret = reset_control_deassert(hwpm->la_rst); + if (ret < 0) { + tegra_soc_hwpm_err("la reset deassert failed"); + ret = -ENODEV; + goto fail; + } + ret = reset_control_deassert(hwpm->hwpm_rst); + if (ret < 0) { + tegra_soc_hwpm_err("hwpm reset deassert failed"); + ret = -ENODEV; + goto fail; + } +#endif + + /* Map PMA and RTR apertures */ + hwpm->dt_apertures[TEGRA_SOC_HWPM_PMA_DT] = + of_iomap(hwpm->np, TEGRA_SOC_HWPM_PMA_DT); + if (!hwpm->dt_apertures[TEGRA_SOC_HWPM_PMA_DT]) { + tegra_soc_hwpm_err("Couldn't map the PMA aperture"); + ret = -ENOMEM; + goto fail; + } + res = platform_get_resource(hwpm->pdev, + IORESOURCE_MEM, + TEGRA_SOC_HWPM_PMA_DT); + if ((!res) || (res->start == 0) || (res->end == 0)) { + tegra_soc_hwpm_err("Invalid resource for PMA"); + ret = -ENOMEM; + goto fail; + } + pma_map[1].start_pa = res->start; + pma_map[1].end_pa = res->end; + cmd_slice_rtr_map[0].start_pa = res->start; + cmd_slice_rtr_map[0].end_pa = res->end; + if (hwpm->fake_registers_enabled) { + num_regs = (res->end + 1 - res->start) / sizeof(*pma_fake_regs); + pma_fake_regs = (u32 *)kzalloc(sizeof(*pma_fake_regs) * num_regs, + GFP_KERNEL); + if (!pma_fake_regs) { + tegra_soc_hwpm_err("Couldn't allocate memory for PMA" + " fake registers"); + ret = -ENOMEM; + goto fail; + } + pma_map[1].fake_registers = pma_fake_regs; + cmd_slice_rtr_map[0].fake_registers = pma_fake_regs; + } + + hwpm->dt_apertures[TEGRA_SOC_HWPM_RTR_DT] = + of_iomap(hwpm->np, TEGRA_SOC_HWPM_RTR_DT); + if (!hwpm->dt_apertures[TEGRA_SOC_HWPM_RTR_DT]) { + tegra_soc_hwpm_err("Couldn't map the RTR aperture"); + ret = -ENOMEM; + goto fail; + } + res = platform_get_resource(hwpm->pdev, + IORESOURCE_MEM, + TEGRA_SOC_HWPM_RTR_DT); + if ((!res) || (res->start == 0) || (res->end == 0)) { + tegra_soc_hwpm_err("Invalid resource for RTR"); + ret = -ENOMEM; + goto fail; + } + 
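+ /*
+ * As with PMA above, the RTR aperture's physical range comes from the
+ * device tree entry, so patch it into the CMD_SLICE_RTR map here at
+ * open() time rather than hard-coding it.
+ */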
cmd_slice_rtr_map[1].start_pa = res->start;
+	cmd_slice_rtr_map[1].end_pa = res->end;
+	if (hwpm->fake_registers_enabled) {
+		num_regs = (res->end + 1 - res->start) /
+				sizeof(*cmd_slice_rtr_map[1].fake_registers);
+		cmd_slice_rtr_map[1].fake_registers =
+			(u32 *)kzalloc(sizeof(*cmd_slice_rtr_map[1].fake_registers) *
+								num_regs,
+				       GFP_KERNEL);
+		if (!cmd_slice_rtr_map[1].fake_registers) {
+			tegra_soc_hwpm_err("Couldn't allocate memory for RTR"
+					   " fake registers");
+			ret = -ENOMEM;
+			goto fail;
+		}
+	}
+
+	/* FIXME: Remove after verification */
+	/* Disable SLCG */
+	ret = DRIVER_REG_RMW(hwpm,
+			TEGRA_SOC_HWPM_PMA_DT,
+			NV_PERF_PMASYS_CG2,
+			NV_PERF_PMASYS_CG2_SLCG,
+			NV_PERF_PMASYS_CG2_SLCG_DISABLED,
+			false);
+	if (ret < 0) {
+		tegra_soc_hwpm_err("Unable to disable PMA SLCG");
+		ret = -EIO;
+		goto fail;
+	}
+	ret = DRIVER_REG_RMW(hwpm,
+			TEGRA_SOC_HWPM_RTR_DT,
+			NV_PERF_PMMSYS_SYS0ROUTER_CG2,
+			NV_PERF_PMMSYS_SYS0ROUTER_CG2_SLCG,
+			NV_PERF_PMMSYS_SYS0ROUTER_CG2_SLCG_DISABLED,
+			false);
+	if (ret < 0) {
+		tegra_soc_hwpm_err("Unable to disable ROUTER SLCG");
+		ret = -EIO;
+		goto fail;
+	}
+
+	/* Program PROD values */
+	ret = DRIVER_REG_RMW(hwpm,
+			TEGRA_SOC_HWPM_PMA_DT,
+			NV_PERF_PMASYS_CONTROLB,
+			NV_PERF_PMASYS_CONTROLB_COALESCE_TIMEOUT_CYCLES,
+			NV_PERF_PMASYS_CONTROLB_COALESCE_TIMEOUT_CYCLES__PROD,
+			false);
+	if (ret < 0) {
+		tegra_soc_hwpm_err("Unable to program PROD value for PMASYS_CONTROLB");
+		ret = -EIO;
+		goto fail;
+	}
+	ret = DRIVER_REG_RMW(hwpm,
+			TEGRA_SOC_HWPM_PMA_DT,
+			NV_PERF_PMASYS_CHANNEL_CONFIG_USER_CH0,
+			NV_PERF_PMASYS_CHANNEL_CONFIG_USER_COALESCE_TIMEOUT_CYCLES,
+			NV_PERF_PMASYS_CHANNEL_CONFIG_USER_COALESCE_TIMEOUT_CYCLES__PROD,
+			false);
+	if (ret < 0) {
+		tegra_soc_hwpm_err("Unable to program PROD value for CHANNEL_CONFIG_USER");
+		ret = -EIO;
+		goto fail;
+	}
+
+	/* Initialize SW state */
+	hwpm->bind_completed = false;
+	hwpm->full_wlist_size = -1;
+
+	return 0;
+
+fail:
+	if (hwpm->dt_apertures[TEGRA_SOC_HWPM_PMA_DT]) {
+		iounmap(hwpm->dt_apertures[TEGRA_SOC_HWPM_PMA_DT]);
+		hwpm->dt_apertures[TEGRA_SOC_HWPM_PMA_DT] = NULL;
+	}
+	pma_map[1].start_pa = 0;
+	pma_map[1].end_pa = 0;
+	cmd_slice_rtr_map[0].start_pa = 0;
+	cmd_slice_rtr_map[0].end_pa = 0;
+	if (pma_fake_regs) {
+		kfree(pma_fake_regs);
+		pma_fake_regs = NULL;
+		pma_map[1].fake_registers = NULL;
+		cmd_slice_rtr_map[0].fake_registers = NULL;
+	}
+
+	if (hwpm->dt_apertures[TEGRA_SOC_HWPM_RTR_DT]) {
+		iounmap(hwpm->dt_apertures[TEGRA_SOC_HWPM_RTR_DT]);
+		hwpm->dt_apertures[TEGRA_SOC_HWPM_RTR_DT] = NULL;
+	}
+	cmd_slice_rtr_map[1].start_pa = 0;
+	cmd_slice_rtr_map[1].end_pa = 0;
+	if (cmd_slice_rtr_map[1].fake_registers) {
+		kfree(cmd_slice_rtr_map[1].fake_registers);
+		cmd_slice_rtr_map[1].fake_registers = NULL;
+	}
+
+	return ret;
+}
+
+static ssize_t tegra_soc_hwpm_read(struct file *file,
+				   char __user *ubuf,
+				   size_t count,
+				   loff_t *offp)
+{
+	return 0;
+}
+
+/* FIXME: Fix double release bug */
+static int tegra_soc_hwpm_release(struct inode *inode, struct file *filp)
+{
+	int err = 0;
+	int ret = 0;
+	bool timeout = false;
+	int res_idx = 0;
+	int aprt_idx = 0;
+	u32 field_mask = 0;
+	u32 field_val = 0;
+	u32 *mem_bytes_kernel_u32 = NULL;
+	struct tegra_soc_hwpm *hwpm = NULL;
+	struct hwpm_resource_aperture *aperture = NULL;
+#define RELEASE_FAIL(msg, ...)
\ + do { \ + if (err < 0) { \ + tegra_soc_hwpm_err(msg, ##__VA_ARGS__); \ + if (ret == 0) \ + ret = err; \ + } \ + } while (0) + + if (!inode) { + tegra_soc_hwpm_err("Invalid inode"); + return -EINVAL; + } + if (!filp) { + tegra_soc_hwpm_err("Invalid file"); + return -EINVAL; + } + + hwpm = container_of(inode->i_cdev, struct tegra_soc_hwpm, cdev); + if (!hwpm) { + tegra_soc_hwpm_err("Invalid hwpm struct"); + return -EINVAL; + } + + /* Disable PMA triggers */ + err = DRIVER_REG_RMW(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_TRIGGER_CONFIG_USER_CH0, + NV_PERF_PMASYS_TRIGGER_CONFIG_USER_PMA_PULSE, + NV_PERF_PMASYS_TRIGGER_CONFIG_USER_PMA_PULSE_DISABLE, + false); + RELEASE_FAIL("Unable to disable PMA triggers"); + + hwpm_writel(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_SYS_TRIGGER_START_MASK, + 0); + hwpm_writel(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_SYS_TRIGGER_START_MASKB, + 0); + hwpm_writel(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_SYS_TRIGGER_STOP_MASK, + 0); + hwpm_writel(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_SYS_TRIGGER_STOP_MASKB, + 0); + + /* Wait for PERFMONs, ROUTER, and PMA to idle */ + timeout = HWPM_TIMEOUT(HWPM_REG_CHECK_F(hwpm_readl(hwpm, + TEGRA_SOC_HWPM_RTR_DT, + NV_PERF_PMMSYS_SYS0ROUTER_PERFMONSTATUS), + NV_PERF_PMMSYS_SYS0ROUTER_PERFMONSTATUS_MERGED, + NV_PERF_PMMSYS_SYS0ROUTER_PERFMONSTATUS_MERGED_EMPTY), + "NV_PERF_PMMSYS_SYS0ROUTER_PERFMONSTATUS_MERGED_EMPTY"); + if (timeout && ret == 0) { + ret = -EIO; + } + timeout = HWPM_TIMEOUT(HWPM_REG_CHECK_F(hwpm_readl(hwpm, + TEGRA_SOC_HWPM_RTR_DT, + NV_PERF_PMMSYS_SYS0ROUTER_ENGINESTATUS), + NV_PERF_PMMSYS_SYS0ROUTER_ENGINESTATUS_STATUS, + NV_PERF_PMMSYS_SYS0ROUTER_ENGINESTATUS_STATUS_EMPTY), + "NV_PERF_PMMSYS_SYS0ROUTER_ENGINESTATUS_STATUS_EMPTY"); + if (timeout && ret == 0) { + ret = -EIO; + } + field_mask = NV_PERF_PMASYS_ENGINESTATUS_STATUS_MASK | + NV_PERF_PMASYS_ENGINESTATUS_RBUFEMPTY_MASK; + field_val = HWPM_REG_F(NV_PERF_PMASYS_ENGINESTATUS_STATUS, + NV_PERF_PMASYS_ENGINESTATUS_STATUS_EMPTY); + field_val |= HWPM_REG_F(NV_PERF_PMASYS_ENGINESTATUS_RBUFEMPTY, + NV_PERF_PMASYS_ENGINESTATUS_RBUFEMPTY_EMPTY); + timeout = HWPM_TIMEOUT(HWPM_REG_CHECK(hwpm_readl(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_ENGINESTATUS), + field_mask, + field_val), + "NV_PERF_PMASYS_ENGINESTATUS"); + if (timeout && ret == 0) { + ret = -EIO; + } + + /* Disable all PERFMONs */ + tegra_soc_hwpm_dbg("Disabling PERFMONs"); + for (res_idx = 0; res_idx < TERGA_SOC_HWPM_NUM_RESOURCES; res_idx++) { + if (!hwpm_resources[res_idx].reserved) + continue; + tegra_soc_hwpm_dbg("Found reserved IP(%d)", res_idx); + + for (aprt_idx = 0; + aprt_idx < hwpm_resources[res_idx].map_size; + aprt_idx++) { + aperture = &(hwpm_resources[res_idx].map[aprt_idx]); + if (IS_PERFMON(aperture->dt_aperture)) { + tegra_soc_hwpm_dbg("Found PERFMON(0x%llx - 0x%llx)", + aperture->start_pa, + aperture->end_pa); + err = DRIVER_REG_RMW(hwpm, + aperture->dt_aperture, + NV_PERF_PMMSYS_CONTROL, + NV_PERF_PMMSYS_CONTROL_MODE, + NV_PERF_PMMSYS_CONTROL_MODE_DISABLE, + false); + RELEASE_FAIL("Unable to disable PERFMON(0x%llx - 0x%llx)", + aperture->start_pa, + aperture->end_pa); + } + } + } + + /* Stream MEM_BYTES to clear pipeline */ + if (hwpm->mem_bytes_kernel) { + mem_bytes_kernel_u32 = (u32 *)(hwpm->mem_bytes_kernel); + *mem_bytes_kernel_u32 = TEGRA_SOC_HWPM_MEM_BYTES_INVALID; + err = DRIVER_REG_RMW(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_CHANNEL_CONTROL_USER_CH0, + NV_PERF_PMASYS_CHANNEL_CONTROL_USER_UPDATE_BYTES, + 
NV_PERF_PMASYS_CHANNEL_CONTROL_USER_UPDATE_BYTES_DOIT, + false); + RELEASE_FAIL("Unable to stream MEM_BYTES"); + timeout = HWPM_TIMEOUT(*mem_bytes_kernel_u32 != + TEGRA_SOC_HWPM_MEM_BYTES_INVALID, + "MEM_BYTES streaming"); + if (timeout && ret == 0) + ret = -EIO; + } + + /* Disable PMA streaming */ + err = DRIVER_REG_RMW(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_TRIGGER_CONFIG_USER_CH0, + NV_PERF_PMASYS_TRIGGER_CONFIG_USER_RECORD_STREAM, + NV_PERF_PMASYS_TRIGGER_CONFIG_USER_RECORD_STREAM_DISABLE, + false); + RELEASE_FAIL("Unable to disable PMA streaming"); + err = DRIVER_REG_RMW(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_CHANNEL_CONTROL_USER_CH0, + NV_PERF_PMASYS_CHANNEL_CONTROL_USER_STREAM, + NV_PERF_PMASYS_CHANNEL_CONTROL_USER_STREAM_DISABLE, + false); + RELEASE_FAIL("Unable to disable PMA streaming"); + + /* Memory Management */ + hwpm_writel(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_CHANNEL_OUTBASE_CH0, + 0); + hwpm_writel(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_CHANNEL_OUTBASEUPPER_CH0, + 0); + hwpm_writel(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_CHANNEL_OUTSIZE_CH0, + 0); + hwpm_writel(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_CHANNEL_MEM_BYTES_ADDR_CH0, + 0); + + if (hwpm->stream_sgt && (!IS_ERR(hwpm->stream_sgt))) { + dma_buf_unmap_attachment(hwpm->stream_attach, + hwpm->stream_sgt, + DMA_FROM_DEVICE); + } + hwpm->stream_sgt = NULL; + + if (hwpm->stream_attach && (!IS_ERR(hwpm->stream_attach))) { + dma_buf_detach(hwpm->stream_dma_buf, hwpm->stream_attach); + } + hwpm->stream_attach = NULL; + + if (hwpm->stream_dma_buf && (!IS_ERR(hwpm->stream_dma_buf))) { + dma_buf_put(hwpm->stream_dma_buf); + } + hwpm->stream_dma_buf = NULL; + + if (hwpm->mem_bytes_kernel) { + dma_buf_vunmap(hwpm->mem_bytes_dma_buf, + hwpm->mem_bytes_kernel); + hwpm->mem_bytes_kernel = NULL; + } + + if (hwpm->mem_bytes_sgt && (!IS_ERR(hwpm->mem_bytes_sgt))) { + dma_buf_unmap_attachment(hwpm->mem_bytes_attach, + hwpm->mem_bytes_sgt, + DMA_FROM_DEVICE); + } + hwpm->mem_bytes_sgt = NULL; + + if (hwpm->mem_bytes_attach && (!IS_ERR(hwpm->mem_bytes_attach))) { + dma_buf_detach(hwpm->mem_bytes_dma_buf, hwpm->mem_bytes_attach); + } + hwpm->mem_bytes_attach = NULL; + + if (hwpm->mem_bytes_dma_buf && (!IS_ERR(hwpm->mem_bytes_dma_buf))) { + dma_buf_put(hwpm->mem_bytes_dma_buf); + } + hwpm->mem_bytes_dma_buf = NULL; + + /* FIXME: Enable clock and reset programming */ + /* FIXME: Tell IPs which are being profiled to re-enable power management */ +#if 0 + err = reset_control_assert(hwpm->hwpm_rst); + RELEASE_FAIL("hwpm reset assert failed"); + err = reset_control_assert(hwpm->la_rst); + RELEASE_FAIL("la reset assert failed"); + clk_disable_unprepare(hwpm->la_clk); +#endif + + /* FIXME: Remove after verification */ + /* Enable SLCG */ + err = DRIVER_REG_RMW(hwpm, + TEGRA_SOC_HWPM_PMA_DT, + NV_PERF_PMASYS_CG2, + NV_PERF_PMASYS_CG2_SLCG, + NV_PERF_PMASYS_CG2_SLCG_ENABLED, + false); + RELEASE_FAIL("Unable to enable PMA SLCG"); + err = DRIVER_REG_RMW(hwpm, + TEGRA_SOC_HWPM_RTR_DT, + NV_PERF_PMMSYS_SYS0ROUTER_CG2, + NV_PERF_PMMSYS_SYS0ROUTER_CG2_SLCG, + NV_PERF_PMMSYS_SYS0ROUTER_CG2_SLCG_ENABLED, + false); + RELEASE_FAIL("Unable to enable ROUTER SLCG"); + + /* Unmap PMA and RTR apertures */ + tegra_soc_hwpm_dbg("Unmapping apertures"); + if (hwpm->dt_apertures[TEGRA_SOC_HWPM_PMA_DT]) { + iounmap(hwpm->dt_apertures[TEGRA_SOC_HWPM_PMA_DT]); + hwpm->dt_apertures[TEGRA_SOC_HWPM_PMA_DT] = NULL; + } + pma_map[1].start_pa = 0; + pma_map[1].end_pa = 0; + cmd_slice_rtr_map[0].start_pa = 0; + 
cmd_slice_rtr_map[0].end_pa = 0; + if (pma_fake_regs) { + kfree(pma_fake_regs); + pma_fake_regs = NULL; + pma_map[1].fake_registers = NULL; + cmd_slice_rtr_map[0].fake_registers = NULL; + } + if (hwpm->dt_apertures[TEGRA_SOC_HWPM_RTR_DT]) { + iounmap(hwpm->dt_apertures[TEGRA_SOC_HWPM_RTR_DT]); + hwpm->dt_apertures[TEGRA_SOC_HWPM_RTR_DT] = NULL; + } + cmd_slice_rtr_map[1].start_pa = 0; + cmd_slice_rtr_map[1].end_pa = 0; + if (cmd_slice_rtr_map[1].fake_registers) { + kfree(cmd_slice_rtr_map[1].fake_registers); + cmd_slice_rtr_map[1].fake_registers = NULL; + } + + /* Reset resource and aperture state */ + for (res_idx = 0; res_idx < TERGA_SOC_HWPM_NUM_RESOURCES; res_idx++) { + if (!hwpm_resources[res_idx].reserved) + continue; + tegra_soc_hwpm_dbg("Found reserved IP(%d)", res_idx); + hwpm_resources[res_idx].reserved = false; + + for (aprt_idx = 0; + aprt_idx < hwpm_resources[res_idx].map_size; + aprt_idx++) { + aperture = &(hwpm_resources[res_idx].map[aprt_idx]); + if ((aperture->dt_aperture == TEGRA_SOC_HWPM_PMA_DT) || + (aperture->dt_aperture == TEGRA_SOC_HWPM_RTR_DT)) { + /* PMA and RTR apertures are handled separately */ + continue; + } else if (IS_PERFMON(aperture->dt_aperture)) { + if (hwpm->dt_apertures[aperture->dt_aperture]) { + iounmap(hwpm->dt_apertures[aperture->dt_aperture]); + hwpm->dt_apertures[aperture->dt_aperture] = NULL; + } + + aperture->start_pa = 0; + aperture->end_pa = 0; + + if (aperture->fake_registers) { + kfree(aperture->fake_registers); + aperture->fake_registers = NULL; + } + } else { /* IP apertures */ + if (aperture->fake_registers) { + kfree(aperture->fake_registers); + aperture->fake_registers = NULL; + set_mc_fake_regs(hwpm, aperture, true); + } + } + } + } + + return ret; +} + +/* File ops for device node */ +const struct file_operations tegra_soc_hwpm_ops = { + .owner = THIS_MODULE, + .open = tegra_soc_hwpm_open, + .read = tegra_soc_hwpm_read, + .release = tegra_soc_hwpm_release, + .unlocked_ioctl = tegra_soc_hwpm_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = tegra_soc_hwpm_ioctl, +#endif +}; diff --git a/tegra-soc-hwpm-log.c b/tegra-soc-hwpm-log.c new file mode 100644 index 0000000..0ea0ebb --- /dev/null +++ b/tegra-soc-hwpm-log.c @@ -0,0 +1,53 @@ +/* + * tegra-soc-hwpm-log.c: + * This file adds logging APIs for the Tegra SOC HWPM driver. + * + * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include + +#include "tegra-soc-hwpm.h" + +#define LOG_BUF_SIZE 160 + +static void tegra_soc_hwpm_print(const char *func, + int line, + int type, + const char *log) +{ + switch (type) { + case tegra_soc_hwpm_log_err: + pr_err(TEGRA_SOC_HWPM_MODULE_NAME ": %s: %d: ERROR: %s\n", + func, line, log); + break; + case tegra_soc_hwpm_log_dbg: + pr_info(TEGRA_SOC_HWPM_MODULE_NAME ": %s: %d: DEBUG: %s\n", + func, line, log); + break; + } +} + +void tegra_soc_hwpm_log(const char *func, int line, int type, const char *fmt, ...) 
+{ + char log[LOG_BUF_SIZE]; + va_list args; + + va_start(args, fmt); + (void) vsnprintf(log, LOG_BUF_SIZE, fmt, args); + va_end(args); + + tegra_soc_hwpm_print(func, line, type, log); +} diff --git a/tegra-soc-hwpm-log.h b/tegra-soc-hwpm-log.h new file mode 100644 index 0000000..deb1659 --- /dev/null +++ b/tegra-soc-hwpm-log.h @@ -0,0 +1,39 @@ +/* + * tegra-soc-hwpm-log.h: + * This is the logging API header for the Tegra SOC HWPM driver. + * + * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef TEGRA_SOC_HWPM_LOG_H +#define TEGRA_SOC_HWPM_LOG_H + +#define TEGRA_SOC_HWPM_MODULE_NAME "tegra-soc-hwpm" + +enum tegra_soc_hwpm_log_type { + tegra_soc_hwpm_log_err, /* Error prints */ + tegra_soc_hwpm_log_dbg, /* Debug prints */ +}; + +#define tegra_soc_hwpm_err(fmt, arg...) \ + tegra_soc_hwpm_log(__func__, __LINE__, tegra_soc_hwpm_log_err, \ + fmt, ##arg) +#define tegra_soc_hwpm_dbg(fmt, arg...) \ + tegra_soc_hwpm_log(__func__, __LINE__, tegra_soc_hwpm_log_dbg, \ + fmt, ##arg) + +void tegra_soc_hwpm_log(const char *func, int line, int type, const char *fmt, ...); + +#endif /* TEGRA_SOC_HWPM_LOG_H */ diff --git a/tegra-soc-hwpm.c b/tegra-soc-hwpm.c new file mode 100644 index 0000000..4214e4c --- /dev/null +++ b/tegra-soc-hwpm.c @@ -0,0 +1,230 @@ +/* + * tegra-soc-hwpm.c: + * This is Tegra's driver for programming the SOC HWPM path. + * + * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include + +#include + +#include "tegra-soc-hwpm.h" + +static const struct of_device_id tegra_soc_hwpm_of_match[] = { + { + .compatible = "nvidia,t23x-soc-hwpm", + }, { + }, +}; +MODULE_DEVICE_TABLE(of, tegra_soc_hwpm_of_match); + +static int tegra_soc_hwpm_probe(struct platform_device *pdev) +{ + int ret = 0; + struct device *dev = NULL; + struct tegra_soc_hwpm *hwpm = NULL; + + if (!pdev) { + tegra_soc_hwpm_err("Invalid platform device"); + ret = -ENODEV; + goto fail; + } + + hwpm = kzalloc(sizeof(struct tegra_soc_hwpm), GFP_KERNEL); + if (!hwpm) { + tegra_soc_hwpm_err("Couldn't allocate memory for hwpm struct"); + ret = -ENOMEM; + goto fail; + } + hwpm->pdev = pdev; + hwpm->dev = &pdev->dev; + hwpm->np = pdev->dev.of_node; + hwpm->class.owner = THIS_MODULE; + hwpm->class.name = TEGRA_SOC_HWPM_MODULE_NAME; + + /* Create device node */ + ret = class_register(&hwpm->class); + if (ret) { + tegra_soc_hwpm_err("Failed to register class"); + goto class_register; + } + + ret = alloc_chrdev_region(&hwpm->dev_t, 0, 1, dev_name(hwpm->dev)); + if (ret) { + tegra_soc_hwpm_err("Failed to allocate device region"); + goto alloc_chrdev_region; + } + + cdev_init(&hwpm->cdev, &tegra_soc_hwpm_ops); + hwpm->cdev.owner = THIS_MODULE; + + ret = cdev_add(&hwpm->cdev, hwpm->dev_t, 1); + if (ret) { + tegra_soc_hwpm_err("Failed to add cdev"); + goto cdev_add; + } + + dev = device_create(&hwpm->class, + NULL, + hwpm->dev_t, + NULL, + TEGRA_SOC_HWPM_MODULE_NAME); + if (IS_ERR(dev)) { + tegra_soc_hwpm_err("Failed to create device"); + ret = PTR_ERR(dev); + goto device_create; + } + + /* FIXME: Enable clock and reset programming */ +#if 0 + hwpm->la_clk = devm_clk_get(hwpm->dev, "la"); + if (IS_ERR(hwpm->la_clk)) { + tegra_soc_hwpm_err("Missing la clock"); + ret = PTR_ERR(hwpm->la_clk); + goto fail; + } + + hwpm->la_rst = devm_reset_control_get(hwpm->dev, "la"); + if (IS_ERR(hwpm->la_rst)) { + tegra_soc_hwpm_err("Missing la reset"); + ret = PTR_ERR(hwpm->la_rst); + goto fail; + } + + hwpm->hwpm_rst = devm_reset_control_get(hwpm->dev, "hwpm"); + if (IS_ERR(hwpm->hwpm_rst)) { + tegra_soc_hwpm_err("Missing hwpm reset"); + ret = PTR_ERR(hwpm->hwpm_rst); + goto fail; + } + */ +#endif + + tegra_soc_hwpm_debugfs_init(hwpm); + + /* + * Currently VDK doesn't have a fmodel for SOC HWPM. Therefore, we + * enable fake registers on VDK for minimal testing. 
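+	 * Fake registers are plain kzalloc()'d arrays that stand in for the
+	 * real MMIO apertures, so register reads and writes still have
+	 * backing storage when no silicon or fmodel is available.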
+ */ + if (tegra_platform_is_vdk()) + hwpm->fake_registers_enabled = true; + else + hwpm->fake_registers_enabled = false; + + platform_set_drvdata(pdev, hwpm); + + tegra_soc_hwpm_dbg("Probe successful!"); + goto success; + + +device_create: + cdev_del(&hwpm->cdev); +cdev_add: + unregister_chrdev_region(hwpm->dev_t, 1); +alloc_chrdev_region: + class_unregister(&hwpm->class); +class_register: + kfree(hwpm); + /* FIXME: Enable clock and reset programming */ +#if 0 + if (hwpm->la_clk) + devm_clk_put(hwpm->dev, hwpm->la_clk); + if (hwpm->la_rst) + reset_control_assert(hwpm->la_rst); + if (hwpm->hwpm_rst) + reset_control_assert(hwpm->hwpm_rst); + */ +#endif +fail: + tegra_soc_hwpm_err("Probe failed!"); +success: + return ret; +} + +static int tegra_soc_hwpm_remove(struct platform_device *pdev) +{ + struct tegra_soc_hwpm *hwpm = NULL; + + if (!pdev) { + tegra_soc_hwpm_err("Invalid platform device"); + return -ENODEV; + } + + hwpm = platform_get_drvdata(pdev); + if (!hwpm) { + tegra_soc_hwpm_err("Invalid hwpm struct"); + return -ENODEV; + } + + tegra_soc_hwpm_debugfs_deinit(hwpm); + + /* FIXME: Enable clock and reset programming */ +#if 0 + if (hwpm->la_clk) + devm_clk_put(hwpm->dev, hwpm->la_clk); + if (hwpm->la_rst) + reset_control_assert(hwpm->la_rst); + if (hwpm->hwpm_rst) + reset_control_assert(hwpm->hwpm_rst); + */ +#endif + + device_destroy(&hwpm->class, hwpm->dev_t); + cdev_del(&hwpm->cdev); + unregister_chrdev_region(hwpm->dev_t, 1); + class_unregister(&hwpm->class); + + kfree(hwpm); + + return 0; +} + +static struct platform_driver tegra_soc_hwpm_pdrv = { + .probe = tegra_soc_hwpm_probe, + .remove = tegra_soc_hwpm_remove, + .driver = { + .name = TEGRA_SOC_HWPM_MODULE_NAME, + .of_match_table = of_match_ptr(tegra_soc_hwpm_of_match), + }, +}; + +static int __init tegra_soc_hwpm_init(void) +{ + int ret = 0; + + ret = platform_driver_register(&tegra_soc_hwpm_pdrv); + if (ret < 0) + tegra_soc_hwpm_err("Platform driver register failed"); + + return ret; +} + +static void __exit tegra_soc_hwpm_exit(void) +{ + tegra_soc_hwpm_dbg("Unloading the Tegra SOC HWPM driver"); + platform_driver_unregister(&tegra_soc_hwpm_pdrv); +} + +module_init(tegra_soc_hwpm_init); +module_exit(tegra_soc_hwpm_exit); + +MODULE_ALIAS(TEGRA_SOC_HWPM_MODULE_NAME); +MODULE_DESCRIPTION("Tegra SOC HWPM Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/tegra-soc-hwpm.h b/tegra-soc-hwpm.h new file mode 100644 index 0000000..e37fca0 --- /dev/null +++ b/tegra-soc-hwpm.h @@ -0,0 +1,109 @@ +/* + * tegra-soc-hwpm.h: + * This is the header for the Tegra SOC HWPM driver. + * + * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef TEGRA_SOC_HWPM_H +#define TEGRA_SOC_HWPM_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tegra-soc-hwpm-log.h" +#include "tegra-soc-hwpm-hw.h" +#include + +/* FIXME: Default timeout is 1 sec. Is this sufficient for pre-si? 
*/ +#define HWPM_TIMEOUT(timeout_check, expiry_msg) ({ \ + bool timeout_expired = false; \ + s32 timeout_msecs = 1000; \ + u32 sleep_msecs = 100; \ + while(!(timeout_check)) { \ + msleep(sleep_msecs); \ + timeout_msecs -= sleep_msecs; \ + if (timeout_msecs <= 0) { \ + tegra_soc_hwpm_err("Timeout expired for %s!", \ + expiry_msg); \ + timeout_expired = true; \ + break; \ + } \ + } \ + timeout_expired; \ +}) + +/* Driver struct */ +struct tegra_soc_hwpm { + /* Device */ + struct platform_device *pdev; + struct device *dev; + struct device_node *np; + struct class class; + dev_t dev_t; + struct cdev cdev; + + /* MMIO apertures in device tree */ + void __iomem *dt_apertures[TEGRA_SOC_HWPM_NUM_DT_APERTURES]; + + /* Clocks and resets */ + /* FIXME: Enable clock and reset programming */ +#if 0 + struct clk *la_clk; + struct reset_control *la_rst; + struct reset_control *hwpm_rst; +#endif + + /* Memory Management */ + struct dma_buf *stream_dma_buf; + struct dma_buf_attachment *stream_attach; + struct sg_table *stream_sgt; + struct dma_buf *mem_bytes_dma_buf; + struct dma_buf_attachment *mem_bytes_attach; + struct sg_table *mem_bytes_sgt; + void *mem_bytes_kernel; + + /* SW State */ + bool bind_completed; + s32 full_wlist_size; + + /* Debugging */ +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_root; +#endif + bool fake_registers_enabled; +}; + +extern const struct file_operations tegra_soc_hwpm_ops; + +#ifdef CONFIG_DEBUG_FS +void tegra_soc_hwpm_debugfs_init(struct tegra_soc_hwpm *hwpm); +void tegra_soc_hwpm_debugfs_deinit(struct tegra_soc_hwpm *hwpm); +#else +static inline void tegra_soc_hwpm_debugfs_init(struct tegra_soc_hwpm *hwpm) +{ + hwpm->debugfs_root = NULL; +} +static inline void tegra_soc_hwpm_debugfs_deinit(struct tegra_soc_hwpm *hwpm) {} +#endif /* CONFIG_DEBUG_FS */ + +#endif /* TEGRA_SOC_HWPM_H */