tegra: hwpm: t264: Merge t264-hwpm files to hwpm

Merge the T264 private source code into the hwpm common code.
This is done now that the T264 source code can be made public.

Bug 4856428
Bug 4943517

Signed-off-by: vasukis <vasukis@nvidia.com>
Change-Id: Ie830c5465f32f49978cb465d68785ab3dbaee984
Reviewed-on: https://git-master.nvidia.com/r/c/linux-hwpm/+/3219865
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-by: Laxman Dewangan <ldewangan@nvidia.com>
Author:    vasukis
Date:      2024-09-26 23:19:43 +00:00
Committer: mobile promotions
Parent:    b8a884d226
Commit:    bb4b1def61

43 changed files with 11081 additions and 24 deletions


@@ -18,3 +18,10 @@ config TEGRA_TH500_HWPM
default y if (TEGRA_SOC_HWPM && ARCH_TEGRA_TH500_SOC)
help
TH500 performance monitoring driver.
config TEGRA_T264_HWPM
bool "Tegra T264 HWPM driver"
depends on TEGRA_SOC_HWPM && ARCH_TEGRA_T264_SOC
default y if (TEGRA_SOC_HWPM && ARCH_TEGRA_T264_SOC)
help
T264 performance monitoring driver.


@@ -90,3 +90,9 @@ ccflags-y += -DCONFIG_TEGRA_HWPM_TH500
include ${srctree.hwpm}/drivers/tegra/hwpm/Makefile.th500.sources
nvhwpm-objs += ${nvhwpm-th500-objs}
endif
# Include T264 files
CONFIG_TEGRA_T264_HWPM := y
ccflags-y += -DCONFIG_TEGRA_T264_HWPM
include ${srctree.hwpm}/drivers/tegra/hwpm/Makefile.t264.sources
nvhwpm-objs += ${nvhwpm-t264-objs}


@@ -0,0 +1,91 @@
# -*- mode: makefile -*-
#
# Copyright (c) 2023-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Tegra SOC HWPM T264 sources
#
ifeq ($(CONFIG_TEGRA_T264_HWPM),y)
nvhwpm-t264-objs += hal/t264/t264_aperture.o
nvhwpm-t264-objs += hal/t264/t264_interface.o
nvhwpm-t264-objs += hal/t264/t264_ip.o
nvhwpm-t264-objs += hal/t264/t264_mem_mgmt.o
nvhwpm-t264-objs += hal/t264/t264_resource.o
nvhwpm-t264-objs += hal/t264/t264_regops_allowlist.o
#
# RTR/PMA are HWPM IPs and can be enabled by default
#
nvhwpm-t264-objs += hal/t264/ip/pma/t264_pma.o
nvhwpm-t264-objs += hal/t264/ip/rtr/t264_rtr.o
#
# One of the HWPM components is a perfmux. Perfmux registers belong to the
# IP domain. There are two ways of accessing perfmux registers:
# - option 1: implement a HWPM <-> IP interface. IP drivers register with the
#   HWPM driver and share the required function pointers
# - option 2: map perfmux register addresses in the HWPM driver
# Option 1 is the preferred solution. However, IP drivers have yet to
# implement the interface, so the HWPM driver implements option 2 for
# validation of such IPs. If an IP is forced to available status from the
# HWPM driver's perspective, it is the user's responsibility to ensure that
# the IP is in fact present in the SoC configuration and unpowergated before
# running any HWPM experiments.
#
# Enable CONFIG_T264_HWPM_ALLOW_FORCE_ENABLE for internal builds
# (see the sketch after this file listing).
# Note: We should work towards removing force-enabled IPs.
#
ifeq ($(CONFIG_HWPM_BUILD_INTERNAL),y)
ccflags-y += -DCONFIG_T264_HWPM_ALLOW_FORCE_ENABLE
endif
# The IPs below are enabled for all builds
ccflags-y += -DCONFIG_T264_HWPM_IP_PVA
nvhwpm-t264-objs += hal/t264/ip/pva/t264_pva.o
ccflags-y += -DCONFIG_T264_HWPM_IP_MSS_CHANNEL
nvhwpm-t264-objs += hal/t264/ip/mss_channel/t264_mss_channel.o
ccflags-y += -DCONFIG_T264_HWPM_IP_VIC
nvhwpm-t264-objs += hal/t264/ip/vic/t264_vic.o
ccflags-y += -DCONFIG_T264_HWPM_IP_MSS_HUBS
nvhwpm-t264-objs += hal/t264/ip/mss_hubs/t264_mss_hubs.o
ccflags-y += -DCONFIG_T264_HWPM_IP_OCU
nvhwpm-t264-objs += hal/t264/ip/ocu/t264_ocu.o
ccflags-y += -DCONFIG_T264_HWPM_IP_SMMU
nvhwpm-t264-objs += hal/t264/ip/smmu/t264_smmu.o
ccflags-y += -DCONFIG_T264_HWPM_IP_UCF_MSW
nvhwpm-t264-objs += hal/t264/ip/ucf_msw/t264_ucf_msw.o
ccflags-y += -DCONFIG_T264_HWPM_IP_UCF_PSW
nvhwpm-t264-objs += hal/t264/ip/ucf_psw/t264_ucf_psw.o
ccflags-y += -DCONFIG_T264_HWPM_IP_UCF_CSW
nvhwpm-t264-objs += hal/t264/ip/ucf_csw/t264_ucf_csw.o
ccflags-y += -DCONFIG_T264_HWPM_IP_CPU
nvhwpm-t264-objs += hal/t264/ip/cpu/t264_cpu.o
endif
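
The force-enable path described in the comment above can be made concrete with a short sketch. Below is a minimal C illustration, under the assumption that the flag is consumed as an ordinary compile-time guard; the function name and return convention are hypothetical and not taken from the driver.

/*
 * Illustrative sketch only (not part of this commit): consuming the
 * internal-build flag as a compile-time guard. The function name is
 * hypothetical.
 */
#include <linux/types.h>

static bool t264_hwpm_force_enable_allowed(void)
{
#if defined(CONFIG_T264_HWPM_ALLOW_FORCE_ENABLE)
	/*
	 * Internal builds may mark an IP available even though the IP driver
	 * has not registered; the user must ensure the IP is present and
	 * unpowergated before running HWPM experiments.
	 */
	return true;
#else
	return false;
#endif
}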


@@ -31,13 +31,11 @@
#include <hal/t234/t234_init.h>
#include <hal/th500/th500_init.h>
#include <hal/t264/t264_init.h>
#ifdef CONFIG_TEGRA_NEXT1_HWPM
#include <tegra_hwpm_next1_init.h>
#endif
#ifdef CONFIG_TEGRA_NEXT3_HWPM
#include <tegra_hwpm_next3_init.h>
#endif
static int tegra_hwpm_init_chip_ip_structures(struct tegra_soc_hwpm *hwpm,
u32 chip_id, u32 chip_id_rev)
@@ -76,19 +74,21 @@ static int tegra_hwpm_init_chip_ip_structures(struct tegra_soc_hwpm *hwpm,
}
break;
#endif
default:
#if defined(CONFIG_TEGRA_NEXT3_HWPM)
err = tegra_hwpm_next3_init_chip_ip_structures(
hwpm, chip_id, chip_id_rev);
if (err == 0) {
/* Execution is for NEXT3 chip */
#ifdef CONFIG_TEGRA_T264_HWPM
case 0x26:
switch (chip_id_rev) {
case 0x4:
err = t264_hwpm_init_chip_info(hwpm);
break;
default:
tegra_hwpm_err(hwpm, "Chip 0x%x rev 0x%x not supported",
chip_id, chip_id_rev);
break;
}
break;
#endif
#if !defined(CONFIG_TEGRA_NEXT3_HWPM)
default:
tegra_hwpm_err(hwpm, "Chip 0x%x not supported", chip_id);
#endif
break;
}


@@ -0,0 +1,339 @@
/* SPDX-License-Identifier: MIT */
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* This is a generated file. Do not edit.
*/
/*
* Function/Macro naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef T264_ADDR_MAP_SOC_HWPM_H
#define T264_ADDR_MAP_SOC_HWPM_H
#define addr_map_rpg_grp_system_base_r() (0x1600000U)
#define addr_map_rpg_grp_system_limit_r() (0x16fffffU)
#define addr_map_rpg_grp_ucf_base_r() (0x8101600000U)
#define addr_map_rpg_grp_ucf_limit_r() (0x81016fffffU)
#define addr_map_rpg_grp_vision_base_r() (0x8181600000U)
#define addr_map_rpg_grp_vision_limit_r() (0x81816fffffU)
#define addr_map_rpg_grp_disp_usb_base_r() (0x8801600000U)
#define addr_map_rpg_grp_disp_usb_limit_r() (0x88016fffffU)
#define addr_map_rpg_grp_uphy0_base_r() (0xa801600000U)
#define addr_map_rpg_grp_uphy0_limit_r() (0xa8016fffffU)
#define addr_map_rpg_pm_hwpm_base_r() (0x1604000U)
#define addr_map_rpg_pm_hwpm_limit_r() (0x1604fffU)
#define addr_map_pma_base_r() (0x1610000U)
#define addr_map_pma_limit_r() (0x1611fffU)
#define addr_map_rtr_base_r() (0x1612000U)
#define addr_map_rtr_limit_r() (0x1612fffU)
#define addr_map_rpg_pm_mss0_base_r() (0x8101621000U)
#define addr_map_rpg_pm_mss0_limit_r() (0x8101621fffU)
#define addr_map_rpg_pm_mss1_base_r() (0x8101622000U)
#define addr_map_rpg_pm_mss1_limit_r() (0x8101622fffU)
#define addr_map_rpg_pm_mss2_base_r() (0x8101623000U)
#define addr_map_rpg_pm_mss2_limit_r() (0x8101623fffU)
#define addr_map_rpg_pm_mss3_base_r() (0x8101624000U)
#define addr_map_rpg_pm_mss3_limit_r() (0x8101624fffU)
#define addr_map_rpg_pm_mss4_base_r() (0x8101625000U)
#define addr_map_rpg_pm_mss4_limit_r() (0x8101625fffU)
#define addr_map_rpg_pm_mss5_base_r() (0x8101626000U)
#define addr_map_rpg_pm_mss5_limit_r() (0x8101626fffU)
#define addr_map_rpg_pm_mss6_base_r() (0x8101627000U)
#define addr_map_rpg_pm_mss6_limit_r() (0x8101627fffU)
#define addr_map_rpg_pm_mss7_base_r() (0x8101628000U)
#define addr_map_rpg_pm_mss7_limit_r() (0x8101628fffU)
#define addr_map_rpg_pm_mss8_base_r() (0x8101629000U)
#define addr_map_rpg_pm_mss8_limit_r() (0x8101629fffU)
#define addr_map_rpg_pm_mss9_base_r() (0x810162a000U)
#define addr_map_rpg_pm_mss9_limit_r() (0x810162afffU)
#define addr_map_rpg_pm_mss10_base_r() (0x810162b000U)
#define addr_map_rpg_pm_mss10_limit_r() (0x810162bfffU)
#define addr_map_rpg_pm_mss11_base_r() (0x810162c000U)
#define addr_map_rpg_pm_mss11_limit_r() (0x810162cfffU)
#define addr_map_rpg_pm_mss12_base_r() (0x810162d000U)
#define addr_map_rpg_pm_mss12_limit_r() (0x810162dfffU)
#define addr_map_rpg_pm_mss13_base_r() (0x810162e000U)
#define addr_map_rpg_pm_mss13_limit_r() (0x810162efffU)
#define addr_map_rpg_pm_mss14_base_r() (0x810162f000U)
#define addr_map_rpg_pm_mss14_limit_r() (0x810162ffffU)
#define addr_map_rpg_pm_mss15_base_r() (0x8101630000U)
#define addr_map_rpg_pm_mss15_limit_r() (0x8101630fffU)
#define addr_map_mcb_base_r() (0x8108020000U)
#define addr_map_mcb_limit_r() (0x810803ffffU)
#define addr_map_mc0_base_r() (0x8108040000U)
#define addr_map_mc0_limit_r() (0x810805ffffU)
#define addr_map_mc1_base_r() (0x8108060000U)
#define addr_map_mc1_limit_r() (0x810807ffffU)
#define addr_map_mc2_base_r() (0x8108080000U)
#define addr_map_mc2_limit_r() (0x810809ffffU)
#define addr_map_mc3_base_r() (0x81080a0000U)
#define addr_map_mc3_limit_r() (0x81080bffffU)
#define addr_map_mc4_base_r() (0x81080c0000U)
#define addr_map_mc4_limit_r() (0x81080dffffU)
#define addr_map_mc5_base_r() (0x81080e0000U)
#define addr_map_mc5_limit_r() (0x81080fffffU)
#define addr_map_mc6_base_r() (0x8108100000U)
#define addr_map_mc6_limit_r() (0x810811ffffU)
#define addr_map_mc7_base_r() (0x8108120000U)
#define addr_map_mc7_limit_r() (0x810813ffffU)
#define addr_map_mc8_base_r() (0x8108140000U)
#define addr_map_mc8_limit_r() (0x810815ffffU)
#define addr_map_mc9_base_r() (0x8108160000U)
#define addr_map_mc9_limit_r() (0x810817ffffU)
#define addr_map_mc10_base_r() (0x8108180000U)
#define addr_map_mc10_limit_r() (0x810819ffffU)
#define addr_map_mc11_base_r() (0x81081a0000U)
#define addr_map_mc11_limit_r() (0x81081bffffU)
#define addr_map_mc12_base_r() (0x81081c0000U)
#define addr_map_mc12_limit_r() (0x81081dffffU)
#define addr_map_mc13_base_r() (0x81081e0000U)
#define addr_map_mc13_limit_r() (0x81081fffffU)
#define addr_map_mc14_base_r() (0x8108200000U)
#define addr_map_mc14_limit_r() (0x810821ffffU)
#define addr_map_mc15_base_r() (0x8108220000U)
#define addr_map_mc15_limit_r() (0x810823ffffU)
#define addr_map_rpg_pm_pvac0_base_r() (0x8181605000U)
#define addr_map_rpg_pm_pvac0_limit_r() (0x8181605fffU)
#define addr_map_rpg_pm_pvav0_base_r() (0x8181606000U)
#define addr_map_rpg_pm_pvav0_limit_r() (0x8181606fffU)
#define addr_map_rpg_pm_pvav1_base_r() (0x8181607000U)
#define addr_map_rpg_pm_pvav1_limit_r() (0x8181607fffU)
#define addr_map_rpg_pm_pvap0_base_r() (0x818160e000U)
#define addr_map_rpg_pm_pvap0_limit_r() (0x818160efffU)
#define addr_map_rpg_pm_pvap1_base_r() (0x818160f000U)
#define addr_map_rpg_pm_pvap1_limit_r() (0x818160ffffU)
#define addr_map_pva0_pm_base_r() (0x818c200000U)
#define addr_map_pva0_pm_limit_r() (0x818c20ffffU)
#define addr_map_pva1_pm_base_r() (0x818cb00000U)
#define addr_map_pva1_pm_limit_r() (0x818cb0ffffU)
#define addr_map_rpg_pm_vic0_base_r() (0x8181604000U)
#define addr_map_rpg_pm_vic0_limit_r() (0x8181604fffU)
#define addr_map_vic_base_r() (0x8188050000U)
#define addr_map_vic_limit_r() (0x818808ffffU)
#define addr_map_rpg_pm_system_msshub0_base_r() (0x1600000U)
#define addr_map_rpg_pm_system_msshub0_limit_r() (0x1600fffU)
#define addr_map_rpg_pm_ucf_msshub0_base_r() (0x810163e000U)
#define addr_map_rpg_pm_ucf_msshub0_limit_r() (0x810163efffU)
#define addr_map_rpg_pm_ucf_msshub1_base_r() (0x810163f000U)
#define addr_map_rpg_pm_ucf_msshub1_limit_r() (0x810163ffffU)
#define addr_map_rpg_pm_ucf_msshub2_base_r() (0x810164f000U)
#define addr_map_rpg_pm_ucf_msshub2_limit_r() (0x810164ffffU)
#define addr_map_rpg_pm_vision_msshub0_base_r() (0x818160b000U)
#define addr_map_rpg_pm_vision_msshub0_limit_r() (0x818160bfffU)
#define addr_map_rpg_pm_vision_msshub1_base_r() (0x818160c000U)
#define addr_map_rpg_pm_vision_msshub1_limit_r() (0x818160cfffU)
#define addr_map_rpg_pm_disp_usb_msshub0_base_r() (0x8801601000U)
#define addr_map_rpg_pm_disp_usb_msshub0_limit_r() (0x8801601fffU)
#define addr_map_rpg_pm_uphy0_msshub0_base_r() (0xa801628000U)
#define addr_map_rpg_pm_uphy0_msshub0_limit_r() (0xa801628fffU)
#define addr_map_rpg_pm_uphy0_msshub1_base_r() (0xa801629000U)
#define addr_map_rpg_pm_uphy0_msshub1_limit_r() (0xa801629fffU)
#define addr_map_rpg_pm_ocu_base_r() (0xa801604000U)
#define addr_map_rpg_pm_ocu_limit_r() (0xa801604fffU)
#define addr_map_ocu_base_r() (0xa808740000U)
#define addr_map_ocu_limit_r() (0xa80874ffffU)
#define addr_map_rpg_pm_ucf_smmu0_base_r() (0x8101642000U)
#define addr_map_rpg_pm_ucf_smmu0_limit_r() (0x8101642fffU)
#define addr_map_rpg_pm_ucf_smmu1_base_r() (0x8101643000U)
#define addr_map_rpg_pm_ucf_smmu1_limit_r() (0x8101643fffU)
#define addr_map_rpg_pm_ucf_smmu3_base_r() (0x810164b000U)
#define addr_map_rpg_pm_ucf_smmu3_limit_r() (0x810164bfffU)
#define addr_map_rpg_pm_ucf_smmu2_base_r() (0x8101653000U)
#define addr_map_rpg_pm_ucf_smmu2_limit_r() (0x8101653fffU)
#define addr_map_rpg_pm_disp_usb_smmu0_base_r() (0x8801602000U)
#define addr_map_rpg_pm_disp_usb_smmu0_limit_r() (0x8801602fffU)
#define addr_map_smmu1_base_r() (0x8105a30000U)
#define addr_map_smmu1_limit_r() (0x8105a3ffffU)
#define addr_map_smmu2_base_r() (0x8106a30000U)
#define addr_map_smmu2_limit_r() (0x8106a3ffffU)
#define addr_map_smmu0_base_r() (0x810aa30000U)
#define addr_map_smmu0_limit_r() (0x810aa3ffffU)
#define addr_map_smmu4_base_r() (0x810ba30000U)
#define addr_map_smmu4_limit_r() (0x810ba3ffffU)
#define addr_map_smmu3_base_r() (0x8806a30000U)
#define addr_map_smmu3_limit_r() (0x8806a3ffffU)
#define addr_map_rpg_pm_ucf_msw0_base_r() (0x8101600000U)
#define addr_map_rpg_pm_ucf_msw0_limit_r() (0x8101600fffU)
#define addr_map_rpg_pm_ucf_msw1_base_r() (0x8101601000U)
#define addr_map_rpg_pm_ucf_msw1_limit_r() (0x8101601fffU)
#define addr_map_rpg_pm_ucf_msw2_base_r() (0x8101602000U)
#define addr_map_rpg_pm_ucf_msw2_limit_r() (0x8101602fffU)
#define addr_map_rpg_pm_ucf_msw3_base_r() (0x8101603000U)
#define addr_map_rpg_pm_ucf_msw3_limit_r() (0x8101603fffU)
#define addr_map_rpg_pm_ucf_msw4_base_r() (0x8101604000U)
#define addr_map_rpg_pm_ucf_msw4_limit_r() (0x8101604fffU)
#define addr_map_rpg_pm_ucf_msw5_base_r() (0x8101605000U)
#define addr_map_rpg_pm_ucf_msw5_limit_r() (0x8101605fffU)
#define addr_map_rpg_pm_ucf_msw6_base_r() (0x8101606000U)
#define addr_map_rpg_pm_ucf_msw6_limit_r() (0x8101606fffU)
#define addr_map_rpg_pm_ucf_msw7_base_r() (0x8101607000U)
#define addr_map_rpg_pm_ucf_msw7_limit_r() (0x8101607fffU)
#define addr_map_rpg_pm_ucf_msw8_base_r() (0x8101608000U)
#define addr_map_rpg_pm_ucf_msw8_limit_r() (0x8101608fffU)
#define addr_map_rpg_pm_ucf_msw9_base_r() (0x8101609000U)
#define addr_map_rpg_pm_ucf_msw9_limit_r() (0x8101609fffU)
#define addr_map_rpg_pm_ucf_msw10_base_r() (0x810160a000U)
#define addr_map_rpg_pm_ucf_msw10_limit_r() (0x810160afffU)
#define addr_map_rpg_pm_ucf_msw11_base_r() (0x810160b000U)
#define addr_map_rpg_pm_ucf_msw11_limit_r() (0x810160bfffU)
#define addr_map_rpg_pm_ucf_msw12_base_r() (0x810160c000U)
#define addr_map_rpg_pm_ucf_msw12_limit_r() (0x810160cfffU)
#define addr_map_rpg_pm_ucf_msw13_base_r() (0x810160d000U)
#define addr_map_rpg_pm_ucf_msw13_limit_r() (0x810160dfffU)
#define addr_map_rpg_pm_ucf_msw14_base_r() (0x810160e000U)
#define addr_map_rpg_pm_ucf_msw14_limit_r() (0x810160efffU)
#define addr_map_rpg_pm_ucf_msw15_base_r() (0x810160f000U)
#define addr_map_rpg_pm_ucf_msw15_limit_r() (0x810160ffffU)
#define addr_map_ucf_msn0_msw_base_r() (0x8128000000U)
#define addr_map_ucf_msn0_msw_limit_r() (0x8128000080U)
#define addr_map_ucf_msn1_msw_base_r() (0x8128200000U)
#define addr_map_ucf_msn1_msw_limit_r() (0x8128200080U)
#define addr_map_ucf_msn2_msw_base_r() (0x8128400000U)
#define addr_map_ucf_msn2_msw_limit_r() (0x8128400080U)
#define addr_map_ucf_msn3_msw_base_r() (0x8128600000U)
#define addr_map_ucf_msn3_msw_limit_r() (0x8128600080U)
#define addr_map_ucf_msn4_msw_base_r() (0x8128800000U)
#define addr_map_ucf_msn4_msw_limit_r() (0x8128800080U)
#define addr_map_ucf_msn5_msw_base_r() (0x8128a00000U)
#define addr_map_ucf_msn5_msw_limit_r() (0x8128a00080U)
#define addr_map_ucf_msn6_msw_base_r() (0x8128c00000U)
#define addr_map_ucf_msn6_msw_limit_r() (0x8128c00080U)
#define addr_map_ucf_msn7_msw_base_r() (0x8128e00000U)
#define addr_map_ucf_msn7_msw_limit_r() (0x8128e00080U)
#define addr_map_ucf_msn0_slice0_base_r() (0x812a040000U)
#define addr_map_ucf_msn0_slice0_limit_r() (0x812a040080U)
#define addr_map_ucf_msn0_slice1_base_r() (0x812a140000U)
#define addr_map_ucf_msn0_slice1_limit_r() (0x812a140080U)
#define addr_map_ucf_msn1_slice0_base_r() (0x812a240000U)
#define addr_map_ucf_msn1_slice0_limit_r() (0x812a240080U)
#define addr_map_ucf_msn1_slice1_base_r() (0x812a340000U)
#define addr_map_ucf_msn1_slice1_limit_r() (0x812a340080U)
#define addr_map_ucf_msn2_slice0_base_r() (0x812a440000U)
#define addr_map_ucf_msn2_slice0_limit_r() (0x812a440080U)
#define addr_map_ucf_msn2_slice1_base_r() (0x812a540000U)
#define addr_map_ucf_msn2_slice1_limit_r() (0x812a540080U)
#define addr_map_ucf_msn3_slice0_base_r() (0x812a640000U)
#define addr_map_ucf_msn3_slice0_limit_r() (0x812a640080U)
#define addr_map_ucf_msn3_slice1_base_r() (0x812a740000U)
#define addr_map_ucf_msn3_slice1_limit_r() (0x812a740080U)
#define addr_map_ucf_msn4_slice0_base_r() (0x812a840000U)
#define addr_map_ucf_msn4_slice0_limit_r() (0x812a840080U)
#define addr_map_ucf_msn4_slice1_base_r() (0x812a940000U)
#define addr_map_ucf_msn4_slice1_limit_r() (0x812a940080U)
#define addr_map_ucf_msn5_slice0_base_r() (0x812aa40000U)
#define addr_map_ucf_msn5_slice0_limit_r() (0x812aa40080U)
#define addr_map_ucf_msn5_slice1_base_r() (0x812ab40000U)
#define addr_map_ucf_msn5_slice1_limit_r() (0x812ab40080U)
#define addr_map_ucf_msn6_slice0_base_r() (0x812ac40000U)
#define addr_map_ucf_msn6_slice0_limit_r() (0x812ac40080U)
#define addr_map_ucf_msn6_slice1_base_r() (0x812ad40000U)
#define addr_map_ucf_msn6_slice1_limit_r() (0x812ad40080U)
#define addr_map_ucf_msn7_slice0_base_r() (0x812ae40000U)
#define addr_map_ucf_msn7_slice0_limit_r() (0x812ae40080U)
#define addr_map_ucf_msn7_slice1_base_r() (0x812af40000U)
#define addr_map_ucf_msn7_slice1_limit_r() (0x812af40080U)
#define addr_map_rpg_pm_ucf_psw0_base_r() (0x8101644000U)
#define addr_map_rpg_pm_ucf_psw0_limit_r() (0x8101644fffU)
#define addr_map_rpg_pm_ucf_psw1_base_r() (0x8101645000U)
#define addr_map_rpg_pm_ucf_psw1_limit_r() (0x8101645fffU)
#define addr_map_rpg_pm_ucf_psw2_base_r() (0x8101646000U)
#define addr_map_rpg_pm_ucf_psw2_limit_r() (0x8101646fffU)
#define addr_map_rpg_pm_ucf_psw3_base_r() (0x8101647000U)
#define addr_map_rpg_pm_ucf_psw3_limit_r() (0x8101647fffU)
#define addr_map_ucf_psn0_psw_base_r() (0x8130080000U)
#define addr_map_ucf_psn0_psw_limit_r() (0x8130080020U)
#define addr_map_ucf_psn1_psw_base_r() (0x8130480000U)
#define addr_map_ucf_psn1_psw_limit_r() (0x8130480020U)
#define addr_map_ucf_psn2_psw_base_r() (0x8130880000U)
#define addr_map_ucf_psn2_psw_limit_r() (0x8130880020U)
#define addr_map_ucf_psn3_psw_base_r() (0x8130c80000U)
#define addr_map_ucf_psn3_psw_limit_r() (0x8130c80020U)
#define addr_map_rpg_pm_ucf_vddmss0_base_r() (0x8101631000U)
#define addr_map_rpg_pm_ucf_vddmss0_limit_r() (0x8101631fffU)
#define addr_map_rpg_pm_ucf_vddmss1_base_r() (0x8101632000U)
#define addr_map_rpg_pm_ucf_vddmss1_limit_r() (0x8101632fffU)
#define addr_map_ucf_csw0_base_r() (0x8122000000U)
#define addr_map_ucf_csw0_limit_r() (0x8122000080U)
#define addr_map_ucf_csw1_base_r() (0x8122400000U)
#define addr_map_ucf_csw1_limit_r() (0x8122400080U)
#define addr_map_rpg_pm_cpu_core_base_r() (0x14100000U)
#define addr_map_rpg_pm_cpu_core_base_width_v() (0x00000014U)
#define addr_map_cpucore0_base_r() (0x8132030000U)
#define addr_map_cpucore0_base_size_v() (0x00001000U)
#define addr_map_cpucore1_base_r() (0x8132130000U)
#define addr_map_cpucore1_base_size_v() (0x00001000U)
#define addr_map_cpucore2_base_r() (0x8132230000U)
#define addr_map_cpucore2_base_size_v() (0x00001000U)
#define addr_map_cpucore3_base_r() (0x8132330000U)
#define addr_map_cpucore3_base_size_v() (0x00001000U)
#define addr_map_cpucore4_base_r() (0x8132430000U)
#define addr_map_cpucore4_base_size_v() (0x00001000U)
#define addr_map_cpucore5_base_r() (0x8132530000U)
#define addr_map_cpucore5_base_size_v() (0x00001000U)
#define addr_map_cpucore6_base_r() (0x8132630000U)
#define addr_map_cpucore6_base_size_v() (0x00001000U)
#define addr_map_cpucore7_base_r() (0x8132730000U)
#define addr_map_cpucore7_base_size_v() (0x00001000U)
#define addr_map_cpucore8_base_r() (0x8132830000U)
#define addr_map_cpucore8_base_size_v() (0x00001000U)
#define addr_map_cpucore9_base_r() (0x8132930000U)
#define addr_map_cpucore9_base_size_v() (0x00001000U)
#define addr_map_cpucore10_base_r() (0x8132a30000U)
#define addr_map_cpucore10_base_size_v() (0x00001000U)
#define addr_map_cpucore11_base_r() (0x8132b30000U)
#define addr_map_cpucore11_base_size_v() (0x00001000U)
#define addr_map_cpucore12_base_r() (0x8132c30000U)
#define addr_map_cpucore12_base_size_v() (0x00001000U)
#define addr_map_cpucore13_base_r() (0x8132d30000U)
#define addr_map_cpucore13_base_size_v() (0x00001000U)
#define addr_map_pmc_misc_base_r() (0xc9c0000U)
#endif /* T264_ADDR_MAP_SOC_HWPM_H */


@@ -0,0 +1,192 @@
/* SPDX-License-Identifier: MIT */
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* This is a generated file. Do not edit.
*/
/*
* Function/Macro naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef T264_PMASYS_SOC_HWPM_H
#define T264_PMASYS_SOC_HWPM_H
#define pmasys_channel_control_user_r(i,j)\
(0x1610a10U + ((i) * 128U)) + ((j) * 64U)
#define pmasys_channel_control_user_update_bytes_f(v) (((v) & 0x1U) << 16U)
#define pmasys_channel_control_user_update_bytes_m() (0x1U << 16U)
#define pmasys_channel_control_user_update_bytes_doit_v() (0x00000001U)
#define pmasys_channel_control_user_update_bytes_doit_f() (0x10000U)
#define pmasys_channel_control_user_membuf_clear_status_m() (0x1U << 1U)
#define pmasys_channel_control_user_membuf_clear_status_doit_f() (0x2U)
#define pmasys_channel_mem_bump_r(i,j) (0x1610a14U + ((i) * 128U)) + ((j) * 64U)
#define pmasys_channel_outbase_r(i,j) (0x1610a28U + ((i) * 128U)) + ((j) * 64U)
#define pmasys_channel_outbase_ptr_f(v) (((v) & 0x7ffffffU) << 5U)
#define pmasys_channel_outbase_ptr_m() (0x7ffffffU << 5U)
#define pmasys_channel_outbase_ptr_v(r) (((r) >> 5U) & 0x7ffffffU)
#define pmasys_channel_outbase_ptr_init_f() (0x0U)
#define pmasys_channel_outbaseupper_r(i,j)\
(0x1610a2cU + ((i) * 128U)) + ((j) * 64U)
#define pmasys_channel_outbaseupper_ptr_f(v) (((v) & 0x1ffffffU) << 0U)
#define pmasys_channel_outbaseupper_ptr_m() (0x1ffffffU << 0U)
#define pmasys_channel_outbaseupper_ptr_v(r) (((r) >> 0U) & 0x1ffffffU)
#define pmasys_channel_outbaseupper_ptr_init_f() (0x0U)
#define pmasys_channel_outsize_r(i,j) (0x1610a30U + ((i) * 128U)) + ((j) * 64U)
#define pmasys_channel_outsize_numbytes_f(v) (((v) & 0x7ffffffU) << 5U)
#define pmasys_channel_outsize_numbytes_m() (0x7ffffffU << 5U)
#define pmasys_channel_outsize_numbytes_init_f() (0x0U)
#define pmasys_channel_mem_head_r(i,j) (0x1610a34U + ((i) * 128U)) + ((j) * 64U)
#define pmasys_channel_mem_head_ptr_m() (0xfffffffU << 4U)
#define pmasys_channel_mem_head_ptr_init_f() (0x0U)
#define pmasys_channel_mem_bytes_r(i,j)\
(0x1610a38U + ((i) * 128U)) + ((j) * 64U)
#define pmasys_channel_mem_bytes_numbytes_m() (0xfffffffU << 4U)
#define pmasys_channel_mem_bytes_numbytes_init_f() (0x0U)
#define pmasys_channel_mem_bytes_addr_r(i,j)\
(0x1610a3cU + ((i) * 128U)) + ((j) * 64U)
#define pmasys_channel_mem_bytes_addr_ptr_f(v) (((v) & 0x3fffffffU) << 2U)
#define pmasys_channel_mem_bytes_addr_ptr_m() (0x3fffffffU << 2U)
#define pmasys_channel_mem_bytes_addr_ptr_init_f() (0x0U)
#define pmasys_cblock_bpc_mem_block_r(i) (0x1611e04U + ((i)*32U))
#define pmasys_cblock_bpc_mem_block_base_m() (0xffffffffU << 0U)
#define pmasys_cblock_bpc_mem_blockupper_r(i) (0x1611e08U + ((i)*32U))
#define pmasys_cblock_bpc_mem_blockupper_valid_f(v) (((v) & 0x1U) << 31U)
#define pmasys_cblock_bpc_mem_blockupper_valid_false_v() (0x00000000U)
#define pmasys_cblock_bpc_mem_blockupper_valid_true_v() (0x00000001U)
#define pmasys_channel_config_user_r(i,j)\
(0x1610a24U + ((i) * 128U)) + ((j) * 64U)
#define pmasys_channel_config_user_stream_f(v) (((v) & 0x1U) << 0U)
#define pmasys_channel_config_user_stream_m() (0x1U << 0U)
#define pmasys_channel_config_user_stream_disable_f() (0x0U)
#define pmasys_channel_config_user_coalesce_timeout_cycles_f(v)\
(((v) & 0x7U) << 24U)
#define pmasys_channel_config_user_coalesce_timeout_cycles_m() (0x7U << 24U)
#define pmasys_channel_config_user_coalesce_timeout_cycles__prod_v()\
(0x00000004U)
#define pmasys_channel_config_user_coalesce_timeout_cycles__prod_f()\
(0x4000000U)
#define pmasys_channel_status_r(i,j) (0x1610a00U + ((i) * 128U)) + ((j) * 64U)
#define pmasys_channel_status_engine_status_m() (0x7U << 0U)
#define pmasys_channel_status_engine_status_empty_v() (0x00000000U)
#define pmasys_channel_status_engine_status_empty_f() (0x0U)
#define pmasys_channel_status_engine_status_active_v() (0x00000001U)
#define pmasys_channel_status_engine_status_paused_v() (0x00000002U)
#define pmasys_channel_status_engine_status_quiescent_v() (0x00000003U)
#define pmasys_channel_status_engine_status_stalled_v() (0x00000005U)
#define pmasys_channel_status_engine_status_faulted_v() (0x00000006U)
#define pmasys_channel_status_engine_status_halted_v() (0x00000007U)
#define pmasys_channel_status_membuf_status_f(v) (((v) & 0x1U) << 16U)
#define pmasys_channel_status_membuf_status_m() (0x1U << 16U)
#define pmasys_channel_status_membuf_status_v(r) (((r) >> 16U) & 0x1U)
#define pmasys_channel_status_membuf_status_overflowed_v() (0x00000001U)
#define pmasys_channel_status_membuf_status_init_f() (0x0U)
#define pmasys_command_slice_trigger_start_mask0_r(i) (0x1611128U + ((i)*144U))
#define pmasys_command_slice_trigger_start_mask0_engine_m() (0xffffffffU << 0U)
#define pmasys_command_slice_trigger_start_mask0_engine_init_f() (0x0U)
#define pmasys_command_slice_trigger_start_mask1_r(i) (0x161112cU + ((i)*144U))
#define pmasys_command_slice_trigger_start_mask1_engine_m() (0xffffffffU << 0U)
#define pmasys_command_slice_trigger_start_mask1_engine_init_f() (0x0U)
#define pmasys_command_slice_trigger_stop_mask0_r(i) (0x1611130U + ((i)*144U))
#define pmasys_command_slice_trigger_stop_mask0_engine_m() (0xffffffffU << 0U)
#define pmasys_command_slice_trigger_stop_mask0_engine_init_f() (0x0U)
#define pmasys_command_slice_trigger_stop_mask1_r(i) (0x1611134U + ((i)*144U))
#define pmasys_command_slice_trigger_stop_mask1_engine_m() (0xffffffffU << 0U)
#define pmasys_command_slice_trigger_stop_mask1_engine_init_f() (0x0U)
#define pmasys_command_slice_trigger_config_user_r(i) (0x161111cU + ((i)*144U))
#define pmasys_command_slice_trigger_config_user_pma_pulse_f(v)\
(((v) & 0x1U) << 0U)
#define pmasys_command_slice_trigger_config_user_pma_pulse_m() (0x1U << 0U)
#define pmasys_command_slice_trigger_config_user_pma_pulse_disable_v()\
(0x00000000U)
#define pmasys_command_slice_trigger_config_user_pma_pulse_disable_f() (0x0U)
#define pmasys_command_slice_trigger_config_user_record_stream_f(v)\
(((v) & 0x1U) << 8U)
#define pmasys_command_slice_trigger_config_user_record_stream_m() (0x1U << 8U)
#define pmasys_command_slice_trigger_config_user_record_stream_disable_v()\
(0x00000000U)
#define pmasys_command_slice_trigger_config_user_record_stream_disable_f()\
(0x0U)
#define pmasys_streaming_capabilities1_r() (0x16109f4U)
#define pmasys_streaming_capabilities1_local_credits_f(v) (((v) & 0x1ffU) << 0U)
#define pmasys_streaming_capabilities1_local_credits_m() (0x1ffU << 0U)
#define pmasys_streaming_capabilities1_local_credits_init_v() (0x00000100U)
#define pmasys_streaming_capabilities1_total_credits_f(v) (((v) & 0x7ffU) << 9U)
#define pmasys_streaming_capabilities1_total_credits_m() (0x7ffU << 9U)
#define pmasys_streaming_capabilities1_total_credits_v(r) (((r) >> 9U) & 0x7ffU)
#define pmasys_streaming_capabilities1_total_credits_init_f() (0x20000U)
#define pmasys_command_slice_trigger_mask_secure0_r(i) (0x1611110U + ((i)*144U))
#define pmasys_command_slice_trigger_mask_secure0_engine_f(v)\
(((v) & 0xffffffffU) << 0U)
#define pmasys_command_slice_trigger_mask_secure0_engine_m() (0xffffffffU << 0U)
#define pmasys_command_slice_record_select_secure_r(i) (0x1611180U + ((i)*144U))
#define pmasys_command_slice_record_select_secure_trigger_select_f(v)\
(((v) & 0x3fU) << 0U)
#define pmasys_command_slice_record_select_secure_trigger_select_m()\
(0x3fU << 0U)
#define pmasys_profiling_cg2_secure_r() (0x1610844U)
#define pmasys_profiling_cg2_secure_slcg_f(v) (((v) & 0x1U) << 0U)
#define pmasys_profiling_cg2_secure_slcg_m() (0x1U << 0U)
#define pmasys_profiling_cg2_secure_slcg_enabled_v() (0x00000000U)
#define pmasys_profiling_cg2_secure_slcg_enabled_f() (0x0U)
#define pmasys_profiling_cg2_secure_slcg__prod_v() (0x00000000U)
#define pmasys_profiling_cg2_secure_slcg__prod_f() (0x0U)
#define pmasys_profiling_cg2_secure_slcg_disabled_v() (0x00000001U)
#define pmasys_profiling_cg2_secure_slcg_disabled_f() (0x1U)
#define pmasys_profiling_cg1_secure_r() (0x1610848U)
#define pmasys_profiling_cg1_secure_flcg_f(v) (((v) & 0x1U) << 31U)
#define pmasys_profiling_cg1_secure_flcg_m() (0x1U << 31U)
#define pmasys_profiling_cg1_secure_flcg_enabled_v() (0x00000001U)
#define pmasys_profiling_cg1_secure_flcg_enabled_f() (0x80000000U)
#define pmasys_profiling_cg1_secure_flcg__prod_v() (0x00000001U)
#define pmasys_profiling_cg1_secure_flcg__prod_f() (0x80000000U)
#define pmasys_profiling_cg1_secure_flcg_disabled_v() (0x00000000U)
#define pmasys_profiling_cg1_secure_flcg_disabled_f() (0x0U)
#endif /* T264_PMASYS_SOC_HWPM_H */
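
To make the macro naming convention documented at the top of this header concrete, here is a minimal usage sketch built on the pmasys_channel_outbase_ptr_* macros defined above. Only the macros come from the header; the two helper functions are hypothetical.

#include <linux/types.h>

/* Hypothetical helpers showing the _m()/_f()/_v() pattern. */
static u32 set_outbase_ptr(u32 reg, u32 ptr)
{
	/* Clear the PTR field with ~_m(), then OR in the shifted value from _f(). */
	reg &= ~pmasys_channel_outbase_ptr_m();
	reg |= pmasys_channel_outbase_ptr_f(ptr);
	return reg;
}

static u32 get_outbase_ptr(u32 reg)
{
	/* _v() shifts the field down to bit 0 for direct comparison. */
	return pmasys_channel_outbase_ptr_v(reg);
}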


@@ -0,0 +1,170 @@
/* SPDX-License-Identifier: MIT */
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* This is a generated file. Do not edit.
*/
/*
* Function/Macro naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef T264_PMMSYS_SOC_HWPM_H
#define T264_PMMSYS_SOC_HWPM_H
#define pmmsys_perdomain_offset_v() (0x00001000U)
#define pmmsys_user_channel_register_stride_v() (0x00000020U)
#define pmmsys_num_user_command_slices_v() (0x00000002U)
#define pmmsys_num_cblocks_v() (0x00000001U)
#define pmmsys_num_streaming_channels_v() (0x00000002U)
#define pmmsys_num_channels_per_cblock_v() (0x00000002U)
#define pmmsys_cblock_stride_v() (0x00000020U)
#define pmmsys_channel_stride_v() (0x00000010U)
#define pmmsys_dg_bitmap_array_size_v() (0x00000008U)
#define pmmsys_control_r(i) (0x160009cU + ((i)*4096U))
#define pmmsys_control_mode_f(v) (((v) & 0x7U) << 0U)
#define pmmsys_control_mode_m() (0x7U << 0U)
#define pmmsys_control_mode_disable_v() (0x00000000U)
#define pmmsys_control_mode_disable_f() (0x0U)
#define pmmsys_control_mode_a_v() (0x00000001U)
#define pmmsys_control_mode_b_v() (0x00000002U)
#define pmmsys_control_mode_c_v() (0x00000003U)
#define pmmsys_control_mode_e_v() (0x00000005U)
#define pmmsys_control_mode_null_v() (0x00000007U)
#define pmmsys_control_o() (0x9cU)
#define pmmsys_enginestatus_r(i) (0x16000c8U + ((i)*4096U))
#define pmmsys_enginestatus_enable_f(v) (((v) & 0x1U) << 8U)
#define pmmsys_enginestatus_enable_m() (0x1U << 8U)
#define pmmsys_enginestatus_enable_out_v() (0x00000001U)
#define pmmsys_enginestatus_enable_out_f() (0x100U)
#define pmmsys_enginestatus_o() (0xc8U)
#define pmmsys_secure_config_r(i) (0x160012cU + ((i)*4096U))
#define pmmsys_secure_config_o() (0x12cU)
#define pmmsys_secure_config_cmd_slice_id_f(v) (((v) & 0x1fU) << 0U)
#define pmmsys_secure_config_cmd_slice_id_m() (0x1fU << 0U)
#define pmmsys_secure_config_channel_id_f(v) (((v) & 0x3U) << 8U)
#define pmmsys_secure_config_channel_id_m() (0x3U << 8U)
#define pmmsys_secure_config_cblock_id_f(v) (((v) & 0xfU) << 11U)
#define pmmsys_secure_config_cblock_id_m() (0xfU << 11U)
#define pmmsys_secure_config_dg_idx_v(r) (((r) >> 16U) & 0xffU)
#define pmmsys_secure_config_mapped_f(v) (((v) & 0x1U) << 28U)
#define pmmsys_secure_config_mapped_m() (0x1U << 28U)
#define pmmsys_secure_config_mapped_false_f() (0x0U)
#define pmmsys_secure_config_mapped_true_f() (0x10000000U)
#define pmmsys_secure_config_use_prog_dg_idx_f(v) (((v) & 0x1U) << 30U)
#define pmmsys_secure_config_use_prog_dg_idx_m() (0x1U << 30U)
#define pmmsys_secure_config_use_prog_dg_idx_false_f() (0x0U)
#define pmmsys_secure_config_use_prog_dg_idx_true_f() (0x40000000U)
#define pmmsys_secure_config_command_pkt_decoder_f(v) (((v) & 0x1U) << 31U)
#define pmmsys_secure_config_command_pkt_decoder_m() (0x1U << 31U)
#define pmmsys_secure_config_command_pkt_decoder_disable_f() (0x0U)
#define pmmsys_secure_config_command_pkt_decoder_enable_f() (0x80000000U)
#define pmmsys_router_user_dgmap_status_secure_r(i) (0x1612050U + ((i)*4U))
#define pmmsys_router_user_dgmap_status_secure__size_1_v() (0x00000008U)
#define pmmsys_router_user_dgmap_status_secure_dg_s() (1U)
#define pmmsys_router_user_dgmap_status_secure_dg_not_mapped_v() (0x00000000U)
#define pmmsys_router_user_dgmap_status_secure_dg_mapped_v() (0x00000001U)
#define pmmsys_router_enginestatus_r() (0x1612080U)
#define pmmsys_router_enginestatus_status_f(v) (((v) & 0x7U) << 0U)
#define pmmsys_router_enginestatus_status_m() (0x7U << 0U)
#define pmmsys_router_enginestatus_status_v(r) (((r) >> 0U) & 0x7U)
#define pmmsys_router_enginestatus_status_empty_v() (0x00000000U)
#define pmmsys_router_enginestatus_status_active_v() (0x00000001U)
#define pmmsys_router_enginestatus_status_paused_v() (0x00000002U)
#define pmmsys_router_enginestatus_status_quiescent_v() (0x00000003U)
#define pmmsys_router_enginestatus_status_stalled_v() (0x00000005U)
#define pmmsys_router_enginestatus_status_faulted_v() (0x00000006U)
#define pmmsys_router_enginestatus_status_halted_v() (0x00000007U)
#define pmmsys_router_enginestatus_merged_perfmon_status_f(v)\
(((v) & 0x7U) << 8U)
#define pmmsys_router_enginestatus_merged_perfmon_status_m() (0x7U << 8U)
#define pmmsys_router_enginestatus_merged_perfmon_status_v(r)\
(((r) >> 8U) & 0x7U)
#define pmmsys_router_profiling_dg_cg1_secure_r() (0x1612094U)
#define pmmsys_router_profiling_dg_cg1_secure_flcg_f(v) (((v) & 0x1U) << 31U)
#define pmmsys_router_profiling_dg_cg1_secure_flcg_m() (0x1U << 31U)
#define pmmsys_router_profiling_dg_cg1_secure_flcg__prod_v() (0x00000001U)
#define pmmsys_router_profiling_dg_cg1_secure_flcg__prod_f() (0x80000000U)
#define pmmsys_router_profiling_dg_cg1_secure_flcg_disabled_v() (0x00000000U)
#define pmmsys_router_profiling_dg_cg1_secure_flcg_disabled_f() (0x0U)
#define pmmsys_router_profiling_dg_cg1_secure_flcg_enabled_v() (0x00000001U)
#define pmmsys_router_profiling_dg_cg1_secure_flcg_enabled_f() (0x80000000U)
#define pmmsys_router_profiling_cg1_secure_r() (0x1612098U)
#define pmmsys_router_profiling_cg1_secure_flcg_f(v) (((v) & 0x1U) << 31U)
#define pmmsys_router_profiling_cg1_secure_flcg_m() (0x1U << 31U)
#define pmmsys_router_profiling_cg1_secure_flcg__prod_v() (0x00000001U)
#define pmmsys_router_profiling_cg1_secure_flcg__prod_f() (0x80000000U)
#define pmmsys_router_profiling_cg1_secure_flcg_disabled_v() (0x00000000U)
#define pmmsys_router_profiling_cg1_secure_flcg_disabled_f() (0x0U)
#define pmmsys_router_profiling_cg1_secure_flcg_enabled_v() (0x00000001U)
#define pmmsys_router_profiling_cg1_secure_flcg_enabled_f() (0x80000000U)
#define pmmsys_router_perfmon_cg2_secure_r() (0x161209cU)
#define pmmsys_router_perfmon_cg2_secure_slcg_f(v) (((v) & 0x1U) << 31U)
#define pmmsys_router_perfmon_cg2_secure_slcg_m() (0x1U << 31U)
#define pmmsys_router_perfmon_cg2_secure_slcg__prod_v() (0x00000000U)
#define pmmsys_router_perfmon_cg2_secure_slcg__prod_f() (0x0U)
#define pmmsys_router_perfmon_cg2_secure_slcg_disabled_v() (0x00000001U)
#define pmmsys_router_perfmon_cg2_secure_slcg_disabled_f() (0x80000000U)
#define pmmsys_router_perfmon_cg2_secure_slcg_enabled_v() (0x00000000U)
#define pmmsys_router_perfmon_cg2_secure_slcg_enabled_f() (0x0U)
#define pmmsys_router_profiling_cg2_secure_r() (0x1612090U)
#define pmmsys_router_profiling_cg2_secure_slcg_f(v) (((v) & 0x1U) << 31U)
#define pmmsys_router_profiling_cg2_secure_slcg_m() (0x1U << 31U)
#define pmmsys_router_profiling_cg2_secure_slcg__prod_v() (0x00000000U)
#define pmmsys_router_profiling_cg2_secure_slcg__prod_f() (0x0U)
#define pmmsys_router_profiling_cg2_secure_slcg_disabled_v() (0x00000001U)
#define pmmsys_router_profiling_cg2_secure_slcg_disabled_f() (0x80000000U)
#define pmmsys_router_profiling_cg2_secure_slcg_enabled_v() (0x00000000U)
#define pmmsys_router_profiling_cg2_secure_slcg_enabled_f() (0x0U)
#define pmmsys_user_channel_config_secure_r(i,j)\
(0x16120b8U + ((i) * 32U)) + ((j) * 16U)
#define pmmsys_user_channel_config_secure_hs_credits_m() (0x1ffU << 0U)
#define pmmsys_user_channel_config_secure_hs_credits_init_f() (0x0U)
#endif /* T264_PMMSYS_SOC_HWPM_H */


File diff suppressed because it is too large.


@@ -0,0 +1,107 @@
/* SPDX-License-Identifier: MIT */
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* This is a generated file. Do not edit.
*
* Steps to regenerate:
* python3 ip_files_generator.py <soc_chip> <IP_name> [<dir_name>]
*/
#ifndef T264_HWPM_IP_CPU_H
#define T264_HWPM_IP_CPU_H
#if defined(CONFIG_T264_HWPM_IP_CPU)
#define T264_HWPM_ACTIVE_IP_CPU T264_HWPM_IP_CPU,
/* This data should ideally be available in HW headers */
#define T264_HWPM_IP_CPU_NUM_INSTANCES 14U
#define T264_HWPM_IP_CPU_NUM_CORE_ELEMENT_PER_INST 1U
#define T264_HWPM_IP_CPU_NUM_PERFMON_PER_INST 1U
#define T264_HWPM_IP_CPU_NUM_PERFMUX_PER_INST 1U
#define T264_HWPM_IP_CPU_NUM_BROADCAST_PER_INST 0U
extern struct hwpm_ip t264_hwpm_ip_cpu;
#define addr_map_rpg_pm_cpu_core_size() BIT(0x00000014U)
#define addr_map_rpg_pm_cpu_core0_base_r() \
(addr_map_rpg_pm_cpu_core_base_r())
#define addr_map_rpg_pm_cpu_core0_limit_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0x00FFF)
#define addr_map_rpg_pm_cpu_core1_base_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0x10000)
#define addr_map_rpg_pm_cpu_core1_limit_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0x10FFF)
#define addr_map_rpg_pm_cpu_core2_base_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0x20000)
#define addr_map_rpg_pm_cpu_core2_limit_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0x20FFF)
#define addr_map_rpg_pm_cpu_core3_base_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0x30000)
#define addr_map_rpg_pm_cpu_core3_limit_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0x30FFF)
#define addr_map_rpg_pm_cpu_core4_base_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0x40000)
#define addr_map_rpg_pm_cpu_core4_limit_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0x40FFF)
#define addr_map_rpg_pm_cpu_core5_base_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0x50000)
#define addr_map_rpg_pm_cpu_core5_limit_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0x50FFF)
#define addr_map_rpg_pm_cpu_core6_base_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0x60000)
#define addr_map_rpg_pm_cpu_core6_limit_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0x60FFF)
#define addr_map_rpg_pm_cpu_core7_base_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0x70000)
#define addr_map_rpg_pm_cpu_core7_limit_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0x70FFF)
#define addr_map_rpg_pm_cpu_core8_base_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0x80000)
#define addr_map_rpg_pm_cpu_core8_limit_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0x80FFF)
#define addr_map_rpg_pm_cpu_core9_base_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0x90000)
#define addr_map_rpg_pm_cpu_core9_limit_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0x90FFF)
#define addr_map_rpg_pm_cpu_core10_base_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0xa0000)
#define addr_map_rpg_pm_cpu_core10_limit_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0xa0FFF)
#define addr_map_rpg_pm_cpu_core11_base_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0xb0000)
#define addr_map_rpg_pm_cpu_core11_limit_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0xb0FFF)
#define addr_map_rpg_pm_cpu_core12_base_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0xc0000)
#define addr_map_rpg_pm_cpu_core12_limit_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0xc0FFF)
#define addr_map_rpg_pm_cpu_core13_base_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0xd0000)
#define addr_map_rpg_pm_cpu_core13_limit_r() \
(addr_map_rpg_pm_cpu_core_base_r() + 0xd0FFF)
#else
#define T264_HWPM_ACTIVE_IP_CPU
#endif
#endif /* T264_HWPM_IP_CPU_H */
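
The per-core macros above all encode one pattern: each CPU core PM aperture is 0x1000 bytes, placed at a 0x10000 stride from addr_map_rpg_pm_cpu_core_base_r(). A small sketch of that relationship, with hypothetical helper names, follows.

#include <linux/types.h>

/* Hypothetical helpers; derived directly from the per-core macros above. */
static inline u64 t264_cpu_core_pm_base(u32 core)
{
	/* Valid for core < T264_HWPM_IP_CPU_NUM_INSTANCES (14). */
	return (u64)addr_map_rpg_pm_cpu_core_base_r() + ((u64)core * 0x10000ULL);
}

static inline u64 t264_cpu_core_pm_limit(u32 core)
{
	/* Each aperture spans 0x1000 bytes. */
	return t264_cpu_core_pm_base(core) + 0xFFFULL;
}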


@@ -0,0 +1,714 @@
// SPDX-License-Identifier: MIT
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* This is a generated file. Do not edit.
*
* Steps to regenerate:
* python3 ip_files_generator.py <soc_chip> <IP_name> [<dir_name>]
*/
#include "t264_mss_channel.h"
#include <tegra_hwpm.h>
#include <hal/t264/t264_regops_allowlist.h>
#include <hal/t264/t264_perfmon_device_index.h>
#include <hal/t264/hw/t264_addr_map_soc_hwpm.h>
static struct hwpm_ip_aperture t264_mss_channel_inst0_perfmon_element_static_array[
T264_HWPM_IP_MSS_CHANNEL_NUM_PERFMON_PER_INST] = {
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 1U,
.dt_mmio = NULL,
.name = "perfmon_msschannel_parta0",
.device_index = T264_MSS_CHANNEL_PARTA0_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_mss0_base_r(),
.end_abs_pa = addr_map_rpg_pm_mss0_limit_r(),
.start_pa = addr_map_rpg_pm_mss0_base_r(),
.end_pa = addr_map_rpg_pm_mss0_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 1U,
.element_index_mask = BIT(1),
.element_index = 2U,
.dt_mmio = NULL,
.name = "perfmon_msschannel_parta1",
.device_index = T264_MSS_CHANNEL_PARTA1_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_mss1_base_r(),
.end_abs_pa = addr_map_rpg_pm_mss1_limit_r(),
.start_pa = addr_map_rpg_pm_mss1_base_r(),
.end_pa = addr_map_rpg_pm_mss1_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 2U,
.element_index_mask = BIT(2),
.element_index = 3U,
.dt_mmio = NULL,
.name = "perfmon_msschannel_parta2",
.device_index = T264_MSS_CHANNEL_PARTA2_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_mss2_base_r(),
.end_abs_pa = addr_map_rpg_pm_mss2_limit_r(),
.start_pa = addr_map_rpg_pm_mss2_base_r(),
.end_pa = addr_map_rpg_pm_mss2_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 3U,
.element_index_mask = BIT(3),
.element_index = 4U,
.dt_mmio = NULL,
.name = "perfmon_msschannel_parta3",
.device_index = T264_MSS_CHANNEL_PARTA3_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_mss3_base_r(),
.end_abs_pa = addr_map_rpg_pm_mss3_limit_r(),
.start_pa = addr_map_rpg_pm_mss3_base_r(),
.end_pa = addr_map_rpg_pm_mss3_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 4U,
.element_index_mask = BIT(4),
.element_index = 5U,
.dt_mmio = NULL,
.name = "perfmon_msschannel_partb0",
.device_index = T264_MSS_CHANNEL_PARTB0_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_mss4_base_r(),
.end_abs_pa = addr_map_rpg_pm_mss4_limit_r(),
.start_pa = addr_map_rpg_pm_mss4_base_r(),
.end_pa = addr_map_rpg_pm_mss4_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 5U,
.element_index_mask = BIT(5),
.element_index = 6U,
.dt_mmio = NULL,
.name = "perfmon_msschannel_partb1",
.device_index = T264_MSS_CHANNEL_PARTB1_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_mss5_base_r(),
.end_abs_pa = addr_map_rpg_pm_mss5_limit_r(),
.start_pa = addr_map_rpg_pm_mss5_base_r(),
.end_pa = addr_map_rpg_pm_mss5_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 6U,
.element_index_mask = BIT(6),
.element_index = 7U,
.dt_mmio = NULL,
.name = "perfmon_msschannel_partb2",
.device_index = T264_MSS_CHANNEL_PARTB2_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_mss6_base_r(),
.end_abs_pa = addr_map_rpg_pm_mss6_limit_r(),
.start_pa = addr_map_rpg_pm_mss6_base_r(),
.end_pa = addr_map_rpg_pm_mss6_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 7U,
.element_index_mask = BIT(7),
.element_index = 8U,
.dt_mmio = NULL,
.name = "perfmon_msschannel_partb3",
.device_index = T264_MSS_CHANNEL_PARTB3_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_mss7_base_r(),
.end_abs_pa = addr_map_rpg_pm_mss7_limit_r(),
.start_pa = addr_map_rpg_pm_mss7_base_r(),
.end_pa = addr_map_rpg_pm_mss7_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 8U,
.element_index_mask = BIT(8),
.element_index = 9U,
.dt_mmio = NULL,
.name = "perfmon_msschannel_partc0",
.device_index = T264_MSS_CHANNEL_PARTC0_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_mss8_base_r(),
.end_abs_pa = addr_map_rpg_pm_mss8_limit_r(),
.start_pa = addr_map_rpg_pm_mss8_base_r(),
.end_pa = addr_map_rpg_pm_mss8_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 9U,
.element_index_mask = BIT(9),
.element_index = 10U,
.dt_mmio = NULL,
.name = "perfmon_msschannel_partc1",
.device_index = T264_MSS_CHANNEL_PARTC1_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_mss9_base_r(),
.end_abs_pa = addr_map_rpg_pm_mss9_limit_r(),
.start_pa = addr_map_rpg_pm_mss9_base_r(),
.end_pa = addr_map_rpg_pm_mss9_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 10U,
.element_index_mask = BIT(10),
.element_index = 11U,
.dt_mmio = NULL,
.name = "perfmon_msschannel_partc2",
.device_index = T264_MSS_CHANNEL_PARTC2_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_mss10_base_r(),
.end_abs_pa = addr_map_rpg_pm_mss10_limit_r(),
.start_pa = addr_map_rpg_pm_mss10_base_r(),
.end_pa = addr_map_rpg_pm_mss10_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 11U,
.element_index_mask = BIT(11),
.element_index = 12U,
.dt_mmio = NULL,
.name = "perfmon_msschannel_partc3",
.device_index = T264_MSS_CHANNEL_PARTC3_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_mss11_base_r(),
.end_abs_pa = addr_map_rpg_pm_mss11_limit_r(),
.start_pa = addr_map_rpg_pm_mss11_base_r(),
.end_pa = addr_map_rpg_pm_mss11_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 12U,
.element_index_mask = BIT(12),
.element_index = 13U,
.dt_mmio = NULL,
.name = "perfmon_msschannel_partd0",
.device_index = T264_MSS_CHANNEL_PARTD0_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_mss12_base_r(),
.end_abs_pa = addr_map_rpg_pm_mss12_limit_r(),
.start_pa = addr_map_rpg_pm_mss12_base_r(),
.end_pa = addr_map_rpg_pm_mss12_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 13U,
.element_index_mask = BIT(13),
.element_index = 14U,
.dt_mmio = NULL,
.name = "perfmon_msschannel_partd1",
.device_index = T264_MSS_CHANNEL_PARTD1_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_mss13_base_r(),
.end_abs_pa = addr_map_rpg_pm_mss13_limit_r(),
.start_pa = addr_map_rpg_pm_mss13_base_r(),
.end_pa = addr_map_rpg_pm_mss13_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 14U,
.element_index_mask = BIT(14),
.element_index = 15U,
.dt_mmio = NULL,
.name = "perfmon_msschannel_partd2",
.device_index = T264_MSS_CHANNEL_PARTD2_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_mss14_base_r(),
.end_abs_pa = addr_map_rpg_pm_mss14_limit_r(),
.start_pa = addr_map_rpg_pm_mss14_base_r(),
.end_pa = addr_map_rpg_pm_mss14_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 15U,
.element_index_mask = BIT(15),
.element_index = 16U,
.dt_mmio = NULL,
.name = "perfmon_msschannel_partd3",
.device_index = T264_MSS_CHANNEL_PARTD3_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_mss15_base_r(),
.end_abs_pa = addr_map_rpg_pm_mss15_limit_r(),
.start_pa = addr_map_rpg_pm_mss15_base_r(),
.end_pa = addr_map_rpg_pm_mss15_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_mss_channel_inst0_perfmux_element_static_array[
T264_HWPM_IP_MSS_CHANNEL_NUM_PERFMUX_PER_INST] = {
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 1U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc0_base_r(),
.end_abs_pa = addr_map_mc0_limit_r(),
.start_pa = addr_map_mc0_base_r(),
.end_pa = addr_map_mc0_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_channel_alist,
.alist_size = ARRAY_SIZE(t264_mss_channel_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 1U,
.element_index_mask = BIT(1),
.element_index = 2U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc1_base_r(),
.end_abs_pa = addr_map_mc1_limit_r(),
.start_pa = addr_map_mc1_base_r(),
.end_pa = addr_map_mc1_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_channel_alist,
.alist_size = ARRAY_SIZE(t264_mss_channel_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 2U,
.element_index_mask = BIT(2),
.element_index = 3U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc2_base_r(),
.end_abs_pa = addr_map_mc2_limit_r(),
.start_pa = addr_map_mc2_base_r(),
.end_pa = addr_map_mc2_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_channel_alist,
.alist_size = ARRAY_SIZE(t264_mss_channel_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 3U,
.element_index_mask = BIT(3),
.element_index = 4U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc3_base_r(),
.end_abs_pa = addr_map_mc3_limit_r(),
.start_pa = addr_map_mc3_base_r(),
.end_pa = addr_map_mc3_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_channel_alist,
.alist_size = ARRAY_SIZE(t264_mss_channel_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 4U,
.element_index_mask = BIT(4),
.element_index = 5U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc4_base_r(),
.end_abs_pa = addr_map_mc4_limit_r(),
.start_pa = addr_map_mc4_base_r(),
.end_pa = addr_map_mc4_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_channel_alist,
.alist_size = ARRAY_SIZE(t264_mss_channel_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 5U,
.element_index_mask = BIT(5),
.element_index = 6U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc5_base_r(),
.end_abs_pa = addr_map_mc5_limit_r(),
.start_pa = addr_map_mc5_base_r(),
.end_pa = addr_map_mc5_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_channel_alist,
.alist_size = ARRAY_SIZE(t264_mss_channel_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 6U,
.element_index_mask = BIT(6),
.element_index = 7U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc6_base_r(),
.end_abs_pa = addr_map_mc6_limit_r(),
.start_pa = addr_map_mc6_base_r(),
.end_pa = addr_map_mc6_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_channel_alist,
.alist_size = ARRAY_SIZE(t264_mss_channel_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 7U,
.element_index_mask = BIT(7),
.element_index = 8U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc7_base_r(),
.end_abs_pa = addr_map_mc7_limit_r(),
.start_pa = addr_map_mc7_base_r(),
.end_pa = addr_map_mc7_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_channel_alist,
.alist_size = ARRAY_SIZE(t264_mss_channel_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 8U,
.element_index_mask = BIT(8),
.element_index = 9U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc8_base_r(),
.end_abs_pa = addr_map_mc8_limit_r(),
.start_pa = addr_map_mc8_base_r(),
.end_pa = addr_map_mc8_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_channel_alist,
.alist_size = ARRAY_SIZE(t264_mss_channel_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 9U,
.element_index_mask = BIT(9),
.element_index = 10U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc9_base_r(),
.end_abs_pa = addr_map_mc9_limit_r(),
.start_pa = addr_map_mc9_base_r(),
.end_pa = addr_map_mc9_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_channel_alist,
.alist_size = ARRAY_SIZE(t264_mss_channel_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 10U,
.element_index_mask = BIT(10),
.element_index = 11U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc10_base_r(),
.end_abs_pa = addr_map_mc10_limit_r(),
.start_pa = addr_map_mc10_base_r(),
.end_pa = addr_map_mc10_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_channel_alist,
.alist_size = ARRAY_SIZE(t264_mss_channel_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 11U,
.element_index_mask = BIT(11),
.element_index = 12U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc11_base_r(),
.end_abs_pa = addr_map_mc11_limit_r(),
.start_pa = addr_map_mc11_base_r(),
.end_pa = addr_map_mc11_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_channel_alist,
.alist_size = ARRAY_SIZE(t264_mss_channel_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 12U,
.element_index_mask = BIT(12),
.element_index = 13U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc12_base_r(),
.end_abs_pa = addr_map_mc12_limit_r(),
.start_pa = addr_map_mc12_base_r(),
.end_pa = addr_map_mc12_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_channel_alist,
.alist_size = ARRAY_SIZE(t264_mss_channel_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 13U,
.element_index_mask = BIT(13),
.element_index = 14U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc13_base_r(),
.end_abs_pa = addr_map_mc13_limit_r(),
.start_pa = addr_map_mc13_base_r(),
.end_pa = addr_map_mc13_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_channel_alist,
.alist_size = ARRAY_SIZE(t264_mss_channel_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 14U,
.element_index_mask = BIT(14),
.element_index = 15U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc14_base_r(),
.end_abs_pa = addr_map_mc14_limit_r(),
.start_pa = addr_map_mc14_base_r(),
.end_pa = addr_map_mc14_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_channel_alist,
.alist_size = ARRAY_SIZE(t264_mss_channel_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 15U,
.element_index_mask = BIT(15),
.element_index = 16U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc15_base_r(),
.end_abs_pa = addr_map_mc15_limit_r(),
.start_pa = addr_map_mc15_base_r(),
.end_pa = addr_map_mc15_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_channel_alist,
.alist_size = ARRAY_SIZE(t264_mss_channel_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_mss_channel_inst0_broadcast_element_static_array[
T264_HWPM_IP_MSS_CHANNEL_NUM_BROADCAST_PER_INST] = {
{
.element_type = IP_ELEMENT_BROADCAST,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mcb_base_r(),
.end_abs_pa = addr_map_mcb_limit_r(),
.start_pa = addr_map_mcb_base_r(),
.end_pa = addr_map_mcb_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_channel_alist,
.alist_size = ARRAY_SIZE(t264_mss_channel_alist),
.fake_registers = NULL,
},
};
/* IP instance array */
static struct hwpm_ip_inst t264_mss_channel_inst_static_array[
T264_HWPM_IP_MSS_CHANNEL_NUM_INSTANCES] = {
{
.hw_inst_mask = BIT(0),
.num_core_elements_per_inst =
T264_HWPM_IP_MSS_CHANNEL_NUM_CORE_ELEMENT_PER_INST,
.element_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
.num_element_per_inst =
T264_HWPM_IP_MSS_CHANNEL_NUM_PERFMUX_PER_INST,
.element_static_array =
t264_mss_channel_inst0_perfmux_element_static_array,
/* NOTE: range should be in ascending order */
.range_start = addr_map_mc0_base_r(),
.range_end = addr_map_mc15_limit_r(),
.element_stride = addr_map_mc0_limit_r() -
addr_map_mc0_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.num_element_per_inst =
T264_HWPM_IP_MSS_CHANNEL_NUM_BROADCAST_PER_INST,
.element_static_array =
t264_mss_channel_inst0_broadcast_element_static_array,
.range_start = addr_map_mcb_base_r(),
.range_end = addr_map_mcb_limit_r(),
.element_stride = addr_map_mcb_limit_r() -
addr_map_mcb_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.num_element_per_inst =
T264_HWPM_IP_MSS_CHANNEL_NUM_PERFMON_PER_INST,
.element_static_array =
t264_mss_channel_inst0_perfmon_element_static_array,
.range_start = addr_map_rpg_pm_mss0_base_r(),
.range_end = addr_map_rpg_pm_mss15_limit_r(),
.element_stride = addr_map_rpg_pm_mss0_limit_r() -
addr_map_rpg_pm_mss0_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
},
.ip_ops = {
.ip_dev = NULL,
.hwpm_ip_pm = NULL,
.hwpm_ip_reg_op = NULL,
.fd = TEGRA_HWPM_IP_DEBUG_FD_INVALID,
},
.element_fs_mask = 0U,
.dev_name = "",
},
};
/* IP structure */
struct hwpm_ip t264_hwpm_ip_mss_channel = {
.num_instances = T264_HWPM_IP_MSS_CHANNEL_NUM_INSTANCES,
.ip_inst_static_array = t264_mss_channel_inst_static_array,
.inst_aperture_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
/* NOTE: range should be in ascending order */
.range_start = addr_map_mc0_base_r(),
.range_end = addr_map_mc15_limit_r(),
.inst_stride = addr_map_mc15_limit_r() -
addr_map_mc0_base_r() + 1ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.range_start = addr_map_mcb_base_r(),
.range_end = addr_map_mcb_limit_r(),
.inst_stride = addr_map_mcb_limit_r() -
addr_map_mcb_base_r() + 1ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.range_start = addr_map_rpg_pm_mss0_base_r(),
.range_end = addr_map_rpg_pm_mss15_limit_r(),
.inst_stride = addr_map_rpg_pm_mss15_limit_r() -
addr_map_rpg_pm_mss0_base_r() + 1ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
},
.dependent_fuse_mask = TEGRA_HWPM_FUSE_HWPM_GLOBAL_DISABLE_MASK |
TEGRA_HWPM_FUSE_OPT_HWPM_DISABLE_MASK,
.override_enable = false,
.inst_fs_mask = 0U,
.resource_status = TEGRA_HWPM_RESOURCE_STATUS_INVALID,
.reserved = false,
};
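The MSS channel tables above rely on a simple invariant: each aperture type exposes an ascending [range_start, range_end] window and a fixed element_stride of (limit - base + 1), so an address can be turned into an element slot with one subtraction and one division. The standalone sketch below illustrates that arithmetic; the aperture_range struct, the stride_slot() helper and the example addresses are illustrative stand-ins, not the driver's real types or values.

/* Illustrative sketch only -- not part of the driver sources. */
#include <stdint.h>
#include <stdio.h>

struct aperture_range {
	uint64_t range_start;    /* e.g. addr_map_mc0_base_r() */
	uint64_t range_end;      /* e.g. addr_map_mc15_limit_r() */
	uint64_t element_stride; /* one element: limit - base + 1 */
};

/* Return the zero-based element slot for addr, or -1 if out of range. */
static int64_t stride_slot(const struct aperture_range *r, uint64_t addr)
{
	if (addr < r->range_start || addr > r->range_end)
		return -1;
	return (int64_t)((addr - r->range_start) / r->element_stride);
}

int main(void)
{
	/* Hypothetical numbers standing in for the MC perfmux window. */
	struct aperture_range mc = {
		.range_start    = 0x02c10000ULL,
		.range_end      = 0x02c1ffffULL,
		.element_stride = 0x1000ULL,
	};

	/* Prints "slot = 3": the address falls inside the fourth element. */
	printf("slot = %lld\n", (long long)stride_slot(&mc, 0x02c13000ULL));
	return 0;
}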

@@ -0,0 +1,48 @@
/* SPDX-License-Identifier: MIT */
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* This is a generated file. Do not edit.
*
* Steps to regenerate:
* python3 ip_files_generator.py <soc_chip> <IP_name> [<dir_name>]
*/
#ifndef T264_HWPM_IP_MSS_CHANNEL_H
#define T264_HWPM_IP_MSS_CHANNEL_H
#if defined(CONFIG_T264_HWPM_IP_MSS_CHANNEL)
#define T264_HWPM_ACTIVE_IP_MSS_CHANNEL T264_HWPM_IP_MSS_CHANNEL,
/* This data should ideally be available in HW headers */
#define T264_HWPM_IP_MSS_CHANNEL_NUM_INSTANCES 1U
#define T264_HWPM_IP_MSS_CHANNEL_NUM_CORE_ELEMENT_PER_INST 16U
#define T264_HWPM_IP_MSS_CHANNEL_NUM_PERFMON_PER_INST 16U
#define T264_HWPM_IP_MSS_CHANNEL_NUM_PERFMUX_PER_INST 16U
#define T264_HWPM_IP_MSS_CHANNEL_NUM_BROADCAST_PER_INST 1U
extern struct hwpm_ip t264_hwpm_ip_mss_channel;
#else
#define T264_HWPM_ACTIVE_IP_MSS_CHANNEL
#endif
#endif /* T264_HWPM_IP_MSS_CHANNEL_H */

@@ -0,0 +1,483 @@
// SPDX-License-Identifier: MIT
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* This is a generated file. Do not edit.
*
* Steps to regenerate:
* python3 ip_files_generator.py <soc_chip> <IP_name> [<dir_name>]
*/
#include "t264_mss_hubs.h"
#include <tegra_hwpm.h>
#include <hal/t264/t264_regops_allowlist.h>
#include <hal/t264/t264_perfmon_device_index.h>
#include <hal/t264/hw/t264_addr_map_soc_hwpm.h>
static struct hwpm_ip_aperture t264_mss_hubs_inst0_perfmon_element_static_array[
T264_HWPM_IP_MSS_HUBS_NUM_PERFMON_PER_INST] = {
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 1U,
.dt_mmio = NULL,
.name = "perfmon_system_msshub0",
.device_index = T264_SYSTEM_MSS_HUB0_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_system_msshub0_base_r(),
.end_abs_pa = addr_map_rpg_pm_system_msshub0_limit_r(),
.start_pa = addr_map_rpg_pm_system_msshub0_base_r(),
.end_pa = addr_map_rpg_pm_system_msshub0_limit_r(),
.base_pa = addr_map_rpg_grp_system_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 1U,
.element_index_mask = BIT(1),
.element_index = 2U,
.dt_mmio = NULL,
.name = "perfmon_disp_usb_msshub0",
.device_index = T264_DISP_USB_MSS_HUB0_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_disp_usb_msshub0_base_r(),
.end_abs_pa = addr_map_rpg_pm_disp_usb_msshub0_limit_r(),
.start_pa = addr_map_rpg_pm_disp_usb_msshub0_base_r(),
.end_pa = addr_map_rpg_pm_disp_usb_msshub0_limit_r(),
.base_pa = addr_map_rpg_grp_disp_usb_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 2U,
.element_index_mask = BIT(2),
.element_index = 3U,
.dt_mmio = NULL,
.name = "perfmon_vision_msshub0",
.device_index = T264_VISION_MSS_HUB0_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_vision_msshub0_base_r(),
.end_abs_pa = addr_map_rpg_pm_vision_msshub0_limit_r(),
.start_pa = addr_map_rpg_pm_vision_msshub0_base_r(),
.end_pa = addr_map_rpg_pm_vision_msshub0_limit_r(),
.base_pa = addr_map_rpg_grp_vision_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 3U,
.element_index_mask = BIT(3),
.element_index = 4U,
.dt_mmio = NULL,
.name = "perfmon_vision_msshub1",
.device_index = T264_VISION_MSS_HUB1_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_vision_msshub1_base_r(),
.end_abs_pa = addr_map_rpg_pm_vision_msshub1_limit_r(),
.start_pa = addr_map_rpg_pm_vision_msshub1_base_r(),
.end_pa = addr_map_rpg_pm_vision_msshub1_limit_r(),
.base_pa = addr_map_rpg_grp_vision_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 4U,
.element_index_mask = BIT(4),
.element_index = 5U,
.dt_mmio = NULL,
.name = "perfmon_ucf_msshub0",
.device_index = T264_UCF_MSS_HUB0_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_ucf_msshub0_base_r(),
.end_abs_pa = addr_map_rpg_pm_ucf_msshub0_limit_r(),
.start_pa = addr_map_rpg_pm_ucf_msshub0_base_r(),
.end_pa = addr_map_rpg_pm_ucf_msshub0_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 5U,
.element_index_mask = BIT(5),
.element_index = 6U,
.dt_mmio = NULL,
.name = "perfmon_ucf_msshub1",
.device_index = T264_UCF_MSS_HUB1_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_ucf_msshub1_base_r(),
.end_abs_pa = addr_map_rpg_pm_ucf_msshub1_limit_r(),
.start_pa = addr_map_rpg_pm_ucf_msshub1_base_r(),
.end_pa = addr_map_rpg_pm_ucf_msshub1_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 6U,
.element_index_mask = BIT(6),
.element_index = 7U,
.dt_mmio = NULL,
.name = "perfmon_ucf_msshub2",
.device_index = T264_UCF_MSS_HUB2_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_ucf_msshub2_base_r(),
.end_abs_pa = addr_map_rpg_pm_ucf_msshub2_limit_r(),
.start_pa = addr_map_rpg_pm_ucf_msshub2_base_r(),
.end_pa = addr_map_rpg_pm_ucf_msshub2_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 7U,
.element_index_mask = BIT(7),
.element_index = 8U,
.dt_mmio = NULL,
.name = "perfmon_uphy0_msshub0",
.device_index = T264_UPHY0_MSS_HUB0_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_uphy0_msshub0_base_r(),
.end_abs_pa = addr_map_rpg_pm_uphy0_msshub0_limit_r(),
.start_pa = addr_map_rpg_pm_uphy0_msshub0_base_r(),
.end_pa = addr_map_rpg_pm_uphy0_msshub0_limit_r(),
.base_pa = addr_map_rpg_grp_uphy0_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 8U,
.element_index_mask = BIT(8),
.element_index = 9U,
.dt_mmio = NULL,
.name = "perfmon_uphy0_msshub1",
.device_index = T264_UPHY0_MSS_HUB1_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_uphy0_msshub1_base_r(),
.end_abs_pa = addr_map_rpg_pm_uphy0_msshub1_limit_r(),
.start_pa = addr_map_rpg_pm_uphy0_msshub1_base_r(),
.end_pa = addr_map_rpg_pm_uphy0_msshub1_limit_r(),
.base_pa = addr_map_rpg_grp_uphy0_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_mss_hubs_inst0_perfmux_element_static_array[
T264_HWPM_IP_MSS_HUBS_NUM_PERFMUX_PER_INST] = {
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 1U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc0_base_r(),
.end_abs_pa = addr_map_mc0_limit_r(),
.start_pa = addr_map_mc0_base_r(),
.end_pa = addr_map_mc0_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_hub_alist,
.alist_size = ARRAY_SIZE(t264_mss_hub_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 1U,
.element_index_mask = BIT(1),
.element_index = 2U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc1_base_r(),
.end_abs_pa = addr_map_mc1_limit_r(),
.start_pa = addr_map_mc1_base_r(),
.end_pa = addr_map_mc1_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_hub_alist,
.alist_size = ARRAY_SIZE(t264_mss_hub_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 2U,
.element_index_mask = BIT(2),
.element_index = 3U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc2_base_r(),
.end_abs_pa = addr_map_mc2_limit_r(),
.start_pa = addr_map_mc2_base_r(),
.end_pa = addr_map_mc2_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_hub_alist,
.alist_size = ARRAY_SIZE(t264_mss_hub_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 3U,
.element_index_mask = BIT(3),
.element_index = 4U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc3_base_r(),
.end_abs_pa = addr_map_mc3_limit_r(),
.start_pa = addr_map_mc3_base_r(),
.end_pa = addr_map_mc3_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_hub_alist,
.alist_size = ARRAY_SIZE(t264_mss_hub_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 4U,
.element_index_mask = BIT(4),
.element_index = 5U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc4_base_r(),
.end_abs_pa = addr_map_mc4_limit_r(),
.start_pa = addr_map_mc4_base_r(),
.end_pa = addr_map_mc4_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_hub_alist,
.alist_size = ARRAY_SIZE(t264_mss_hub_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 5U,
.element_index_mask = BIT(5),
.element_index = 6U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc5_base_r(),
.end_abs_pa = addr_map_mc5_limit_r(),
.start_pa = addr_map_mc5_base_r(),
.end_pa = addr_map_mc5_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_hub_alist,
.alist_size = ARRAY_SIZE(t264_mss_hub_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 6U,
.element_index_mask = BIT(6),
.element_index = 7U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc6_base_r(),
.end_abs_pa = addr_map_mc6_limit_r(),
.start_pa = addr_map_mc6_base_r(),
.end_pa = addr_map_mc6_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_hub_alist,
.alist_size = ARRAY_SIZE(t264_mss_hub_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 7U,
.element_index_mask = BIT(7),
.element_index = 8U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc7_base_r(),
.end_abs_pa = addr_map_mc7_limit_r(),
.start_pa = addr_map_mc7_base_r(),
.end_pa = addr_map_mc7_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_hub_alist,
.alist_size = ARRAY_SIZE(t264_mss_hub_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 8U,
.element_index_mask = BIT(8),
.element_index = 9U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mc8_base_r(),
.end_abs_pa = addr_map_mc8_limit_r(),
.start_pa = addr_map_mc8_base_r(),
.end_pa = addr_map_mc8_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_hub_alist,
.alist_size = ARRAY_SIZE(t264_mss_hub_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_mss_hubs_inst0_broadcast_element_static_array[
T264_HWPM_IP_MSS_HUBS_NUM_BROADCAST_PER_INST] = {
{
.element_type = IP_ELEMENT_BROADCAST,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_mcb_base_r(),
.end_abs_pa = addr_map_mcb_limit_r(),
.start_pa = addr_map_mcb_base_r(),
.end_pa = addr_map_mcb_limit_r(),
.base_pa = 0ULL,
.alist = t264_mss_hub_alist,
.alist_size = ARRAY_SIZE(t264_mss_hub_alist),
.fake_registers = NULL,
},
};
/* IP instance array */
static struct hwpm_ip_inst t264_mss_hubs_inst_static_array[
T264_HWPM_IP_MSS_HUBS_NUM_INSTANCES] = {
{
.hw_inst_mask = BIT(0),
.num_core_elements_per_inst =
T264_HWPM_IP_MSS_HUBS_NUM_CORE_ELEMENT_PER_INST,
.element_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
.num_element_per_inst =
T264_HWPM_IP_MSS_HUBS_NUM_PERFMUX_PER_INST,
.element_static_array =
t264_mss_hubs_inst0_perfmux_element_static_array,
/* NOTE: range should be in ascending order */
.range_start = addr_map_mc0_base_r(),
.range_end = addr_map_mc8_limit_r(),
.element_stride = addr_map_mc0_limit_r() -
addr_map_mc0_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.num_element_per_inst =
T264_HWPM_IP_MSS_HUBS_NUM_BROADCAST_PER_INST,
.element_static_array =
t264_mss_hubs_inst0_broadcast_element_static_array,
.range_start = addr_map_mcb_base_r(),
.range_end = addr_map_mcb_limit_r(),
.element_stride = addr_map_mcb_limit_r() -
addr_map_mcb_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.num_element_per_inst =
T264_HWPM_IP_MSS_HUBS_NUM_PERFMON_PER_INST,
.element_static_array =
t264_mss_hubs_inst0_perfmon_element_static_array,
.range_start = addr_map_rpg_pm_system_msshub0_base_r(),
.range_end = addr_map_rpg_pm_uphy0_msshub1_limit_r(),
.element_stride = addr_map_rpg_pm_system_msshub0_limit_r() -
addr_map_rpg_pm_system_msshub0_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
},
.ip_ops = {
.ip_dev = NULL,
.hwpm_ip_pm = NULL,
.hwpm_ip_reg_op = NULL,
.fd = TEGRA_HWPM_IP_DEBUG_FD_INVALID,
},
.element_fs_mask = 0U,
.dev_name = "",
},
};
/* IP structure */
struct hwpm_ip t264_hwpm_ip_mss_hubs = {
.num_instances = T264_HWPM_IP_MSS_HUBS_NUM_INSTANCES,
.ip_inst_static_array = t264_mss_hubs_inst_static_array,
.inst_aperture_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
/* NOTE: range should be in ascending order */
.range_start = addr_map_mc0_base_r(),
.range_end = addr_map_mc8_limit_r(),
.inst_stride = addr_map_mc8_limit_r() -
addr_map_mc0_base_r() + 1ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.range_start = addr_map_mcb_base_r(),
.range_end = addr_map_mcb_limit_r(),
.inst_stride = addr_map_mcb_limit_r() -
addr_map_mcb_base_r() + 1ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.range_start = addr_map_rpg_pm_system_msshub0_base_r(),
.range_end = addr_map_rpg_pm_uphy0_msshub1_limit_r(),
.inst_stride = addr_map_rpg_pm_uphy0_msshub1_limit_r() -
addr_map_rpg_pm_system_msshub0_base_r() + 1ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
},
.dependent_fuse_mask = TEGRA_HWPM_FUSE_HWPM_GLOBAL_DISABLE_MASK |
TEGRA_HWPM_FUSE_OPT_HWPM_DISABLE_MASK,
.override_enable = false,
.inst_fs_mask = 0U,
.resource_status = TEGRA_HWPM_RESOURCE_STATUS_INVALID,
.reserved = false,
};

@@ -0,0 +1,48 @@
/* SPDX-License-Identifier: MIT */
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* This is a generated file. Do not edit.
*
* Steps to regenerate:
* python3 ip_files_generator.py <soc_chip> <IP_name> [<dir_name>]
*/
#ifndef T264_HWPM_IP_MSS_HUBS_H
#define T264_HWPM_IP_MSS_HUBS_H
#if defined(CONFIG_T264_HWPM_IP_MSS_HUBS)
#define T264_HWPM_ACTIVE_IP_MSS_HUBS T264_HWPM_IP_MSS_HUBS,
/* This data should ideally be available in HW headers */
#define T264_HWPM_IP_MSS_HUBS_NUM_INSTANCES 1U
#define T264_HWPM_IP_MSS_HUBS_NUM_CORE_ELEMENT_PER_INST 9U
#define T264_HWPM_IP_MSS_HUBS_NUM_PERFMON_PER_INST 9U
#define T264_HWPM_IP_MSS_HUBS_NUM_PERFMUX_PER_INST 9U
#define T264_HWPM_IP_MSS_HUBS_NUM_BROADCAST_PER_INST 1U
extern struct hwpm_ip t264_hwpm_ip_mss_hubs;
#else
#define T264_HWPM_ACTIVE_IP_MSS_HUBS
#endif
#endif /* T264_HWPM_IP_MSS_HUBS_H */

@@ -0,0 +1,195 @@
// SPDX-License-Identifier: MIT
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* This is a generated file. Do not edit.
*
* Steps to regenerate:
* python3 ip_files_generator.py <soc_chip> <IP_name> [<dir_name>]
*/
#include "t264_ocu.h"
#include <tegra_hwpm.h>
#include <hal/t264/t264_regops_allowlist.h>
#include <hal/t264/t264_perfmon_device_index.h>
#include <hal/t264/hw/t264_addr_map_soc_hwpm.h>
static struct hwpm_ip_aperture t264_ocu_inst0_perfmon_element_static_array[
T264_HWPM_IP_OCU_NUM_PERFMON_PER_INST] = {
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = "perfmon_ocu0",
.device_index = T264_OCU0_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_ocu_base_r(),
.end_abs_pa = addr_map_rpg_pm_ocu_limit_r(),
.start_pa = addr_map_rpg_pm_ocu_base_r(),
.end_pa = addr_map_rpg_pm_ocu_limit_r(),
.base_pa = addr_map_rpg_grp_uphy0_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_ocu_inst0_perfmux_element_static_array[
T264_HWPM_IP_OCU_NUM_PERFMUX_PER_INST] = {
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_ocu_base_r(),
.end_abs_pa = addr_map_ocu_limit_r(),
.start_pa = addr_map_ocu_base_r(),
.end_pa = addr_map_ocu_limit_r(),
.base_pa = 0ULL,
.alist = t264_ocu_alist,
.alist_size = ARRAY_SIZE(t264_ocu_alist),
.fake_registers = NULL,
},
};
/* IP instance array */
static struct hwpm_ip_inst t264_ocu_inst_static_array[
T264_HWPM_IP_OCU_NUM_INSTANCES] = {
{
.hw_inst_mask = BIT(0),
.num_core_elements_per_inst =
T264_HWPM_IP_OCU_NUM_CORE_ELEMENT_PER_INST,
.element_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
.num_element_per_inst =
T264_HWPM_IP_OCU_NUM_PERFMUX_PER_INST,
.element_static_array =
t264_ocu_inst0_perfmux_element_static_array,
/* NOTE: range should be in ascending order */
.range_start = addr_map_ocu_base_r(),
.range_end = addr_map_ocu_limit_r(),
.element_stride = addr_map_ocu_limit_r() -
addr_map_ocu_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.num_element_per_inst =
T264_HWPM_IP_OCU_NUM_BROADCAST_PER_INST,
.element_static_array = NULL,
.range_start = 0ULL,
.range_end = 0ULL,
.element_stride = 0ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.num_element_per_inst =
T264_HWPM_IP_OCU_NUM_PERFMON_PER_INST,
.element_static_array =
t264_ocu_inst0_perfmon_element_static_array,
.range_start = addr_map_rpg_pm_ocu_base_r(),
.range_end = addr_map_rpg_pm_ocu_limit_r(),
.element_stride = addr_map_rpg_pm_ocu_limit_r() -
addr_map_rpg_pm_ocu_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
},
.ip_ops = {
.ip_dev = NULL,
.hwpm_ip_pm = NULL,
.hwpm_ip_reg_op = NULL,
.fd = TEGRA_HWPM_IP_DEBUG_FD_INVALID,
},
.element_fs_mask = 0U,
.dev_name = "",
},
};
/* IP structure */
struct hwpm_ip t264_hwpm_ip_ocu = {
.num_instances = T264_HWPM_IP_OCU_NUM_INSTANCES,
.ip_inst_static_array = t264_ocu_inst_static_array,
.inst_aperture_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
/* NOTE: range should be in ascending order */
.range_start = addr_map_ocu_base_r(),
.range_end = addr_map_ocu_limit_r(),
.inst_stride = addr_map_ocu_limit_r() -
addr_map_ocu_base_r() + 1ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.range_start = 0ULL,
.range_end = 0ULL,
.inst_stride = 0ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.range_start = addr_map_rpg_pm_ocu_base_r(),
.range_end = addr_map_rpg_pm_ocu_limit_r(),
.inst_stride = addr_map_rpg_pm_ocu_limit_r() -
addr_map_rpg_pm_ocu_base_r() + 1ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
},
.dependent_fuse_mask = TEGRA_HWPM_FUSE_HWPM_GLOBAL_DISABLE_MASK |
TEGRA_HWPM_FUSE_OPT_HWPM_DISABLE_MASK,
.override_enable = false,
.inst_fs_mask = 0U,
.resource_status = TEGRA_HWPM_RESOURCE_STATUS_INVALID,
.reserved = false,
};
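OCU has no broadcast aperture, so its broadcast entries above are kept as placeholders with a NULL static array and zeroed range/stride. Any code iterating over all aperture types of an instance therefore has to treat such empty entries as absent. The guard below is only a hedged illustration of that check, written against a simplified stand-in struct rather than the driver's real hwpm_ip_inst layout.

/* Illustrative sketch only -- not part of the driver sources. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified stand-in for one aperture-type entry in element_info[]. */
struct aperture_info {
	uint32_t num_element_per_inst;
	const void *element_static_array;
	uint64_t range_start;
	uint64_t range_end;
	uint64_t element_stride;
};

/* An aperture type is usable only if it was actually populated. */
static inline bool aperture_is_present(const struct aperture_info *info)
{
	return (info->num_element_per_inst != 0U) &&
	       (info->element_static_array != NULL) &&
	       (info->element_stride != 0ULL);
}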

@@ -0,0 +1,48 @@
/* SPDX-License-Identifier: MIT */
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* This is a generated file. Do not edit.
*
* Steps to regenerate:
* python3 ip_files_generator.py <soc_chip> <IP_name> [<dir_name>]
*/
#ifndef T264_HWPM_IP_OCU_H
#define T264_HWPM_IP_OCU_H
#if defined(CONFIG_T264_HWPM_IP_OCU)
#define T264_HWPM_ACTIVE_IP_OCU T264_HWPM_IP_OCU,
/* This data should ideally be available in HW headers */
#define T264_HWPM_IP_OCU_NUM_INSTANCES 1U
#define T264_HWPM_IP_OCU_NUM_CORE_ELEMENT_PER_INST 1U
#define T264_HWPM_IP_OCU_NUM_PERFMON_PER_INST 1U
#define T264_HWPM_IP_OCU_NUM_PERFMUX_PER_INST 1U
#define T264_HWPM_IP_OCU_NUM_BROADCAST_PER_INST 0U
extern struct hwpm_ip t264_hwpm_ip_ocu;
#else
#define T264_HWPM_ACTIVE_IP_OCU
#endif
#endif /* T264_HWPM_IP_OCU_H */

@@ -0,0 +1,190 @@
// SPDX-License-Identifier: MIT
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "t264_pma.h"
#include <tegra_hwpm.h>
#include <hal/t264/t264_regops_allowlist.h>
#include <hal/t264/t264_perfmon_device_index.h>
#include <hal/t264/hw/t264_addr_map_soc_hwpm.h>
static struct hwpm_ip_aperture t264_pma_inst0_perfmon_element_static_array[
T264_HWPM_IP_PMA_NUM_PERFMON_PER_INST] = {
{
.element_type = HWPM_ELEMENT_PERFMON,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = "perfmon_hwpm",
.device_index = T264_HWPM_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_hwpm_base_r(),
.end_abs_pa = addr_map_rpg_pm_hwpm_limit_r(),
.start_pa = addr_map_rpg_pm_hwpm_base_r(),
.end_pa = addr_map_rpg_pm_hwpm_limit_r(),
.base_pa = addr_map_rpg_grp_system_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_pma_inst0_perfmux_element_static_array[
T264_HWPM_IP_PMA_NUM_PERFMUX_PER_INST] = {
{
.element_type = HWPM_ELEMENT_PERFMUX,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = "pma",
.device_index = T264_PMA_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_pma_base_r(),
.end_abs_pa = addr_map_pma_limit_r(),
.start_pa = addr_map_pma_base_r(),
.end_pa = addr_map_pma_limit_r(),
.base_pa = addr_map_pma_base_r(),
.alist = t264_pma_res_pma_alist,
.alist_size = ARRAY_SIZE(t264_pma_res_pma_alist),
.fake_registers = NULL,
},
};
/* IP instance array */
static struct hwpm_ip_inst t264_pma_inst_static_array[
T264_HWPM_IP_PMA_NUM_INSTANCES] = {
{
.hw_inst_mask = BIT(0),
.num_core_elements_per_inst =
T264_HWPM_IP_PMA_NUM_CORE_ELEMENT_PER_INST,
.element_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
.num_element_per_inst =
T264_HWPM_IP_PMA_NUM_PERFMUX_PER_INST,
.element_static_array =
t264_pma_inst0_perfmux_element_static_array,
/* NOTE: range should be in ascending order */
.range_start = addr_map_pma_base_r(),
.range_end = addr_map_pma_limit_r(),
.element_stride = addr_map_pma_limit_r() -
addr_map_pma_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.num_element_per_inst =
T264_HWPM_IP_PMA_NUM_BROADCAST_PER_INST,
.element_static_array = NULL,
.range_start = 0ULL,
.range_end = 0ULL,
.element_stride = 0ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.num_element_per_inst =
T264_HWPM_IP_PMA_NUM_PERFMON_PER_INST,
.element_static_array =
t264_pma_inst0_perfmon_element_static_array,
.range_start = addr_map_rpg_pm_hwpm_base_r(),
.range_end = addr_map_rpg_pm_hwpm_limit_r(),
.element_stride = addr_map_rpg_pm_hwpm_limit_r() -
addr_map_rpg_pm_hwpm_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
},
.ip_ops = {
.ip_dev = NULL,
.hwpm_ip_pm = NULL,
.hwpm_ip_reg_op = NULL,
.fd = -1,
},
.element_fs_mask = 0x1U,
.dev_name = "",
},
};
/* IP structure */
struct hwpm_ip t264_hwpm_ip_pma = {
.num_instances = T264_HWPM_IP_PMA_NUM_INSTANCES,
.ip_inst_static_array = t264_pma_inst_static_array,
.inst_aperture_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
/* NOTE: range should be in ascending order */
.range_start = addr_map_pma_base_r(),
.range_end = addr_map_pma_limit_r(),
.inst_stride = addr_map_pma_limit_r() -
addr_map_pma_base_r() + 1ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.range_start = 0ULL,
.range_end = 0ULL,
.inst_stride = 0ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.range_start = addr_map_rpg_pm_hwpm_base_r(),
.range_end = addr_map_rpg_pm_hwpm_limit_r(),
.inst_stride = addr_map_rpg_pm_hwpm_limit_r() -
addr_map_rpg_pm_hwpm_base_r() + 1ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
},
.dependent_fuse_mask = TEGRA_HWPM_FUSE_HWPM_GLOBAL_DISABLE_MASK |
TEGRA_HWPM_FUSE_OPT_HWPM_DISABLE_MASK,
.override_enable = false,
.inst_fs_mask = 0x1U,
.resource_status = TEGRA_HWPM_RESOURCE_STATUS_VALID,
.reserved = false,
};

@@ -0,0 +1,38 @@
/* SPDX-License-Identifier: MIT */
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef T264_HWPM_IP_PMA_H
#define T264_HWPM_IP_PMA_H
#define T264_HWPM_ACTIVE_IP_PMA T264_HWPM_IP_PMA,
/* This data should ideally be available in HW headers */
#define T264_HWPM_IP_PMA_NUM_INSTANCES 1U
#define T264_HWPM_IP_PMA_NUM_CORE_ELEMENT_PER_INST 1U
#define T264_HWPM_IP_PMA_NUM_PERFMON_PER_INST 1U
#define T264_HWPM_IP_PMA_NUM_PERFMUX_PER_INST 1U
#define T264_HWPM_IP_PMA_NUM_BROADCAST_PER_INST 0U
extern struct hwpm_ip t264_hwpm_ip_pma;
#endif /* T264_HWPM_IP_PMA_H */

@@ -0,0 +1,280 @@
// SPDX-License-Identifier: MIT
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* This is a generated file. Do not edit.
*
* Steps to regenerate:
* python3 ip_files_generator.py <soc_chip> <IP_name> [<dir_name>]
*/
#include "t264_pva.h"
#include <tegra_hwpm.h>
#include <hal/t264/t264_regops_allowlist.h>
#include <hal/t264/t264_perfmon_device_index.h>
#include <hal/t264/hw/t264_addr_map_soc_hwpm.h>
static struct hwpm_ip_aperture t264_pva_inst0_perfmon_element_static_array[
T264_HWPM_IP_PVA_NUM_PERFMON_PER_INST] = {
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = "perfmon_pvac0",
.device_index = T264_PVAC0_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_pvac0_base_r(),
.end_abs_pa = addr_map_rpg_pm_pvac0_limit_r(),
.start_pa = addr_map_rpg_pm_pvac0_base_r(),
.end_pa = addr_map_rpg_pm_pvac0_limit_r(),
.base_pa = addr_map_rpg_grp_vision_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 1U,
.element_index_mask = BIT(0),
.element_index = 1U,
.dt_mmio = NULL,
.name = "perfmon_pvav0",
.device_index = T264_PVAV0_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_pvav0_base_r(),
.end_abs_pa = addr_map_rpg_pm_pvav0_limit_r(),
.start_pa = addr_map_rpg_pm_pvav0_base_r(),
.end_pa = addr_map_rpg_pm_pvav0_limit_r(),
.base_pa = addr_map_rpg_grp_vision_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 2U,
.element_index_mask = BIT(0),
.element_index = 2U,
.dt_mmio = NULL,
.name = "perfmon_pvav1",
.device_index = T264_PVAV1_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_pvav1_base_r(),
.end_abs_pa = addr_map_rpg_pm_pvav1_limit_r(),
.start_pa = addr_map_rpg_pm_pvav1_base_r(),
.end_pa = addr_map_rpg_pm_pvav1_limit_r(),
.base_pa = addr_map_rpg_grp_vision_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 3U,
.element_index_mask = BIT(0),
.element_index = 3U,
.dt_mmio = NULL,
.name = "perfmon_pvap0",
.device_index = T264_PVAP0_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_pvap0_base_r(),
.end_abs_pa = addr_map_rpg_pm_pvap0_limit_r(),
.start_pa = addr_map_rpg_pm_pvap0_base_r(),
.end_pa = addr_map_rpg_pm_pvap0_limit_r(),
.base_pa = addr_map_rpg_grp_vision_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 4U,
.element_index_mask = BIT(0),
.element_index = 4U,
.dt_mmio = NULL,
.name = "perfmon_pvap1",
.device_index = T264_PVAP1_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_pvap1_base_r(),
.end_abs_pa = addr_map_rpg_pm_pvap1_limit_r(),
.start_pa = addr_map_rpg_pm_pvap1_base_r(),
.end_pa = addr_map_rpg_pm_pvap1_limit_r(),
.base_pa = addr_map_rpg_grp_vision_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_pva_inst0_perfmux_element_static_array[
T264_HWPM_IP_PVA_NUM_PERFMUX_PER_INST] = {
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_pva0_pm_base_r(),
.end_abs_pa = addr_map_pva0_pm_limit_r(),
.start_pa = addr_map_pva0_pm_base_r(),
.end_pa = addr_map_pva0_pm_limit_r(),
.base_pa = 0ULL,
.alist = t264_pva_pm_alist,
.alist_size = ARRAY_SIZE(t264_pva_pm_alist),
.fake_registers = NULL,
},
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 1U,
.element_index_mask = BIT(0),
.element_index = 1U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_pva1_pm_base_r(),
.end_abs_pa = addr_map_pva1_pm_limit_r(),
.start_pa = addr_map_pva1_pm_base_r(),
.end_pa = addr_map_pva1_pm_limit_r(),
.base_pa = 0ULL,
.alist = t264_pva_pm_alist,
.alist_size = ARRAY_SIZE(t264_pva_pm_alist),
.fake_registers = NULL,
},
};
/* IP instance array */
static struct hwpm_ip_inst t264_pva_inst_static_array[
T264_HWPM_IP_PVA_NUM_INSTANCES] = {
{
.hw_inst_mask = BIT(0),
.num_core_elements_per_inst =
T264_HWPM_IP_PVA_NUM_CORE_ELEMENT_PER_INST,
.element_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
.num_element_per_inst =
T264_HWPM_IP_PVA_NUM_PERFMUX_PER_INST,
.element_static_array =
t264_pva_inst0_perfmux_element_static_array,
/* NOTE: range should be in ascending order */
.range_start = addr_map_pva0_pm_base_r(),
.range_end = addr_map_pva1_pm_limit_r(),
.element_stride = addr_map_pva0_pm_limit_r() -
addr_map_pva0_pm_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.num_element_per_inst =
T264_HWPM_IP_PVA_NUM_BROADCAST_PER_INST,
.element_static_array = NULL,
.range_start = 0ULL,
.range_end = 0ULL,
.element_stride = 0ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.num_element_per_inst =
T264_HWPM_IP_PVA_NUM_PERFMON_PER_INST,
.element_static_array =
t264_pva_inst0_perfmon_element_static_array,
.range_start = addr_map_rpg_pm_pvac0_base_r(),
.range_end = addr_map_rpg_pm_pvap1_limit_r(),
.element_stride = addr_map_rpg_pm_pvac0_limit_r() -
addr_map_rpg_pm_pvac0_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
},
.ip_ops = {
.ip_dev = NULL,
.hwpm_ip_pm = NULL,
.hwpm_ip_reg_op = NULL,
.fd = TEGRA_HWPM_IP_DEBUG_FD_VALID,
},
.element_fs_mask = 0U,
.dev_name = "/dev/nvpvadebugfs/pva0/hwpm",
},
};
/* IP structure */
struct hwpm_ip t264_hwpm_ip_pva = {
.num_instances = T264_HWPM_IP_PVA_NUM_INSTANCES,
.ip_inst_static_array = t264_pva_inst_static_array,
.inst_aperture_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
/* NOTE: range should be in ascending order */
.range_start = addr_map_pva0_pm_base_r(),
.range_end = addr_map_pva1_pm_limit_r(),
.inst_stride = addr_map_pva1_pm_limit_r() -
addr_map_pva0_pm_base_r() + 1ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.range_start = 0ULL,
.range_end = 0ULL,
.inst_stride = 0ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.range_start = addr_map_rpg_pm_pvac0_base_r(),
.range_end = addr_map_rpg_pm_pvap1_limit_r(),
.inst_stride = addr_map_rpg_pm_pvap1_limit_r() -
addr_map_rpg_pm_pvac0_base_r() + 1ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
},
.dependent_fuse_mask = TEGRA_HWPM_FUSE_HWPM_GLOBAL_DISABLE_MASK |
TEGRA_HWPM_FUSE_OPT_HWPM_DISABLE_MASK,
.override_enable = false,
.inst_fs_mask = 0U,
.resource_status = TEGRA_HWPM_RESOURCE_STATUS_INVALID,
.reserved = false,
};
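Across these tables, hw_inst_mask, inst_fs_mask and element_fs_mask are plain availability bitmasks: BIT(i) set means instance or element i is present (PMA, for example, is pre-seeded with 0x1, while most IPs start at 0 and appear to be filled in at probe time). The helpers below are a minimal, hypothetical illustration of that bookkeeping with a locally defined mask macro; they are not part of the driver's API.

/* Illustrative sketch only -- not part of the driver sources. */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_BIT(n) (1U << (n))

/* Mark instance/element 'idx' as present in the floorsweeping mask. */
static inline void fs_mask_set(uint32_t *fs_mask, uint32_t idx)
{
	*fs_mask |= SKETCH_BIT(idx);
}

/* Check whether instance/element 'idx' survived floorsweeping. */
static inline bool fs_mask_test(uint32_t fs_mask, uint32_t idx)
{
	return (fs_mask & SKETCH_BIT(idx)) != 0U;
}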

@@ -0,0 +1,48 @@
/* SPDX-License-Identifier: MIT */
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* This is a generated file. Do not edit.
*
* Steps to regenerate:
* python3 ip_files_generator.py <soc_chip> <IP_name> [<dir_name>]
*/
#ifndef T264_HWPM_IP_PVA_H
#define T264_HWPM_IP_PVA_H
#if defined(CONFIG_T264_HWPM_IP_PVA)
#define T264_HWPM_ACTIVE_IP_PVA T264_HWPM_IP_PVA,
/* This data should ideally be available in HW headers */
#define T264_HWPM_IP_PVA_NUM_INSTANCES 1U
#define T264_HWPM_IP_PVA_NUM_CORE_ELEMENT_PER_INST 1U
#define T264_HWPM_IP_PVA_NUM_PERFMON_PER_INST 5U
#define T264_HWPM_IP_PVA_NUM_PERFMUX_PER_INST 2U
#define T264_HWPM_IP_PVA_NUM_BROADCAST_PER_INST 0U
extern struct hwpm_ip t264_hwpm_ip_pva;
#else
#define T264_HWPM_ACTIVE_IP_PVA
#endif
#endif /* T264_HWPM_IP_PVA_H */

@@ -0,0 +1,264 @@
// SPDX-License-Identifier: MIT
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "t264_rtr.h"
#include <tegra_hwpm.h>
#include <hal/t264/t264_regops_allowlist.h>
#include <hal/t264/hw/t264_addr_map_soc_hwpm.h>
#include <hal/t264/t264_perfmon_device_index.h>
/* RTR aperture should be placed in instance T264_HWPM_IP_RTR_STATIC_RTR_INST */
static struct hwpm_ip_aperture t264_rtr_inst0_perfmux_element_static_array[
T264_HWPM_IP_RTR_NUM_PERFMUX_PER_INST] = {
{
.element_type = HWPM_ELEMENT_PERFMUX,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = "rtr",
.device_index = T264_RTR_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rtr_base_r(),
.end_abs_pa = addr_map_rtr_limit_r(),
.start_pa = addr_map_rtr_base_r(),
.end_pa = addr_map_rtr_limit_r(),
.base_pa = addr_map_rtr_base_r(),
.alist = t264_rtr_alist,
.alist_size = ARRAY_SIZE(t264_rtr_alist),
.fake_registers = NULL,
},
};
/* PMA from RTR perspective */
/* PMA aperture should be placed in instance T264_HWPM_IP_RTR_STATIC_PMA_INST */
static struct hwpm_ip_aperture t264_rtr_inst1_perfmux_element_static_array[
T264_HWPM_IP_RTR_NUM_PERFMUX_PER_INST] = {
{
.element_type = HWPM_ELEMENT_PERFMUX,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = "pma",
.device_index = T264_PMA_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_pma_base_r(),
.end_abs_pa = addr_map_pma_limit_r(),
.start_pa = addr_map_pma_base_r(),
.end_pa = addr_map_pma_limit_r(),
.base_pa = addr_map_pma_base_r(),
.alist = t264_pma_res_cmd_slice_rtr_alist,
.alist_size = ARRAY_SIZE(t264_pma_res_cmd_slice_rtr_alist),
.fake_registers = NULL,
},
};
/* IP instance array */
static struct hwpm_ip_inst t264_rtr_inst_static_array[
T264_HWPM_IP_RTR_NUM_INSTANCES] = {
{
.hw_inst_mask = BIT(0),
.num_core_elements_per_inst =
T264_HWPM_IP_RTR_NUM_CORE_ELEMENT_PER_INST,
.element_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
.num_element_per_inst =
T264_HWPM_IP_RTR_NUM_PERFMUX_PER_INST,
.element_static_array =
t264_rtr_inst0_perfmux_element_static_array,
.range_start = addr_map_rtr_base_r(),
.range_end = addr_map_rtr_limit_r(),
.element_stride = addr_map_rtr_limit_r() -
addr_map_rtr_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.num_element_per_inst =
T264_HWPM_IP_RTR_NUM_BROADCAST_PER_INST,
.element_static_array = NULL,
.range_start = 0ULL,
.range_end = 0ULL,
.element_stride = 0ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.num_element_per_inst =
T264_HWPM_IP_RTR_NUM_PERFMON_PER_INST,
.element_static_array = NULL,
.range_start = 0ULL,
.range_end = 0ULL,
.element_stride = 0ULL,
.element_slots = 0U,
.element_arr = NULL,
},
},
.ip_ops = {
.ip_dev = NULL,
.hwpm_ip_pm = NULL,
.hwpm_ip_reg_op = NULL,
.fd = -1,
},
.element_fs_mask = 0x1U,
.dev_name = "",
},
{
.hw_inst_mask = BIT(1),
.num_core_elements_per_inst =
T264_HWPM_IP_RTR_NUM_CORE_ELEMENT_PER_INST,
.element_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
.num_element_per_inst =
T264_HWPM_IP_RTR_NUM_PERFMUX_PER_INST,
.element_static_array =
t264_rtr_inst1_perfmux_element_static_array,
.range_start = addr_map_pma_base_r(),
.range_end = addr_map_pma_limit_r(),
.element_stride = addr_map_pma_limit_r() -
addr_map_pma_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.num_element_per_inst =
T264_HWPM_IP_RTR_NUM_BROADCAST_PER_INST,
.element_static_array = NULL,
.range_start = 0ULL,
.range_end = 0ULL,
.element_stride = 0ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.num_element_per_inst =
T264_HWPM_IP_RTR_NUM_PERFMON_PER_INST,
.element_static_array = NULL,
.range_start = 0ULL,
.range_end = 0ULL,
.element_stride = 0ULL,
.element_slots = 0U,
.element_arr = NULL,
},
},
.ip_ops = {
.ip_dev = NULL,
.hwpm_ip_pm = NULL,
.hwpm_ip_reg_op = NULL,
.fd = -1,
},
.element_fs_mask = 0x1U,
.dev_name = "",
},
};
/* IP structure */
struct hwpm_ip t264_hwpm_ip_rtr = {
.num_instances = T264_HWPM_IP_RTR_NUM_INSTANCES,
.ip_inst_static_array = t264_rtr_inst_static_array,
.inst_aperture_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
			/*
			 * The PMA block is 0x2000 wide and the RTR block is
			 * 0x1000 wide. Expected behavior:
			 * - PMA should be referred to as a single entity
			 * - the RTR IP instance array should have 2 slots
			 *   (PMA, RTR)
			 *
			 * To ensure that inst_slots is computed correctly as
			 * 2 slots, the instance range for the perfmux aperture
			 * needs to span twice the PMA block size (see the
			 * worked example after this file).
			 */
.range_start = addr_map_pma_base_r(),
.range_end = addr_map_pma_limit_r() +
(addr_map_pma_limit_r() -
addr_map_pma_base_r() + 1ULL),
			/* Use the PMA stride since it is a larger block than RTR */
.inst_stride = addr_map_pma_limit_r() -
addr_map_pma_base_r() + 1ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.range_start = 0ULL,
.range_end = 0ULL,
.inst_stride = 0ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.range_start = 0ULL,
.range_end = 0ULL,
.inst_stride = 0ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
},
.dependent_fuse_mask = 0U,
.override_enable = false,
	/* RTR is defined as a 2-instance IP corresponding to the router and PMA */
	/* Set this mask to indicate that both instances are available */
.inst_fs_mask = 0x3U,
.resource_status = TEGRA_HWPM_RESOURCE_STATUS_VALID,
.reserved = false,
};
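
The perfmux aperture comment above relies on inst_slots being derived from the aperture range and stride. A minimal worked sketch, assuming the driver computes the slot count as range size divided by inst_stride (the helper name is hypothetical): with a 0x2000-wide PMA block the range spans 0x4000, the stride is 0x2000, and the result is the 2 slots (PMA, RTR) that the comment expects.

/*
 * Sketch only, assuming Linux kernel integer types (u32/u64): how a
 * perfmux aperture range and stride translate into instance slots.
 */
static inline u32 t264_rtr_inst_slots_example(u64 range_start, u64 range_end,
	u64 inst_stride)
{
	/* For the RTR perfmux aperture: 0x4000 / 0x2000 = 2 slots (PMA, RTR) */
	return (u32)((range_end - range_start + 1ULL) / inst_stride);
}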

View File

@@ -0,0 +1,43 @@
/* SPDX-License-Identifier: MIT */
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef T264_HWPM_IP_RTR_H
#define T264_HWPM_IP_RTR_H
#define T264_HWPM_ACTIVE_IP_RTR T264_HWPM_IP_RTR,
/* This data should ideally be available in HW headers */
#define T264_HWPM_IP_RTR_NUM_INSTANCES 2U
#define T264_HWPM_IP_RTR_NUM_CORE_ELEMENT_PER_INST 1U
#define T264_HWPM_IP_RTR_NUM_PERFMON_PER_INST 0U
#define T264_HWPM_IP_RTR_NUM_PERFMUX_PER_INST 1U
#define T264_HWPM_IP_RTR_NUM_BROADCAST_PER_INST 0U
#define T264_HWPM_IP_RTR_STATIC_RTR_INST 0U
#define T264_HWPM_IP_RTR_STATIC_RTR_PERFMUX_INDEX 0U
#define T264_HWPM_IP_RTR_STATIC_PMA_INST 1U
#define T264_HWPM_IP_RTR_STATIC_PMA_PERFMUX_INDEX 0U
extern struct hwpm_ip t264_hwpm_ip_rtr;
#endif /* T264_HWPM_IP_RTR_H */
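The STATIC_*_INST and STATIC_*_PERFMUX_INDEX constants give fixed coordinates for the two apertures defined in t264_rtr.c. Below is a minimal sketch of how they could be used to pick an aperture out of the static arrays; the function name is hypothetical, the field names mirror the initializers in this change, and indexing element_info with TEGRA_HWPM_APERTURE_TYPE_PERFMUX assumes the aperture-type enum matches the initializer order shown in the comments.

/*
 * Sketch only: fetch the RTR or PMA perfmux aperture using the fixed
 * instance/index constants above. Field names mirror t264_rtr.c.
 */
static inline struct hwpm_ip_aperture *t264_rtr_perfmux_example(
	struct hwpm_ip *rtr, bool want_pma)
{
	u32 inst = want_pma ? T264_HWPM_IP_RTR_STATIC_PMA_INST :
		T264_HWPM_IP_RTR_STATIC_RTR_INST;
	u32 idx = want_pma ? T264_HWPM_IP_RTR_STATIC_PMA_PERFMUX_INDEX :
		T264_HWPM_IP_RTR_STATIC_RTR_PERFMUX_INDEX;
	struct hwpm_ip_inst *ip_inst = &rtr->ip_inst_static_array[inst];

	return &ip_inst->element_info[TEGRA_HWPM_APERTURE_TYPE_PERFMUX]
		.element_static_array[idx];
}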

View File

@@ -0,0 +1,615 @@
// SPDX-License-Identifier: MIT
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* This is a generated file. Do not edit.
*
* Steps to regenerate:
* python3 ip_files_generator.py <soc_chip> <IP_name> [<dir_name>]
*/
#include "t264_smmu.h"
#include <tegra_hwpm.h>
#include <hal/t264/t264_regops_allowlist.h>
#include <hal/t264/t264_perfmon_device_index.h>
#include <hal/t264/hw/t264_addr_map_soc_hwpm.h>
static struct hwpm_ip_aperture t264_smmu_inst0_perfmon_element_static_array[
T264_HWPM_IP_SMMU_NUM_PERFMON_PER_INST] = {
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = "perfmon_ucftcu0",
.device_index = T264_UCF_TCU0_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_ucf_smmu0_base_r(),
.end_abs_pa = addr_map_rpg_pm_ucf_smmu0_limit_r(),
.start_pa = addr_map_rpg_pm_ucf_smmu0_base_r(),
.end_pa = addr_map_rpg_pm_ucf_smmu0_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_smmu_inst1_perfmon_element_static_array[
T264_HWPM_IP_SMMU_NUM_PERFMON_PER_INST] = {
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = "perfmon_ucftcu1",
.device_index = T264_UCF_TCU1_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_ucf_smmu1_base_r(),
.end_abs_pa = addr_map_rpg_pm_ucf_smmu1_limit_r(),
.start_pa = addr_map_rpg_pm_ucf_smmu1_base_r(),
.end_pa = addr_map_rpg_pm_ucf_smmu1_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_smmu_inst2_perfmon_element_static_array[
T264_HWPM_IP_SMMU_NUM_PERFMON_PER_INST] = {
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = "perfmon_ucftcu3",
.device_index = T264_UCF_TCU3_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_ucf_smmu3_base_r(),
.end_abs_pa = addr_map_rpg_pm_ucf_smmu3_limit_r(),
.start_pa = addr_map_rpg_pm_ucf_smmu3_base_r(),
.end_pa = addr_map_rpg_pm_ucf_smmu3_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_smmu_inst3_perfmon_element_static_array[
T264_HWPM_IP_SMMU_NUM_PERFMON_PER_INST] = {
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = "perfmon_ucftcu2",
.device_index = T264_UCF_TCU2_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_ucf_smmu2_base_r(),
.end_abs_pa = addr_map_rpg_pm_ucf_smmu2_limit_r(),
.start_pa = addr_map_rpg_pm_ucf_smmu2_base_r(),
.end_pa = addr_map_rpg_pm_ucf_smmu2_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_smmu_inst4_perfmon_element_static_array[
T264_HWPM_IP_SMMU_NUM_PERFMON_PER_INST] = {
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = "perfmon_dispusbtcu0",
.device_index = T264_DISP_USB_TCU0_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_disp_usb_smmu0_base_r(),
.end_abs_pa = addr_map_rpg_pm_disp_usb_smmu0_limit_r(),
.start_pa = addr_map_rpg_pm_disp_usb_smmu0_base_r(),
.end_pa = addr_map_rpg_pm_disp_usb_smmu0_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_smmu_inst0_perfmux_element_static_array[
T264_HWPM_IP_SMMU_NUM_PERFMUX_PER_INST] = {
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_smmu2_base_r(),
.end_abs_pa = addr_map_smmu2_limit_r(),
.start_pa = addr_map_smmu2_base_r(),
.end_pa = addr_map_smmu2_limit_r(),
.base_pa = 0ULL,
.alist = t264_smmu_alist,
.alist_size = ARRAY_SIZE(t264_smmu_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_smmu_inst1_perfmux_element_static_array[
T264_HWPM_IP_SMMU_NUM_PERFMUX_PER_INST] = {
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_smmu1_base_r(),
.end_abs_pa = addr_map_smmu1_limit_r(),
.start_pa = addr_map_smmu1_base_r(),
.end_pa = addr_map_smmu1_limit_r(),
.base_pa = 0ULL,
.alist = t264_smmu_alist,
.alist_size = ARRAY_SIZE(t264_smmu_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_smmu_inst2_perfmux_element_static_array[
T264_HWPM_IP_SMMU_NUM_PERFMUX_PER_INST] = {
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_smmu4_base_r(),
.end_abs_pa = addr_map_smmu4_limit_r(),
.start_pa = addr_map_smmu4_base_r(),
.end_pa = addr_map_smmu4_limit_r(),
.base_pa = 0ULL,
.alist = t264_smmu_alist,
.alist_size = ARRAY_SIZE(t264_smmu_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_smmu_inst3_perfmux_element_static_array[
T264_HWPM_IP_SMMU_NUM_PERFMUX_PER_INST] = {
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_smmu0_base_r(),
.end_abs_pa = addr_map_smmu0_limit_r(),
.start_pa = addr_map_smmu0_base_r(),
.end_pa = addr_map_smmu0_limit_r(),
.base_pa = 0ULL,
.alist = t264_smmu_alist,
.alist_size = ARRAY_SIZE(t264_smmu_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_smmu_inst4_perfmux_element_static_array[
T264_HWPM_IP_SMMU_NUM_PERFMUX_PER_INST] = {
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_smmu3_base_r(),
.end_abs_pa = addr_map_smmu3_limit_r(),
.start_pa = addr_map_smmu3_base_r(),
.end_pa = addr_map_smmu3_limit_r(),
.base_pa = 0ULL,
.alist = t264_smmu_alist,
.alist_size = ARRAY_SIZE(t264_smmu_alist),
.fake_registers = NULL,
},
};
/* IP instance array */
static struct hwpm_ip_inst t264_smmu_inst_static_array[
T264_HWPM_IP_SMMU_NUM_INSTANCES] = {
{
.hw_inst_mask = BIT(0),
.num_core_elements_per_inst =
T264_HWPM_IP_SMMU_NUM_CORE_ELEMENT_PER_INST,
.element_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
.num_element_per_inst =
T264_HWPM_IP_SMMU_NUM_PERFMUX_PER_INST,
.element_static_array =
t264_smmu_inst0_perfmux_element_static_array,
/* NOTE: range should be in ascending order */
.range_start = addr_map_smmu2_base_r(),
.range_end = addr_map_smmu2_limit_r(),
.element_stride = addr_map_smmu2_limit_r() -
addr_map_smmu2_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.num_element_per_inst =
T264_HWPM_IP_SMMU_NUM_BROADCAST_PER_INST,
.element_static_array = NULL,
.range_start = 0ULL,
.range_end = 0ULL,
.element_stride = 0ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.num_element_per_inst =
T264_HWPM_IP_SMMU_NUM_PERFMON_PER_INST,
.element_static_array =
t264_smmu_inst0_perfmon_element_static_array,
.range_start = addr_map_rpg_pm_ucf_smmu0_base_r(),
.range_end = addr_map_rpg_pm_ucf_smmu0_limit_r(),
.element_stride = addr_map_rpg_pm_ucf_smmu0_limit_r() -
addr_map_rpg_pm_ucf_smmu0_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
},
.ip_ops = {
.ip_dev = NULL,
.hwpm_ip_pm = NULL,
.hwpm_ip_reg_op = NULL,
.fd = TEGRA_HWPM_IP_DEBUG_FD_INVALID,
},
.element_fs_mask = 0U,
.dev_name = "",
},
{
.hw_inst_mask = BIT(1),
.num_core_elements_per_inst =
T264_HWPM_IP_SMMU_NUM_CORE_ELEMENT_PER_INST,
.element_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
.num_element_per_inst =
T264_HWPM_IP_SMMU_NUM_PERFMUX_PER_INST,
.element_static_array =
t264_smmu_inst1_perfmux_element_static_array,
/* NOTE: range should be in ascending order */
.range_start = addr_map_smmu1_base_r(),
.range_end = addr_map_smmu1_limit_r(),
.element_stride = addr_map_smmu1_limit_r() -
addr_map_smmu1_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.num_element_per_inst =
T264_HWPM_IP_SMMU_NUM_BROADCAST_PER_INST,
.element_static_array = NULL,
.range_start = 0ULL,
.range_end = 0ULL,
.element_stride = 0ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.num_element_per_inst =
T264_HWPM_IP_SMMU_NUM_PERFMON_PER_INST,
.element_static_array =
t264_smmu_inst1_perfmon_element_static_array,
.range_start = addr_map_rpg_pm_ucf_smmu1_base_r(),
.range_end = addr_map_rpg_pm_ucf_smmu1_limit_r(),
.element_stride = addr_map_rpg_pm_ucf_smmu1_limit_r() -
addr_map_rpg_pm_ucf_smmu1_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
},
.ip_ops = {
.ip_dev = NULL,
.hwpm_ip_pm = NULL,
.hwpm_ip_reg_op = NULL,
.fd = TEGRA_HWPM_IP_DEBUG_FD_INVALID,
},
.element_fs_mask = 0U,
.dev_name = "",
},
{
.hw_inst_mask = BIT(2),
.num_core_elements_per_inst =
T264_HWPM_IP_SMMU_NUM_CORE_ELEMENT_PER_INST,
.element_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
.num_element_per_inst =
T264_HWPM_IP_SMMU_NUM_PERFMUX_PER_INST,
.element_static_array =
t264_smmu_inst2_perfmux_element_static_array,
/* NOTE: range should be in ascending order */
.range_start = addr_map_smmu4_base_r(),
.range_end = addr_map_smmu4_limit_r(),
.element_stride = addr_map_smmu4_limit_r() -
addr_map_smmu4_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.num_element_per_inst =
T264_HWPM_IP_SMMU_NUM_BROADCAST_PER_INST,
.element_static_array = NULL,
.range_start = 0ULL,
.range_end = 0ULL,
.element_stride = 0ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.num_element_per_inst =
T264_HWPM_IP_SMMU_NUM_PERFMON_PER_INST,
.element_static_array =
t264_smmu_inst2_perfmon_element_static_array,
.range_start = addr_map_rpg_pm_ucf_smmu3_base_r(),
.range_end = addr_map_rpg_pm_ucf_smmu3_limit_r(),
.element_stride = addr_map_rpg_pm_ucf_smmu3_limit_r() -
addr_map_rpg_pm_ucf_smmu3_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
},
.ip_ops = {
.ip_dev = NULL,
.hwpm_ip_pm = NULL,
.hwpm_ip_reg_op = NULL,
.fd = TEGRA_HWPM_IP_DEBUG_FD_INVALID,
},
.element_fs_mask = 0U,
.dev_name = "",
},
{
.hw_inst_mask = BIT(3),
.num_core_elements_per_inst =
T264_HWPM_IP_SMMU_NUM_CORE_ELEMENT_PER_INST,
.element_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
.num_element_per_inst =
T264_HWPM_IP_SMMU_NUM_PERFMUX_PER_INST,
.element_static_array =
t264_smmu_inst3_perfmux_element_static_array,
/* NOTE: range should be in ascending order */
.range_start = addr_map_smmu0_base_r(),
.range_end = addr_map_smmu0_limit_r(),
.element_stride = addr_map_smmu0_limit_r() -
addr_map_smmu0_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.num_element_per_inst =
T264_HWPM_IP_SMMU_NUM_BROADCAST_PER_INST,
.element_static_array = NULL,
.range_start = 0ULL,
.range_end = 0ULL,
.element_stride = 0ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.num_element_per_inst =
T264_HWPM_IP_SMMU_NUM_PERFMON_PER_INST,
.element_static_array =
t264_smmu_inst3_perfmon_element_static_array,
.range_start = addr_map_rpg_pm_ucf_smmu2_base_r(),
.range_end = addr_map_rpg_pm_ucf_smmu2_limit_r(),
.element_stride = addr_map_rpg_pm_ucf_smmu2_limit_r() -
addr_map_rpg_pm_ucf_smmu2_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
},
.ip_ops = {
.ip_dev = NULL,
.hwpm_ip_pm = NULL,
.hwpm_ip_reg_op = NULL,
.fd = TEGRA_HWPM_IP_DEBUG_FD_INVALID,
},
.element_fs_mask = 0U,
.dev_name = "",
},
{
.hw_inst_mask = BIT(4),
.num_core_elements_per_inst =
T264_HWPM_IP_SMMU_NUM_CORE_ELEMENT_PER_INST,
.element_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
.num_element_per_inst =
T264_HWPM_IP_SMMU_NUM_PERFMUX_PER_INST,
.element_static_array =
t264_smmu_inst4_perfmux_element_static_array,
/* NOTE: range should be in ascending order */
.range_start = addr_map_smmu3_base_r(),
.range_end = addr_map_smmu3_limit_r(),
.element_stride = addr_map_smmu3_limit_r() -
addr_map_smmu3_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.num_element_per_inst =
T264_HWPM_IP_SMMU_NUM_BROADCAST_PER_INST,
.element_static_array = NULL,
.range_start = 0ULL,
.range_end = 0ULL,
.element_stride = 0ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.num_element_per_inst =
T264_HWPM_IP_SMMU_NUM_PERFMON_PER_INST,
.element_static_array =
t264_smmu_inst4_perfmon_element_static_array,
.range_start = addr_map_rpg_pm_disp_usb_smmu0_base_r(),
.range_end = addr_map_rpg_pm_disp_usb_smmu0_limit_r(),
.element_stride = addr_map_rpg_pm_disp_usb_smmu0_limit_r() -
addr_map_rpg_pm_disp_usb_smmu0_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
},
.ip_ops = {
.ip_dev = NULL,
.hwpm_ip_pm = NULL,
.hwpm_ip_reg_op = NULL,
.fd = TEGRA_HWPM_IP_DEBUG_FD_INVALID,
},
.element_fs_mask = 0U,
.dev_name = "",
},
};
/* IP structure */
struct hwpm_ip t264_hwpm_ip_smmu = {
.num_instances = T264_HWPM_IP_SMMU_NUM_INSTANCES,
.ip_inst_static_array = t264_smmu_inst_static_array,
.inst_aperture_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
/* NOTE: range should be in ascending order */
.range_start = addr_map_smmu1_base_r(),
.range_end = addr_map_smmu3_limit_r(),
.inst_stride = addr_map_smmu1_limit_r() -
addr_map_smmu1_base_r() + 1ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.range_start = 0ULL,
.range_end = 0ULL,
.inst_stride = 0ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.range_start = addr_map_rpg_pm_ucf_smmu0_base_r(),
.range_end = addr_map_rpg_pm_disp_usb_smmu0_limit_r(),
.inst_stride = addr_map_rpg_pm_ucf_smmu0_limit_r() -
addr_map_rpg_pm_ucf_smmu0_base_r() + 1ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
},
	.dependent_fuse_mask = TEGRA_HWPM_FUSE_OPT_HWPM_DISABLE_MASK |
		TEGRA_HWPM_FUSE_HWPM_GLOBAL_DISABLE_MASK,
.override_enable = false,
.inst_fs_mask = 0U,
.resource_status = TEGRA_HWPM_RESOURCE_STATUS_INVALID,
.reserved = false,
};

View File

@@ -0,0 +1,48 @@
/* SPDX-License-Identifier: MIT */
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* This is a generated file. Do not edit.
*
* Steps to regenerate:
* python3 ip_files_generator.py <soc_chip> <IP_name> [<dir_name>]
*/
#ifndef T264_HWPM_IP_SMMU_H
#define T264_HWPM_IP_SMMU_H
#if defined(CONFIG_T264_HWPM_IP_SMMU)
#define T264_HWPM_ACTIVE_IP_SMMU T264_HWPM_IP_SMMU,
/* This data should ideally be available in HW headers */
#define T264_HWPM_IP_SMMU_NUM_INSTANCES 5U
#define T264_HWPM_IP_SMMU_NUM_CORE_ELEMENT_PER_INST 1U
#define T264_HWPM_IP_SMMU_NUM_PERFMON_PER_INST 1U
#define T264_HWPM_IP_SMMU_NUM_PERFMUX_PER_INST 1U
#define T264_HWPM_IP_SMMU_NUM_BROADCAST_PER_INST 0U
extern struct hwpm_ip t264_hwpm_ip_smmu;
#else
#define T264_HWPM_ACTIVE_IP_SMMU
#endif
#endif /* T264_HWPM_IP_SMMU_H */

View File

@@ -0,0 +1,300 @@
// SPDX-License-Identifier: MIT
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* This is a generated file. Do not edit.
*
* Steps to regenerate:
* python3 ip_files_generator.py <soc_chip> <IP_name> [<dir_name>]
*/
#include "t264_ucf_csw.h"
#include <tegra_hwpm.h>
#include <hal/t264/t264_regops_allowlist.h>
#include <hal/t264/t264_perfmon_device_index.h>
#include <hal/t264/hw/t264_addr_map_soc_hwpm.h>
static struct hwpm_ip_aperture t264_ucf_csw_inst0_perfmon_element_static_array[
T264_HWPM_IP_UCF_CSW_NUM_PERFMON_PER_INST] = {
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = "perfmon_ucfcsw0",
.device_index = T264_UCF_CSW0_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_ucf_vddmss0_base_r(),
.end_abs_pa = addr_map_rpg_pm_ucf_vddmss0_limit_r(),
.start_pa = addr_map_rpg_pm_ucf_vddmss0_base_r(),
.end_pa = addr_map_rpg_pm_ucf_vddmss0_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_ucf_csw_inst1_perfmon_element_static_array[
T264_HWPM_IP_UCF_CSW_NUM_PERFMON_PER_INST] = {
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = "perfmon_ucfcsw1",
.device_index = T264_UCF_CSW1_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_ucf_vddmss1_base_r(),
.end_abs_pa = addr_map_rpg_pm_ucf_vddmss1_limit_r(),
.start_pa = addr_map_rpg_pm_ucf_vddmss1_base_r(),
.end_pa = addr_map_rpg_pm_ucf_vddmss1_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_ucf_csw_inst0_perfmux_element_static_array[
T264_HWPM_IP_UCF_CSW_NUM_PERFMUX_PER_INST] = {
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_ucf_csw0_base_r(),
.end_abs_pa = addr_map_ucf_csw0_limit_r(),
.start_pa = addr_map_ucf_csw0_base_r(),
.end_pa = addr_map_ucf_csw0_limit_r(),
.base_pa = 0ULL,
.alist = t264_ucf_csw_alist,
.alist_size = ARRAY_SIZE(t264_ucf_csw_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_ucf_csw_inst1_perfmux_element_static_array[
T264_HWPM_IP_UCF_CSW_NUM_PERFMUX_PER_INST] = {
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_ucf_csw1_base_r(),
.end_abs_pa = addr_map_ucf_csw1_limit_r(),
.start_pa = addr_map_ucf_csw1_base_r(),
.end_pa = addr_map_ucf_csw1_limit_r(),
.base_pa = 0ULL,
.alist = t264_ucf_csw_alist,
.alist_size = ARRAY_SIZE(t264_ucf_csw_alist),
.fake_registers = NULL,
},
};
/* IP instance array */
static struct hwpm_ip_inst t264_ucf_csw_inst_static_array[
T264_HWPM_IP_UCF_CSW_NUM_INSTANCES] = {
{
.hw_inst_mask = BIT(0),
.num_core_elements_per_inst =
T264_HWPM_IP_UCF_CSW_NUM_CORE_ELEMENT_PER_INST,
.element_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
.num_element_per_inst =
T264_HWPM_IP_UCF_CSW_NUM_PERFMUX_PER_INST,
.element_static_array =
t264_ucf_csw_inst0_perfmux_element_static_array,
/* NOTE: range should be in ascending order */
.range_start = addr_map_ucf_csw0_base_r(),
.range_end = addr_map_ucf_csw0_limit_r(),
.element_stride = addr_map_ucf_csw0_limit_r() -
addr_map_ucf_csw0_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.num_element_per_inst =
T264_HWPM_IP_UCF_CSW_NUM_BROADCAST_PER_INST,
.element_static_array = NULL,
.range_start = 0ULL,
.range_end = 0ULL,
.element_stride = 0ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.num_element_per_inst =
T264_HWPM_IP_UCF_CSW_NUM_PERFMON_PER_INST,
.element_static_array =
t264_ucf_csw_inst0_perfmon_element_static_array,
.range_start = addr_map_rpg_pm_ucf_vddmss0_base_r(),
.range_end = addr_map_rpg_pm_ucf_vddmss0_limit_r(),
.element_stride = addr_map_rpg_pm_ucf_vddmss0_limit_r() -
addr_map_rpg_pm_ucf_vddmss0_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
},
.ip_ops = {
.ip_dev = NULL,
.hwpm_ip_pm = NULL,
.hwpm_ip_reg_op = NULL,
.fd = TEGRA_HWPM_IP_DEBUG_FD_INVALID,
},
.element_fs_mask = 0U,
.dev_name = "",
},
{
.hw_inst_mask = BIT(1),
.num_core_elements_per_inst =
T264_HWPM_IP_UCF_CSW_NUM_CORE_ELEMENT_PER_INST,
.element_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
.num_element_per_inst =
T264_HWPM_IP_UCF_CSW_NUM_PERFMUX_PER_INST,
.element_static_array =
t264_ucf_csw_inst1_perfmux_element_static_array,
/* NOTE: range should be in ascending order */
.range_start = addr_map_ucf_csw1_base_r(),
.range_end = addr_map_ucf_csw1_limit_r(),
.element_stride = addr_map_ucf_csw1_limit_r() -
addr_map_ucf_csw1_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.num_element_per_inst =
T264_HWPM_IP_UCF_CSW_NUM_BROADCAST_PER_INST,
.element_static_array = NULL,
.range_start = 0ULL,
.range_end = 0ULL,
.element_stride = 0ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.num_element_per_inst =
T264_HWPM_IP_UCF_CSW_NUM_PERFMON_PER_INST,
.element_static_array =
t264_ucf_csw_inst1_perfmon_element_static_array,
.range_start = addr_map_rpg_pm_ucf_vddmss1_base_r(),
.range_end = addr_map_rpg_pm_ucf_vddmss1_limit_r(),
.element_stride = addr_map_rpg_pm_ucf_vddmss1_limit_r() -
addr_map_rpg_pm_ucf_vddmss1_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
},
.ip_ops = {
.ip_dev = NULL,
.hwpm_ip_pm = NULL,
.hwpm_ip_reg_op = NULL,
.fd = TEGRA_HWPM_IP_DEBUG_FD_INVALID,
},
.element_fs_mask = 0U,
.dev_name = "",
},
};
/* IP structure */
struct hwpm_ip t264_hwpm_ip_ucf_csw = {
.num_instances = T264_HWPM_IP_UCF_CSW_NUM_INSTANCES,
.ip_inst_static_array = t264_ucf_csw_inst_static_array,
.inst_aperture_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
/* NOTE: range should be in ascending order */
.range_start = addr_map_ucf_csw0_base_r(),
.range_end = addr_map_ucf_csw1_limit_r(),
.inst_stride = addr_map_ucf_csw0_limit_r() -
addr_map_ucf_csw0_base_r() + 1ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.range_start = 0ULL,
.range_end = 0ULL,
.inst_stride = 0ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.range_start = addr_map_rpg_pm_ucf_vddmss0_base_r(),
.range_end = addr_map_rpg_pm_ucf_vddmss1_limit_r(),
.inst_stride = addr_map_rpg_pm_ucf_vddmss0_limit_r() -
addr_map_rpg_pm_ucf_vddmss0_base_r() + 1ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
},
	.dependent_fuse_mask = TEGRA_HWPM_FUSE_OPT_HWPM_DISABLE_MASK |
		TEGRA_HWPM_FUSE_HWPM_GLOBAL_DISABLE_MASK,
.override_enable = false,
.inst_fs_mask = 0U,
.resource_status = TEGRA_HWPM_RESOURCE_STATUS_INVALID,
.reserved = false,
};

View File

@@ -0,0 +1,48 @@
/* SPDX-License-Identifier: MIT */
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* This is a generated file. Do not edit.
*
* Steps to regenerate:
* python3 ip_files_generator.py <soc_chip> <IP_name> [<dir_name>]
*/
#ifndef T264_HWPM_IP_UCF_CSW_H
#define T264_HWPM_IP_UCF_CSW_H
#if defined(CONFIG_T264_HWPM_IP_UCF_CSW)
#define T264_HWPM_ACTIVE_IP_UCF_CSW T264_HWPM_IP_UCF_CSW,
/* This data should ideally be available in HW headers */
#define T264_HWPM_IP_UCF_CSW_NUM_INSTANCES 2U
#define T264_HWPM_IP_UCF_CSW_NUM_CORE_ELEMENT_PER_INST 1U
#define T264_HWPM_IP_UCF_CSW_NUM_PERFMON_PER_INST 1U
#define T264_HWPM_IP_UCF_CSW_NUM_PERFMUX_PER_INST 1U
#define T264_HWPM_IP_UCF_CSW_NUM_BROADCAST_PER_INST 0U
extern struct hwpm_ip t264_hwpm_ip_ucf_csw;
#else
#define T264_HWPM_ACTIVE_IP_UCF_CSW
#endif
#endif /* T264_HWPM_IP_UCF_CSW_H */

View File

File diff suppressed because it is too large

View File

@@ -0,0 +1,48 @@
/* SPDX-License-Identifier: MIT */
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* This is a generated file. Do not edit.
*
* Steps to regenerate:
* python3 ip_files_generator.py <soc_chip> <IP_name> [<dir_name>]
*/
#ifndef T264_HWPM_IP_UCF_MSW_H
#define T264_HWPM_IP_UCF_MSW_H
#if defined(CONFIG_T264_HWPM_IP_UCF_MSW)
#define T264_HWPM_ACTIVE_IP_UCF_MSW T264_HWPM_IP_UCF_MSW,
/* This data should ideally be available in HW headers */
#define T264_HWPM_IP_UCF_MSW_NUM_INSTANCES 8U
#define T264_HWPM_IP_UCF_MSW_NUM_CORE_ELEMENT_PER_INST 2U
#define T264_HWPM_IP_UCF_MSW_NUM_PERFMON_PER_INST 2U
#define T264_HWPM_IP_UCF_MSW_NUM_PERFMUX_PER_INST 6U
#define T264_HWPM_IP_UCF_MSW_NUM_BROADCAST_PER_INST 0U
extern struct hwpm_ip t264_hwpm_ip_ucf_msw;
#else
#define T264_HWPM_ACTIVE_IP_UCF_MSW
#endif
#endif /* T264_HWPM_IP_UCF_MSW_H */

View File

@@ -0,0 +1,510 @@
// SPDX-License-Identifier: MIT
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* This is a generated file. Do not edit.
*
* Steps to regenerate:
* python3 ip_files_generator.py <soc_chip> <IP_name> [<dir_name>]
*/
#include "t264_ucf_psw.h"
#include <tegra_hwpm.h>
#include <hal/t264/t264_regops_allowlist.h>
#include <hal/t264/t264_perfmon_device_index.h>
#include <hal/t264/hw/t264_addr_map_soc_hwpm.h>
static struct hwpm_ip_aperture t264_ucf_psw_inst0_perfmon_element_static_array[
T264_HWPM_IP_UCF_PSW_NUM_PERFMON_PER_INST] = {
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = "perfmon_ucfpsw0",
.device_index = T264_UCF_PSW0_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_ucf_psw0_base_r(),
.end_abs_pa = addr_map_rpg_pm_ucf_psw0_limit_r(),
.start_pa = addr_map_rpg_pm_ucf_psw0_base_r(),
.end_pa = addr_map_rpg_pm_ucf_psw0_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_ucf_psw_inst1_perfmon_element_static_array[
T264_HWPM_IP_UCF_PSW_NUM_PERFMON_PER_INST] = {
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = "perfmon_ucfpsw1",
.device_index = T264_UCF_PSW1_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_ucf_psw1_base_r(),
.end_abs_pa = addr_map_rpg_pm_ucf_psw1_limit_r(),
.start_pa = addr_map_rpg_pm_ucf_psw1_base_r(),
.end_pa = addr_map_rpg_pm_ucf_psw1_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_ucf_psw_inst2_perfmon_element_static_array[
T264_HWPM_IP_UCF_PSW_NUM_PERFMON_PER_INST] = {
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = "perfmon_ucfpsw2",
.device_index = T264_UCF_PSW2_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_ucf_psw2_base_r(),
.end_abs_pa = addr_map_rpg_pm_ucf_psw2_limit_r(),
.start_pa = addr_map_rpg_pm_ucf_psw2_base_r(),
.end_pa = addr_map_rpg_pm_ucf_psw2_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_ucf_psw_inst3_perfmon_element_static_array[
T264_HWPM_IP_UCF_PSW_NUM_PERFMON_PER_INST] = {
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = "perfmon_ucfpsw3",
.device_index = T264_UCF_PSW3_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_ucf_psw3_base_r(),
.end_abs_pa = addr_map_rpg_pm_ucf_psw3_limit_r(),
.start_pa = addr_map_rpg_pm_ucf_psw3_base_r(),
.end_pa = addr_map_rpg_pm_ucf_psw3_limit_r(),
.base_pa = addr_map_rpg_grp_ucf_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_ucf_psw_inst0_perfmux_element_static_array[
T264_HWPM_IP_UCF_PSW_NUM_PERFMUX_PER_INST] = {
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_ucf_psn0_psw_base_r(),
.end_abs_pa = addr_map_ucf_psn0_psw_limit_r(),
.start_pa = addr_map_ucf_psn0_psw_base_r(),
.end_pa = addr_map_ucf_psn0_psw_limit_r(),
.base_pa = 0ULL,
.alist = t264_ucf_psn_psw_alist,
.alist_size = ARRAY_SIZE(t264_ucf_psn_psw_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_ucf_psw_inst1_perfmux_element_static_array[
T264_HWPM_IP_UCF_PSW_NUM_PERFMUX_PER_INST] = {
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_ucf_psn1_psw_base_r(),
.end_abs_pa = addr_map_ucf_psn1_psw_limit_r(),
.start_pa = addr_map_ucf_psn1_psw_base_r(),
.end_pa = addr_map_ucf_psn1_psw_limit_r(),
.base_pa = 0ULL,
.alist = t264_ucf_psn_psw_alist,
.alist_size = ARRAY_SIZE(t264_ucf_psn_psw_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_ucf_psw_inst2_perfmux_element_static_array[
T264_HWPM_IP_UCF_PSW_NUM_PERFMUX_PER_INST] = {
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_ucf_psn2_psw_base_r(),
.end_abs_pa = addr_map_ucf_psn2_psw_limit_r(),
.start_pa = addr_map_ucf_psn2_psw_base_r(),
.end_pa = addr_map_ucf_psn2_psw_limit_r(),
.base_pa = 0ULL,
.alist = t264_ucf_psn_psw_alist,
.alist_size = ARRAY_SIZE(t264_ucf_psn_psw_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_ucf_psw_inst3_perfmux_element_static_array[
T264_HWPM_IP_UCF_PSW_NUM_PERFMUX_PER_INST] = {
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_ucf_psn3_psw_base_r(),
.end_abs_pa = addr_map_ucf_psn3_psw_limit_r(),
.start_pa = addr_map_ucf_psn3_psw_base_r(),
.end_pa = addr_map_ucf_psn3_psw_limit_r(),
.base_pa = 0ULL,
.alist = t264_ucf_psn_psw_alist,
.alist_size = ARRAY_SIZE(t264_ucf_psn_psw_alist),
.fake_registers = NULL,
},
};
/* IP instance array */
static struct hwpm_ip_inst t264_ucf_psw_inst_static_array[
T264_HWPM_IP_UCF_PSW_NUM_INSTANCES] = {
{
.hw_inst_mask = BIT(0),
.num_core_elements_per_inst =
T264_HWPM_IP_UCF_PSW_NUM_CORE_ELEMENT_PER_INST,
.element_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
.num_element_per_inst =
T264_HWPM_IP_UCF_PSW_NUM_PERFMUX_PER_INST,
.element_static_array =
t264_ucf_psw_inst0_perfmux_element_static_array,
/* NOTE: range should be in ascending order */
.range_start = addr_map_ucf_psn0_psw_base_r(),
.range_end = addr_map_ucf_psn0_psw_limit_r(),
.element_stride = addr_map_ucf_psn0_psw_limit_r() -
addr_map_ucf_psn0_psw_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.num_element_per_inst =
T264_HWPM_IP_UCF_PSW_NUM_BROADCAST_PER_INST,
.element_static_array = NULL,
.range_start = 0ULL,
.range_end = 0ULL,
.element_stride = 0ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.num_element_per_inst =
T264_HWPM_IP_UCF_PSW_NUM_PERFMON_PER_INST,
.element_static_array =
t264_ucf_psw_inst0_perfmon_element_static_array,
.range_start = addr_map_rpg_pm_ucf_psw0_base_r(),
.range_end = addr_map_rpg_pm_ucf_psw0_limit_r(),
.element_stride = addr_map_rpg_pm_ucf_psw0_limit_r() -
addr_map_rpg_pm_ucf_psw0_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
},
.ip_ops = {
.ip_dev = NULL,
.hwpm_ip_pm = NULL,
.hwpm_ip_reg_op = NULL,
.fd = TEGRA_HWPM_IP_DEBUG_FD_INVALID,
},
.element_fs_mask = 0U,
.dev_name = "",
},
{
.hw_inst_mask = BIT(1),
.num_core_elements_per_inst =
T264_HWPM_IP_UCF_PSW_NUM_CORE_ELEMENT_PER_INST,
.element_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
.num_element_per_inst =
T264_HWPM_IP_UCF_PSW_NUM_PERFMUX_PER_INST,
.element_static_array =
t264_ucf_psw_inst1_perfmux_element_static_array,
/* NOTE: range should be in ascending order */
.range_start = addr_map_ucf_psn1_psw_base_r(),
.range_end = addr_map_ucf_psn1_psw_limit_r(),
.element_stride = addr_map_ucf_psn1_psw_limit_r() -
addr_map_ucf_psn1_psw_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.num_element_per_inst =
T264_HWPM_IP_UCF_PSW_NUM_BROADCAST_PER_INST,
.element_static_array = NULL,
.range_start = 0ULL,
.range_end = 0ULL,
.element_stride = 0ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.num_element_per_inst =
T264_HWPM_IP_UCF_PSW_NUM_PERFMON_PER_INST,
.element_static_array =
t264_ucf_psw_inst1_perfmon_element_static_array,
.range_start = addr_map_rpg_pm_ucf_psw1_base_r(),
.range_end = addr_map_rpg_pm_ucf_psw1_limit_r(),
.element_stride = addr_map_rpg_pm_ucf_psw1_limit_r() -
addr_map_rpg_pm_ucf_psw1_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
},
.ip_ops = {
.ip_dev = NULL,
.hwpm_ip_pm = NULL,
.hwpm_ip_reg_op = NULL,
.fd = TEGRA_HWPM_IP_DEBUG_FD_INVALID,
},
.element_fs_mask = 0U,
.dev_name = "",
},
{
.hw_inst_mask = BIT(2),
.num_core_elements_per_inst =
T264_HWPM_IP_UCF_PSW_NUM_CORE_ELEMENT_PER_INST,
.element_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
.num_element_per_inst =
T264_HWPM_IP_UCF_PSW_NUM_PERFMUX_PER_INST,
.element_static_array =
t264_ucf_psw_inst2_perfmux_element_static_array,
/* NOTE: range should be in ascending order */
.range_start = addr_map_ucf_psn2_psw_base_r(),
.range_end = addr_map_ucf_psn2_psw_limit_r(),
.element_stride = addr_map_ucf_psn2_psw_limit_r() -
addr_map_ucf_psn2_psw_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.num_element_per_inst =
T264_HWPM_IP_UCF_PSW_NUM_BROADCAST_PER_INST,
.element_static_array = NULL,
.range_start = 0ULL,
.range_end = 0ULL,
.element_stride = 0ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.num_element_per_inst =
T264_HWPM_IP_UCF_PSW_NUM_PERFMON_PER_INST,
.element_static_array =
t264_ucf_psw_inst2_perfmon_element_static_array,
.range_start = addr_map_rpg_pm_ucf_psw2_base_r(),
.range_end = addr_map_rpg_pm_ucf_psw2_limit_r(),
.element_stride = addr_map_rpg_pm_ucf_psw2_limit_r() -
addr_map_rpg_pm_ucf_psw2_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
},
.ip_ops = {
.ip_dev = NULL,
.hwpm_ip_pm = NULL,
.hwpm_ip_reg_op = NULL,
.fd = TEGRA_HWPM_IP_DEBUG_FD_INVALID,
},
.element_fs_mask = 0U,
.dev_name = "",
},
{
.hw_inst_mask = BIT(3),
.num_core_elements_per_inst =
T264_HWPM_IP_UCF_PSW_NUM_CORE_ELEMENT_PER_INST,
.element_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
.num_element_per_inst =
T264_HWPM_IP_UCF_PSW_NUM_PERFMUX_PER_INST,
.element_static_array =
t264_ucf_psw_inst3_perfmux_element_static_array,
/* NOTE: range should be in ascending order */
.range_start = addr_map_ucf_psn3_psw_base_r(),
.range_end = addr_map_ucf_psn3_psw_limit_r(),
.element_stride = addr_map_ucf_psn3_psw_limit_r() -
addr_map_ucf_psn3_psw_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.num_element_per_inst =
T264_HWPM_IP_UCF_PSW_NUM_BROADCAST_PER_INST,
.element_static_array = NULL,
.range_start = 0ULL,
.range_end = 0ULL,
.element_stride = 0ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.num_element_per_inst =
T264_HWPM_IP_UCF_PSW_NUM_PERFMON_PER_INST,
.element_static_array =
t264_ucf_psw_inst3_perfmon_element_static_array,
.range_start = addr_map_rpg_pm_ucf_psw3_base_r(),
.range_end = addr_map_rpg_pm_ucf_psw3_limit_r(),
.element_stride = addr_map_rpg_pm_ucf_psw3_limit_r() -
addr_map_rpg_pm_ucf_psw3_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
},
.ip_ops = {
.ip_dev = NULL,
.hwpm_ip_pm = NULL,
.hwpm_ip_reg_op = NULL,
.fd = TEGRA_HWPM_IP_DEBUG_FD_INVALID,
},
.element_fs_mask = 0U,
.dev_name = "",
},
};
/* IP structure */
struct hwpm_ip t264_hwpm_ip_ucf_psw = {
.num_instances = T264_HWPM_IP_UCF_PSW_NUM_INSTANCES,
.ip_inst_static_array = t264_ucf_psw_inst_static_array,
.inst_aperture_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
/* NOTE: range should be in ascending order */
.range_start = addr_map_ucf_psn0_psw_base_r(),
.range_end = addr_map_ucf_psn3_psw_limit_r(),
.inst_stride = addr_map_ucf_psn0_psw_limit_r() -
addr_map_ucf_psn0_psw_base_r() + 1ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.range_start = 0ULL,
.range_end = 0ULL,
.inst_stride = 0ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.range_start = addr_map_rpg_pm_ucf_psw0_base_r(),
.range_end = addr_map_rpg_pm_ucf_psw3_limit_r(),
.inst_stride = addr_map_rpg_pm_ucf_psw0_limit_r() -
addr_map_rpg_pm_ucf_psw0_base_r() + 1ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
},
	.dependent_fuse_mask = TEGRA_HWPM_FUSE_OPT_HWPM_DISABLE_MASK |
		TEGRA_HWPM_FUSE_HWPM_GLOBAL_DISABLE_MASK,
.override_enable = false,
.inst_fs_mask = 0U,
.resource_status = TEGRA_HWPM_RESOURCE_STATUS_INVALID,
.reserved = false,
};

View File

@@ -0,0 +1,48 @@
/* SPDX-License-Identifier: MIT */
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* This is a generated file. Do not edit.
*
* Steps to regenerate:
* python3 ip_files_generator.py <soc_chip> <IP_name> [<dir_name>]
*/
#ifndef T264_HWPM_IP_UCF_PSW_H
#define T264_HWPM_IP_UCF_PSW_H
#if defined(CONFIG_T264_HWPM_IP_UCF_PSW)
#define T264_HWPM_ACTIVE_IP_UCF_PSW T264_HWPM_IP_UCF_PSW,
/* This data should ideally be available in HW headers */
#define T264_HWPM_IP_UCF_PSW_NUM_INSTANCES 4U
#define T264_HWPM_IP_UCF_PSW_NUM_CORE_ELEMENT_PER_INST 1U
#define T264_HWPM_IP_UCF_PSW_NUM_PERFMON_PER_INST 1U
#define T264_HWPM_IP_UCF_PSW_NUM_PERFMUX_PER_INST 1U
#define T264_HWPM_IP_UCF_PSW_NUM_BROADCAST_PER_INST 0U
extern struct hwpm_ip t264_hwpm_ip_ucf_psw;
#else
#define T264_HWPM_ACTIVE_IP_UCF_PSW
#endif
#endif /* T264_HWPM_IP_UCF_PSW_H */

View File

@@ -0,0 +1,196 @@
// SPDX-License-Identifier: MIT
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* This is a generated file. Do not edit.
*
* Steps to regenerate:
* python3 ip_files_generator.py <soc_chip> <IP_name> [<dir_name>]
*/
#include "t264_vic.h"
#include <tegra_hwpm.h>
#include <hal/t264/t264_regops_allowlist.h>
#include <hal/t264/t264_perfmon_device_index.h>
#include <hal/t264/hw/t264_addr_map_soc_hwpm.h>
static struct hwpm_ip_aperture t264_vic_inst0_perfmon_element_static_array[
T264_HWPM_IP_VIC_NUM_PERFMON_PER_INST] = {
{
.element_type = HWPM_ELEMENT_PERFMON,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = "perfmon_vica0",
.device_index = T264_VICA0_PERFMON_DEVICE_NODE_INDEX,
.start_abs_pa = addr_map_rpg_pm_vic0_base_r(),
.end_abs_pa = addr_map_rpg_pm_vic0_limit_r(),
.start_pa = addr_map_rpg_pm_vic0_base_r(),
.end_pa = addr_map_rpg_pm_vic0_limit_r(),
.base_pa = addr_map_rpg_grp_vision_base_r(),
.alist = t264_perfmon_alist,
.alist_size = ARRAY_SIZE(t264_perfmon_alist),
.fake_registers = NULL,
},
};
static struct hwpm_ip_aperture t264_vic_inst0_perfmux_element_static_array[
T264_HWPM_IP_VIC_NUM_PERFMUX_PER_INST] = {
{
.element_type = IP_ELEMENT_PERFMUX,
.aperture_index = 0U,
.element_index_mask = BIT(0),
.element_index = 0U,
.dt_mmio = NULL,
.name = {'\0'},
.start_abs_pa = addr_map_vic_base_r(),
.end_abs_pa = addr_map_vic_limit_r(),
.start_pa = addr_map_vic_base_r(),
.end_pa = addr_map_vic_limit_r(),
.base_pa = 0ULL,
.alist = t264_vic_alist,
.alist_size = ARRAY_SIZE(t264_vic_alist),
.fake_registers = NULL,
},
};
/* IP instance array */
static struct hwpm_ip_inst t264_vic_inst_static_array[
T264_HWPM_IP_VIC_NUM_INSTANCES] = {
{
.hw_inst_mask = BIT(0),
.num_core_elements_per_inst =
T264_HWPM_IP_VIC_NUM_CORE_ELEMENT_PER_INST,
.element_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
.num_element_per_inst =
T264_HWPM_IP_VIC_NUM_PERFMUX_PER_INST,
.element_static_array =
t264_vic_inst0_perfmux_element_static_array,
/* NOTE: range should be in ascending order */
.range_start = addr_map_vic_base_r(),
.range_end = addr_map_vic_limit_r(),
.element_stride = addr_map_vic_limit_r() -
addr_map_vic_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.num_element_per_inst =
T264_HWPM_IP_VIC_NUM_BROADCAST_PER_INST,
.element_static_array = NULL,
.range_start = 0ULL,
.range_end = 0ULL,
.element_stride = 0ULL,
.element_slots = 0U,
.element_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.num_element_per_inst =
T264_HWPM_IP_VIC_NUM_PERFMON_PER_INST,
.element_static_array =
t264_vic_inst0_perfmon_element_static_array,
.range_start = addr_map_rpg_pm_vic0_base_r(),
.range_end = addr_map_rpg_pm_vic0_limit_r(),
.element_stride = addr_map_rpg_pm_vic0_limit_r() -
addr_map_rpg_pm_vic0_base_r() + 1ULL,
.element_slots = 0U,
.element_arr = NULL,
},
},
.ip_ops = {
.ip_dev = NULL,
.hwpm_ip_pm = NULL,
.hwpm_ip_reg_op = NULL,
.fd = TEGRA_HWPM_IP_DEBUG_FD_VALID,
},
.element_fs_mask = 0U,
.dev_name = "/dev/nvhost-debug/vic_hwpm",
},
};
/* IP structure */
struct hwpm_ip t264_hwpm_ip_vic = {
.num_instances = T264_HWPM_IP_VIC_NUM_INSTANCES,
.ip_inst_static_array = t264_vic_inst_static_array,
.inst_aperture_info = {
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMUX
*/
{
/* NOTE: range should be in ascending order */
.range_start = addr_map_vic_base_r(),
.range_end = addr_map_vic_limit_r(),
.inst_stride = addr_map_vic_limit_r() -
addr_map_vic_base_r() + 1ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_BROADCAST
*/
{
.range_start = 0ULL,
.range_end = 0ULL,
.inst_stride = 0ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
/*
* Instance info corresponding to
* TEGRA_HWPM_APERTURE_TYPE_PERFMON
*/
{
.range_start = addr_map_rpg_pm_vic0_base_r(),
.range_end = addr_map_rpg_pm_vic0_limit_r(),
.inst_stride = addr_map_rpg_pm_vic0_limit_r() -
addr_map_rpg_pm_vic0_base_r() + 1ULL,
.inst_slots = 0U,
.inst_arr = NULL,
},
},
.dependent_fuse_mask = TEGRA_HWPM_FUSE_HWPM_GLOBAL_DISABLE_MASK |
TEGRA_HWPM_FUSE_OPT_HWPM_DISABLE_MASK,
.override_enable = false,
.inst_fs_mask = 0U,
.resource_status = TEGRA_HWPM_RESOURCE_STATUS_INVALID,
.reserved = false,
};

View File

@@ -0,0 +1,48 @@
/* SPDX-License-Identifier: MIT */
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* This is a generated file. Do not edit.
*
* Steps to regenerate:
* python3 ip_files_generator.py <soc_chip> <IP_name> [<dir_name>]
*/
#ifndef T264_HWPM_IP_VIC_H
#define T264_HWPM_IP_VIC_H
#if defined(CONFIG_T264_HWPM_IP_VIC)
#define T264_HWPM_ACTIVE_IP_VIC T264_HWPM_IP_VIC,
/* This data should ideally be available in HW headers */
#define T264_HWPM_IP_VIC_NUM_INSTANCES 1U
#define T264_HWPM_IP_VIC_NUM_CORE_ELEMENT_PER_INST 1U
#define T264_HWPM_IP_VIC_NUM_PERFMON_PER_INST 1U
#define T264_HWPM_IP_VIC_NUM_PERFMUX_PER_INST 1U
#define T264_HWPM_IP_VIC_NUM_BROADCAST_PER_INST 0U
extern struct hwpm_ip t264_hwpm_ip_vic;
#else
#define T264_HWPM_ACTIVE_IP_VIC
#endif
#endif /* T264_HWPM_IP_VIC_H */

View File

@@ -0,0 +1,546 @@
// SPDX-License-Identifier: MIT
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <tegra_hwpm_timers.h>
#include <tegra_hwpm_log.h>
#include <tegra_hwpm_io.h>
#include <tegra_hwpm.h>
#include <hal/t264/t264_internal.h>
#include <hal/t264/ip/rtr/t264_rtr.h>
#include <hal/t264/hw/t264_pmasys_soc_hwpm.h>
#include <hal/t264/hw/t264_pmmsys_soc_hwpm.h>
#define T264_HWPM_ENGINE_INDEX_GPMA0 3U
#define T264_HWPM_ENGINE_INDEX_GPMA1 4U
#define T264_HWPM_ENGINE_INDEX_PMA 8U
int t264_hwpm_get_rtr_pma_perfmux_ptr(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip_aperture **rtr_perfmux_ptr,
struct hwpm_ip_aperture **pma_perfmux_ptr)
{
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
struct hwpm_ip *chip_ip = active_chip->chip_ips[
active_chip->get_rtr_int_idx()];
struct hwpm_ip_inst *ip_inst_rtr = &chip_ip->ip_inst_static_array[
T264_HWPM_IP_RTR_STATIC_RTR_INST];
struct hwpm_ip_inst *ip_inst_pma = &chip_ip->ip_inst_static_array[
T264_HWPM_IP_RTR_STATIC_PMA_INST];
if (rtr_perfmux_ptr != NULL) {
*rtr_perfmux_ptr = &ip_inst_rtr->element_info[
TEGRA_HWPM_APERTURE_TYPE_PERFMUX].element_static_array[
T264_HWPM_IP_RTR_STATIC_RTR_PERFMUX_INDEX];
}
if (pma_perfmux_ptr != NULL) {
*pma_perfmux_ptr = &ip_inst_pma->element_info[
TEGRA_HWPM_APERTURE_TYPE_PERFMUX].element_static_array[
T264_HWPM_IP_RTR_STATIC_PMA_PERFMUX_INDEX];
}
return 0;
}
int t264_hwpm_check_status(struct tegra_soc_hwpm *hwpm)
{
int err = 0;
u32 reg_val = 0U;
struct hwpm_ip_aperture *rtr_perfmux = NULL;
struct hwpm_ip_aperture *pma_perfmux = NULL;
tegra_hwpm_fn(hwpm, " ");
err = hwpm->active_chip->get_rtr_pma_perfmux_ptr(hwpm, &rtr_perfmux,
&pma_perfmux);
hwpm_assert_print(hwpm, err == 0, return err,
"get rtr pma perfmux failed");
/* Check ROUTER state */
tegra_hwpm_readl(hwpm, rtr_perfmux,
pmmsys_router_enginestatus_r(), &reg_val);
hwpm_assert_print(hwpm,
pmmsys_router_enginestatus_status_v(reg_val) ==
pmmsys_router_enginestatus_status_empty_v(),
return -EINVAL, "Router not ready value 0x%x", reg_val);
/* Check PMA state */
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_channel_status_r(0, 0), &reg_val);
hwpm_assert_print(hwpm,
(reg_val & pmasys_channel_status_engine_status_m()) ==
pmasys_channel_status_engine_status_empty_f(),
return -EINVAL, "PMA not ready value 0x%x", reg_val);
return 0;
}
int t264_hwpm_disable_triggers(struct tegra_soc_hwpm *hwpm)
{
int err = 0;
u32 reg_val = 0U;
u32 retries = 10U;
u32 sleep_msecs = 100U;
struct hwpm_ip_aperture *rtr_perfmux = NULL;
struct hwpm_ip_aperture *pma_perfmux = NULL;
tegra_hwpm_fn(hwpm, " ");
err = hwpm->active_chip->get_rtr_pma_perfmux_ptr(hwpm, &rtr_perfmux, &pma_perfmux);
hwpm_assert_print(hwpm, err == 0, return err,
"get rtr pma perfmux failed");
/* Disable PMA triggers */
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_command_slice_trigger_config_user_r(0), &reg_val);
reg_val = set_field(reg_val,
pmasys_command_slice_trigger_config_user_pma_pulse_m(),
pmasys_command_slice_trigger_config_user_pma_pulse_disable_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_command_slice_trigger_config_user_r(0), reg_val);
/* Reset TRIGGER_START_MASK registers */
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_command_slice_trigger_start_mask0_r(0), &reg_val);
reg_val = set_field(reg_val,
pmasys_command_slice_trigger_start_mask0_engine_m(),
pmasys_command_slice_trigger_start_mask0_engine_init_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_command_slice_trigger_start_mask0_r(0), reg_val);
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_command_slice_trigger_start_mask1_r(0), &reg_val);
reg_val = set_field(reg_val,
pmasys_command_slice_trigger_start_mask1_engine_m(),
pmasys_command_slice_trigger_start_mask1_engine_init_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_command_slice_trigger_start_mask1_r(0), reg_val);
/* Reset TRIGGER_STOP_MASK registers */
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_command_slice_trigger_stop_mask0_r(0), &reg_val);
reg_val = set_field(reg_val,
pmasys_command_slice_trigger_stop_mask0_engine_m(),
pmasys_command_slice_trigger_stop_mask0_engine_init_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_command_slice_trigger_stop_mask0_r(0), reg_val);
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_command_slice_trigger_stop_mask1_r(0), &reg_val);
reg_val = set_field(reg_val,
pmasys_command_slice_trigger_stop_mask1_engine_m(),
pmasys_command_slice_trigger_stop_mask1_engine_init_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_command_slice_trigger_stop_mask1_r(0), reg_val);
/* Wait for PERFMONs to idle */
tegra_hwpm_timeout_print(hwpm, retries, sleep_msecs, rtr_perfmux,
pmmsys_router_enginestatus_r(), &reg_val,
(pmmsys_router_enginestatus_merged_perfmon_status_v(
reg_val) != 0U),
"PMMSYS_ROUTER_ENGINESTATUS_PERFMON_STATUS timed out");
/* Wait for ROUTER to idle */
tegra_hwpm_timeout_print(hwpm, retries, sleep_msecs, rtr_perfmux,
pmmsys_router_enginestatus_r(), &reg_val,
(pmmsys_router_enginestatus_status_v(reg_val) !=
pmmsys_router_enginestatus_status_empty_v()),
"PMMSYS_ROUTER_ENGINESTATUS_STATUS timed out");
/* Wait for PMA to idle */
tegra_hwpm_timeout_print(hwpm, retries, sleep_msecs, pma_perfmux,
pmasys_channel_status_r(0, 0), &reg_val,
((reg_val & pmasys_channel_status_engine_status_m()) !=
pmasys_channel_status_engine_status_empty_f()),
"PMASYS_CHANNEL_STATUS timed out");
return err;
}
int t264_hwpm_init_prod_values(struct tegra_soc_hwpm *hwpm)
{
int err = 0;
u32 reg_val = 0U;
struct hwpm_ip_aperture *rtr_perfmux = NULL;
struct hwpm_ip_aperture *pma_perfmux = NULL;
tegra_hwpm_fn(hwpm, " ");
err = hwpm->active_chip->get_rtr_pma_perfmux_ptr(hwpm, &rtr_perfmux,
&pma_perfmux);
hwpm_assert_print(hwpm, err == 0, return err,
"get rtr pma perfmux failed");
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_channel_config_user_r(0,0), &reg_val);
reg_val = set_field(reg_val,
pmasys_channel_config_user_coalesce_timeout_cycles_m(),
pmasys_channel_config_user_coalesce_timeout_cycles__prod_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_channel_config_user_r(0,0), reg_val);
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_profiling_cg2_secure_r(), &reg_val);
reg_val = set_field(reg_val, pmasys_profiling_cg2_secure_slcg_m(),
pmasys_profiling_cg2_secure_slcg__prod_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_profiling_cg2_secure_r(), reg_val);
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_profiling_cg1_secure_r(), &reg_val);
reg_val = set_field(reg_val, pmasys_profiling_cg1_secure_flcg_m(),
pmasys_profiling_cg1_secure_flcg__prod_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_profiling_cg1_secure_r(), reg_val);
tegra_hwpm_readl(hwpm, rtr_perfmux,
pmmsys_router_profiling_dg_cg1_secure_r(), &reg_val);
reg_val = set_field(reg_val,
pmmsys_router_profiling_dg_cg1_secure_flcg_m(),
pmmsys_router_profiling_dg_cg1_secure_flcg__prod_f());
tegra_hwpm_writel(hwpm, rtr_perfmux,
pmmsys_router_profiling_dg_cg1_secure_r(), reg_val);
tegra_hwpm_readl(hwpm, rtr_perfmux,
pmmsys_router_profiling_cg1_secure_r(), &reg_val);
reg_val = set_field(reg_val,
pmmsys_router_profiling_cg1_secure_flcg_m(),
pmmsys_router_profiling_cg1_secure_flcg__prod_f());
tegra_hwpm_writel(hwpm, rtr_perfmux,
pmmsys_router_profiling_cg1_secure_r(), reg_val);
tegra_hwpm_readl(hwpm, rtr_perfmux,
pmmsys_router_perfmon_cg2_secure_r(), &reg_val);
reg_val = set_field(reg_val,
pmmsys_router_perfmon_cg2_secure_slcg_m(),
pmmsys_router_perfmon_cg2_secure_slcg__prod_f());
tegra_hwpm_writel(hwpm, rtr_perfmux,
pmmsys_router_perfmon_cg2_secure_r(), reg_val);
tegra_hwpm_readl(hwpm, rtr_perfmux,
pmmsys_router_profiling_cg2_secure_r(), &reg_val);
reg_val = set_field(reg_val,
pmmsys_router_profiling_cg2_secure_slcg_m(),
pmmsys_router_profiling_cg2_secure_slcg__prod_f());
tegra_hwpm_writel(hwpm, rtr_perfmux,
pmmsys_router_profiling_cg2_secure_r(), reg_val);
return 0;
}
int t264_hwpm_disable_cg(struct tegra_soc_hwpm *hwpm)
{
int err = 0;
u32 reg_val = 0U;
struct hwpm_ip_aperture *rtr_perfmux = NULL;
struct hwpm_ip_aperture *pma_perfmux = NULL;
tegra_hwpm_fn(hwpm, " ");
err = hwpm->active_chip->get_rtr_pma_perfmux_ptr(hwpm, &rtr_perfmux,
&pma_perfmux);
hwpm_assert_print(hwpm, err == 0, return err,
"get rtr pma perfmux failed");
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_profiling_cg2_secure_r(), &reg_val);
reg_val = set_field(reg_val, pmasys_profiling_cg2_secure_slcg_m(),
pmasys_profiling_cg2_secure_slcg_disabled_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_profiling_cg2_secure_r(), reg_val);
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_profiling_cg1_secure_r(), &reg_val);
reg_val = set_field(reg_val, pmasys_profiling_cg1_secure_flcg_m(),
pmasys_profiling_cg1_secure_flcg_disabled_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_profiling_cg1_secure_r(), reg_val);
tegra_hwpm_readl(hwpm, rtr_perfmux,
pmmsys_router_profiling_dg_cg1_secure_r(), &reg_val);
reg_val = set_field(reg_val,
pmmsys_router_profiling_dg_cg1_secure_flcg_m(),
pmmsys_router_profiling_dg_cg1_secure_flcg_disabled_f());
tegra_hwpm_writel(hwpm, rtr_perfmux,
pmmsys_router_profiling_dg_cg1_secure_r(), reg_val);
tegra_hwpm_readl(hwpm, rtr_perfmux,
pmmsys_router_profiling_cg1_secure_r(), &reg_val);
reg_val = set_field(reg_val,
pmmsys_router_profiling_cg1_secure_flcg_m(),
pmmsys_router_profiling_cg1_secure_flcg_disabled_f());
tegra_hwpm_writel(hwpm, rtr_perfmux,
pmmsys_router_profiling_cg1_secure_r(), reg_val);
tegra_hwpm_readl(hwpm, rtr_perfmux,
pmmsys_router_perfmon_cg2_secure_r(), &reg_val);
reg_val = set_field(reg_val,
pmmsys_router_perfmon_cg2_secure_slcg_m(),
pmmsys_router_perfmon_cg2_secure_slcg_disabled_f());
tegra_hwpm_writel(hwpm, rtr_perfmux,
pmmsys_router_perfmon_cg2_secure_r(), reg_val);
tegra_hwpm_readl(hwpm, rtr_perfmux,
pmmsys_router_profiling_cg2_secure_r(), &reg_val);
reg_val = set_field(reg_val,
pmmsys_router_profiling_cg2_secure_slcg_m(),
pmmsys_router_profiling_cg2_secure_slcg_disabled_f());
tegra_hwpm_writel(hwpm, rtr_perfmux,
pmmsys_router_profiling_cg2_secure_r(), reg_val);
return 0;
}
int t264_hwpm_enable_cg(struct tegra_soc_hwpm *hwpm)
{
int err = 0;
u32 reg_val = 0U;
struct hwpm_ip_aperture *rtr_perfmux = NULL;
struct hwpm_ip_aperture *pma_perfmux = NULL;
tegra_hwpm_fn(hwpm, " ");
err = hwpm->active_chip->get_rtr_pma_perfmux_ptr(hwpm, &rtr_perfmux,
&pma_perfmux);
hwpm_assert_print(hwpm, err == 0, return err,
"get rtr pma perfmux failed");
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_profiling_cg2_secure_r(), &reg_val);
reg_val = set_field(reg_val, pmasys_profiling_cg2_secure_slcg_m(),
pmasys_profiling_cg2_secure_slcg_enabled_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_profiling_cg2_secure_r(), reg_val);
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_profiling_cg1_secure_r(), &reg_val);
reg_val = set_field(reg_val, pmasys_profiling_cg1_secure_flcg_m(),
pmasys_profiling_cg1_secure_flcg_enabled_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_profiling_cg1_secure_r(), reg_val);
tegra_hwpm_readl(hwpm, rtr_perfmux,
pmmsys_router_profiling_dg_cg1_secure_r(), &reg_val);
reg_val = set_field(reg_val,
pmmsys_router_profiling_dg_cg1_secure_flcg_m(),
pmmsys_router_profiling_dg_cg1_secure_flcg_enabled_f());
tegra_hwpm_writel(hwpm, rtr_perfmux,
pmmsys_router_profiling_dg_cg1_secure_r(), reg_val);
tegra_hwpm_readl(hwpm, rtr_perfmux,
pmmsys_router_profiling_cg1_secure_r(), &reg_val);
reg_val = set_field(reg_val,
pmmsys_router_profiling_cg1_secure_flcg_m(),
pmmsys_router_profiling_cg1_secure_flcg_enabled_f());
tegra_hwpm_writel(hwpm, rtr_perfmux,
pmmsys_router_profiling_cg1_secure_r(), reg_val);
tegra_hwpm_readl(hwpm, rtr_perfmux,
pmmsys_router_perfmon_cg2_secure_r(), &reg_val);
reg_val = set_field(reg_val,
pmmsys_router_perfmon_cg2_secure_slcg_m(),
pmmsys_router_perfmon_cg2_secure_slcg_enabled_f());
tegra_hwpm_writel(hwpm, rtr_perfmux,
pmmsys_router_perfmon_cg2_secure_r(), reg_val);
tegra_hwpm_readl(hwpm, rtr_perfmux,
pmmsys_router_profiling_cg2_secure_r(), &reg_val);
reg_val = set_field(reg_val,
pmmsys_router_profiling_cg2_secure_slcg_m(),
pmmsys_router_profiling_cg2_secure_slcg_enabled_f());
tegra_hwpm_writel(hwpm, rtr_perfmux,
pmmsys_router_profiling_cg2_secure_r(), reg_val);
return 0;
}
int t264_hwpm_credit_program(struct tegra_soc_hwpm *hwpm,
u32 *num_credits, u8 cblock_idx, u8 pma_channel_idx,
uint16_t credit_cmd)
{
int err = 0;
u32 reg_val = 0U;
struct hwpm_ip_aperture *pma_perfmux = NULL;
struct hwpm_ip_aperture *rtr_perfmux = NULL;
tegra_hwpm_fn(hwpm, " ");
err = hwpm->active_chip->get_rtr_pma_perfmux_ptr(hwpm, &rtr_perfmux,
&pma_perfmux);
hwpm_assert_print(hwpm, err == 0, return err,
"get rtr, pma perfmux failed");
switch (credit_cmd) {
case TEGRA_HWPM_CMD_SET_HS_CREDITS:
/* Write credits information */
tegra_hwpm_readl(hwpm, rtr_perfmux,
pmmsys_user_channel_config_secure_r(
cblock_idx, pma_channel_idx),
&reg_val);
reg_val = set_field(reg_val,
pmmsys_user_channel_config_secure_hs_credits_m(),
*num_credits);
tegra_hwpm_writel(hwpm, rtr_perfmux,
pmmsys_user_channel_config_secure_r(
cblock_idx, pma_channel_idx),
reg_val);
break;
case TEGRA_HWPM_CMD_GET_HS_CREDITS:
/* Read credits information */
tegra_hwpm_readl(hwpm, rtr_perfmux,
pmmsys_user_channel_config_secure_r(
cblock_idx, pma_channel_idx),
num_credits);
break;
case TEGRA_HWPM_CMD_GET_TOTAL_HS_CREDITS:
/* read the total HS Credits */
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_streaming_capabilities1_r(), &reg_val);
*num_credits = pmasys_streaming_capabilities1_total_credits_v(
reg_val);
break;
case TEGRA_HWPM_CMD_GET_CHIPLET_HS_CREDITS_POOL:
/* Defined for future chips */
tegra_hwpm_err(hwpm,
"TEGRA_SOC_HWPM_CMD_GET_CHIPLET_HS_CREDIT_POOL"
" not supported");
err = -EINVAL;
break;
case TEGRA_HWPM_CMD_GET_HS_CREDITS_MAPPING:
/* Defined for future chips */
tegra_hwpm_err(hwpm,
"TEGRA_SOC_HWPM_CMD_GET_HS_CREDIT_MAPPING"
" not supported");
err = -EINVAL;
break;
default:
tegra_hwpm_err(hwpm, "Invalid Credit Programming State (%d)",
credit_cmd);
err = -EINVAL;
break;
}
return err;
}
int t264_hwpm_setup_trigger(struct tegra_soc_hwpm *hwpm,
u8 enable_cross_trigger, u8 session_type)
{
int err = 0;
u32 trigger_mask_secure0 = 0U;
u32 record_select_secure = 0U;
struct hwpm_ip_aperture *pma_perfmux = NULL;
tegra_hwpm_fn(hwpm, " ");
err = hwpm->active_chip->get_rtr_pma_perfmux_ptr(hwpm, NULL,
&pma_perfmux);
hwpm_assert_print(hwpm, err == 0, return err,
"get pma perfmux failed");
/*
* Case 1: profiler, cross-trigger enabled, GPU->SoC
* - Action: enable incoming start-stop trigger from GPU PMA
* - GPU PMA Action: enable outgoing trigger from GPU PMA,
* trigger type doesn't matter on GPU side
*
* Case 2: sampler, cross-trigger enabled, GPU->SoC
* - Action: enable incoming periodic trigger from GPU PMA
* - GPU PMA Action: enable outgoing trigger from GPU PMA,
* trigger type doesn't matter on GPU side
*
* Case 3: profiler, cross-trigger enabled, SoC->GPU
* - Action: enable outgoing trigger from SoC PMA,
* trigger type doesn't matter on SoC side
* - GPU PMA Action: configure incoming start-stop trigger from SoC PMA
*
* Case 4: sampler, cross-trigger enabled, SoC->GPU
* - Action: enable outgoing trigger from SoC PMA,
* trigger type doesn't matter on SoC side
* - GPU PMA Action: configure incoming periodic trigger from SoC PMA
*
* Case 5: profiler, cross-trigger disabled
* - Action: enable own trigger from SoC PMA,
* trigger type doesn't matter
* - GPU PMA Action: enable own trigger from GPU PMA,
* trigger type doesn't matter
*
* Case 6: sampler, cross-trigger disabled
* - Action: enable own trigger from SoC PMA,
* trigger type doesn't matter
* - GPU PMA Action: enable own trigger from GPU PMA,
* trigger type doesn't matter
*/
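/*
 * Note (interpretation based on register names, not HW documentation):
 * the writes below presumably select the trigger source for command
 * slice 0. TRIGGER_MASK_SECURE0 is programmed with a bitmask containing
 * the selected engine index and RECORD_SELECT_SECURE with that same
 * index, using the T264_HWPM_ENGINE_INDEX_* values defined at the top
 * of this file (own PMA for cases 3-6, a GPU PMA index otherwise).
 */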
if (!enable_cross_trigger) {
/*
* Handle Case-3 to Case-6
*/
trigger_mask_secure0 = BIT(T264_HWPM_ENGINE_INDEX_PMA);
record_select_secure = T264_HWPM_ENGINE_INDEX_PMA;
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_command_slice_trigger_mask_secure0_r(0),
trigger_mask_secure0);
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_command_slice_record_select_secure_r(0),
record_select_secure);
return err;
}
switch (session_type) {
case TEGRA_HWPM_CMD_PERIODIC_SESSION:
/*
* Handle Case-2
*/
trigger_mask_secure0 = BIT(T264_HWPM_ENGINE_INDEX_GPMA1);
record_select_secure = T264_HWPM_ENGINE_INDEX_GPMA1;
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_command_slice_trigger_mask_secure0_r(0),
trigger_mask_secure0);
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_command_slice_record_select_secure_r(0),
record_select_secure);
break;
case TEGRA_HWPM_CMD_START_STOP_SESSION:
/*
* Handle Case-1
*/
trigger_mask_secure0 = BIT(T264_HWPM_ENGINE_INDEX_GPMA0);
record_select_secure = T264_HWPM_ENGINE_INDEX_GPMA0;
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_command_slice_trigger_mask_secure0_r(0),
trigger_mask_secure0);
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_command_slice_record_select_secure_r(0),
record_select_secure);
break;
case TEGRA_HWPM_CMD_INVALID_SESSION:
default:
tegra_hwpm_err(hwpm, "Invalid Session type");
err = -EINVAL;
break;
}
return err;
}

View File

@@ -0,0 +1,31 @@
/* SPDX-License-Identifier: MIT */
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef T264_HWPM_INIT_H
#define T264_HWPM_INIT_H
struct tegra_soc_hwpm;
int t264_hwpm_init_chip_info(struct tegra_soc_hwpm *hwpm);
#endif /* T264_HWPM_INIT_H */

View File

@@ -0,0 +1,334 @@
// SPDX-License-Identifier: MIT
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <tegra_hwpm_clk_rst.h>
#include <tegra_hwpm_common.h>
#include <tegra_hwpm_kmem.h>
#include <tegra_hwpm_log.h>
#include <tegra_hwpm.h>
#include <hal/t264/t264_init.h>
#include <hal/t264/t264_internal.h>
static struct tegra_soc_hwpm_chip t264_chip_info = {
.la_clk_rate = 648000000,
.chip_ips = NULL,
/* HALs */
.validate_secondary_hals = t264_hwpm_validate_secondary_hals,
/* Clocks-Resets */
.clk_rst_prepare = tegra_hwpm_clk_rst_prepare,
.clk_rst_set_rate_enable = tegra_hwpm_clk_rst_set_rate_enable,
.clk_rst_disable = tegra_hwpm_clk_rst_disable,
.clk_rst_release = tegra_hwpm_clk_rst_release,
/* IP */
.is_ip_active = t264_hwpm_is_ip_active,
.is_resource_active = t264_hwpm_is_resource_active,
.get_rtr_int_idx = t264_get_rtr_int_idx,
.get_ip_max_idx = t264_get_ip_max_idx,
.get_rtr_pma_perfmux_ptr = t264_hwpm_get_rtr_pma_perfmux_ptr,
.extract_ip_ops = t264_hwpm_extract_ip_ops,
.force_enable_ips = t264_hwpm_force_enable_ips,
.validate_current_config = t264_hwpm_validate_current_config,
.get_fs_info = tegra_hwpm_get_fs_info,
.get_resource_info = tegra_hwpm_get_resource_info,
/* Clock gating */
.init_prod_values = t264_hwpm_init_prod_values,
.disable_cg = t264_hwpm_disable_cg,
.enable_cg = t264_hwpm_enable_cg,
/* Secure register programming */
.credit_program = t264_hwpm_credit_program,
.setup_trigger = t264_hwpm_setup_trigger,
/* Resource reservation */
.reserve_rtr = tegra_hwpm_reserve_rtr,
.release_rtr = tegra_hwpm_release_rtr,
/* Aperture */
.perfmon_enable = t264_hwpm_perfmon_enable,
.perfmon_disable = t264_hwpm_perfmon_disable,
.perfmux_disable = tegra_hwpm_perfmux_disable,
.disable_triggers = t264_hwpm_disable_triggers,
.check_status = t264_hwpm_check_status,
/* Memory management */
.disable_mem_mgmt = t264_hwpm_disable_mem_mgmt,
.enable_mem_mgmt = t264_hwpm_enable_mem_mgmt,
.invalidate_mem_config = t264_hwpm_invalidate_mem_config,
.stream_mem_bytes = t264_hwpm_stream_mem_bytes,
.disable_pma_streaming = t264_hwpm_disable_pma_streaming,
.update_mem_bytes_get_ptr = t264_hwpm_update_mem_bytes_get_ptr,
.get_mem_bytes_put_ptr = t264_hwpm_get_mem_bytes_put_ptr,
.membuf_overflow_status = t264_hwpm_membuf_overflow_status,
/* Allowlist */
.get_alist_buf_size = tegra_hwpm_get_alist_buf_size,
.zero_alist_regs = tegra_hwpm_zero_alist_regs,
.copy_alist = tegra_hwpm_copy_alist,
.check_alist = tegra_hwpm_check_alist,
};
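/*
 * Note: the HAL table above mixes T264-specific implementations (t264_*)
 * with chip-agnostic common helpers (tegra_hwpm_*). The secondary HAL
 * check below verifies a subset of these pointers;
 * tegra_hwpm_validate_primary_hals() (called from
 * t264_hwpm_init_chip_info()) presumably covers the rest.
 */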
bool t264_hwpm_validate_secondary_hals(struct tegra_soc_hwpm *hwpm)
{
tegra_hwpm_fn(hwpm, " ");
if (hwpm->active_chip->clk_rst_prepare == NULL) {
tegra_hwpm_err(hwpm, "clk_rst_prepare HAL uninitialized");
return false;
}
if (hwpm->active_chip->clk_rst_set_rate_enable == NULL) {
tegra_hwpm_err(hwpm,
"clk_rst_set_rate_enable HAL uninitialized");
return false;
}
if (hwpm->active_chip->clk_rst_disable == NULL) {
tegra_hwpm_err(hwpm, "clk_rst_disable HAL uninitialized");
return false;
}
if (hwpm->active_chip->clk_rst_release == NULL) {
tegra_hwpm_err(hwpm, "clk_rst_release HAL uninitialized");
return false;
}
if (hwpm->active_chip->credit_program == NULL) {
tegra_hwpm_err(hwpm, "credit_program HAL uninitialized");
return false;
}
if (hwpm->active_chip->setup_trigger == NULL) {
tegra_hwpm_err(hwpm, "setup_trigger HAL uninitialized");
return false;
}
return true;
}
bool t264_hwpm_is_ip_active(struct tegra_soc_hwpm *hwpm,
u32 ip_enum, u32 *config_ip_index)
{
u32 config_ip = TEGRA_HWPM_IP_INACTIVE;
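/*
 * Each case label below is compiled in only when the corresponding IP
 * config option is enabled; a query for a compiled-out IP therefore
 * falls through to the default case and the IP is reported inactive
 * (config_ip stays TEGRA_HWPM_IP_INACTIVE).
 */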
switch (ip_enum) {
#if defined(CONFIG_T264_HWPM_IP_VIC)
case TEGRA_HWPM_IP_VIC:
config_ip = T264_HWPM_IP_VIC;
#endif
break;
#if defined(CONFIG_T264_HWPM_IP_MSS_CHANNEL)
case TEGRA_HWPM_IP_MSS_CHANNEL:
config_ip = T264_HWPM_IP_MSS_CHANNEL;
#endif
break;
#if defined(CONFIG_T264_HWPM_IP_PVA)
case TEGRA_HWPM_IP_PVA:
config_ip = T264_HWPM_IP_PVA;
#endif
break;
#if defined(CONFIG_T264_HWPM_IP_MSS_HUBS)
case TEGRA_HWPM_IP_MSS_HUB:
config_ip = T264_HWPM_IP_MSS_HUBS;
#endif
break;
#if defined(CONFIG_T264_HWPM_IP_OCU)
case TEGRA_HWPM_IP_MCF_OCU:
config_ip = T264_HWPM_IP_OCU;
#endif
break;
#if defined(CONFIG_T264_HWPM_IP_SMMU)
case TEGRA_HWPM_IP_SMMU:
config_ip = T264_HWPM_IP_SMMU;
#endif
break;
#if defined(CONFIG_T264_HWPM_IP_UCF_MSW)
case TEGRA_HWPM_IP_UCF_MSW:
config_ip = T264_HWPM_IP_UCF_MSW;
#endif
break;
#if defined(CONFIG_T264_HWPM_IP_UCF_PSW)
case TEGRA_HWPM_IP_UCF_PSW:
config_ip = T264_HWPM_IP_UCF_PSW;
#endif
break;
#if defined(CONFIG_T264_HWPM_IP_UCF_CSW)
case TEGRA_HWPM_IP_UCF_CSW:
config_ip = T264_HWPM_IP_UCF_CSW;
#endif
break;
#if defined(CONFIG_T264_HWPM_IP_CPU)
case TEGRA_HWPM_IP_CPU:
config_ip = T264_HWPM_IP_CPU;
#endif
break;
default:
tegra_hwpm_err(hwpm, "Queried enum tegra_hwpm_ip %d invalid",
ip_enum);
break;
}
*config_ip_index = config_ip;
return (config_ip != TEGRA_HWPM_IP_INACTIVE);
}
bool t264_hwpm_is_resource_active(struct tegra_soc_hwpm *hwpm,
u32 res_enum, u32 *config_ip_index)
{
u32 config_ip = TEGRA_HWPM_IP_INACTIVE;
switch (res_enum) {
#if defined(CONFIG_T264_HWPM_IP_VIC)
case TEGRA_HWPM_RESOURCE_VIC:
config_ip = T264_HWPM_IP_VIC;
#endif
break;
#if defined(CONFIG_T264_HWPM_IP_MSS_CHANNEL)
case TEGRA_HWPM_RESOURCE_MSS_CHANNEL:
config_ip = T264_HWPM_IP_MSS_CHANNEL;
#endif
break;
#if defined(CONFIG_T264_HWPM_IP_PVA)
case TEGRA_HWPM_RESOURCE_PVA:
config_ip = T264_HWPM_IP_PVA;
#endif
break;
#if defined(CONFIG_T264_HWPM_IP_MSS_HUBS)
case TEGRA_HWPM_RESOURCE_MSS_HUB:
config_ip = T264_HWPM_IP_MSS_HUBS;
#endif
break;
#if defined(CONFIG_T264_HWPM_IP_OCU)
case TEGRA_HWPM_RESOURCE_MCF_OCU:
config_ip = T264_HWPM_IP_OCU;
#endif
break;
#if defined(CONFIG_T264_HWPM_IP_SMMU)
case TEGRA_HWPM_RESOURCE_SMMU:
config_ip = T264_HWPM_IP_SMMU;
#endif
break;
#if defined(CONFIG_T264_HWPM_IP_UCF_MSW)
case TEGRA_HWPM_RESOURCE_UCF_MSW:
config_ip = T264_HWPM_IP_UCF_MSW;
#endif
break;
#if defined(CONFIG_T264_HWPM_IP_UCF_PSW)
case TEGRA_HWPM_RESOURCE_UCF_PSW:
config_ip = T264_HWPM_IP_UCF_PSW;
#endif
break;
#if defined(CONFIG_T264_HWPM_IP_UCF_CSW)
case TEGRA_HWPM_RESOURCE_UCF_CSW:
config_ip = T264_HWPM_IP_UCF_CSW;
#endif
break;
#if defined(CONFIG_T264_HWPM_IP_CPU)
case TEGRA_HWPM_RESOURCE_CPU:
config_ip = T264_HWPM_IP_CPU;
#endif
break;
case TEGRA_HWPM_RESOURCE_PMA:
config_ip = T264_HWPM_IP_PMA;
break;
case TEGRA_HWPM_RESOURCE_CMD_SLICE_RTR:
config_ip = T264_HWPM_IP_RTR;
break;
default:
tegra_hwpm_err(hwpm, "Queried resource %d invalid",
res_enum);
break;
}
*config_ip_index = config_ip;
return (config_ip != TEGRA_HWPM_IP_INACTIVE);
}
u32 t264_get_rtr_int_idx(void)
{
return T264_HWPM_IP_RTR;
}
u32 t264_get_ip_max_idx(void)
{
return T264_HWPM_IP_MAX;
}
int t264_hwpm_init_chip_info(struct tegra_soc_hwpm *hwpm)
{
struct hwpm_ip **t264_active_ip_info;
/* Allocate array of pointers to hold active IP structures */
t264_chip_info.chip_ips = tegra_hwpm_kcalloc(hwpm,
T264_HWPM_IP_MAX, sizeof(struct hwpm_ip *));
/* Add active chip structure link to hwpm super-structure */
hwpm->active_chip = &t264_chip_info;
/* Temporary pointer to make below assignments legible */
t264_active_ip_info = t264_chip_info.chip_ips;
t264_active_ip_info[T264_HWPM_IP_PMA] = &t264_hwpm_ip_pma;
t264_active_ip_info[T264_HWPM_IP_RTR] = &t264_hwpm_ip_rtr;
#if defined(CONFIG_T264_HWPM_IP_VIC)
t264_active_ip_info[T264_HWPM_IP_VIC] = &t264_hwpm_ip_vic;
#endif
#if defined(CONFIG_T264_HWPM_IP_MSS_CHANNEL)
t264_active_ip_info[T264_HWPM_IP_MSS_CHANNEL] =
&t264_hwpm_ip_mss_channel;
#endif
#if defined(CONFIG_T264_HWPM_IP_MSS_HUBS)
t264_active_ip_info[T264_HWPM_IP_MSS_HUBS] =
&t264_hwpm_ip_mss_hubs;
#endif
#if defined(CONFIG_T264_HWPM_IP_PVA)
t264_active_ip_info[T264_HWPM_IP_PVA] = &t264_hwpm_ip_pva;
#endif
#if defined(CONFIG_T264_HWPM_IP_OCU)
t264_active_ip_info[T264_HWPM_IP_OCU] = &t264_hwpm_ip_ocu;
#endif
#if defined(CONFIG_T264_HWPM_IP_SMMU)
t264_active_ip_info[T264_HWPM_IP_SMMU] = &t264_hwpm_ip_smmu;
#endif
#if defined(CONFIG_T264_HWPM_IP_UCF_MSW)
t264_active_ip_info[T264_HWPM_IP_UCF_MSW] = &t264_hwpm_ip_ucf_msw;
#endif
#if defined(CONFIG_T264_HWPM_IP_UCF_PSW)
t264_active_ip_info[T264_HWPM_IP_UCF_PSW] = &t264_hwpm_ip_ucf_psw;
#endif
#if defined(CONFIG_T264_HWPM_IP_UCF_CSW)
t264_active_ip_info[T264_HWPM_IP_UCF_CSW] = &t264_hwpm_ip_ucf_csw;
#endif
#if defined(CONFIG_T264_HWPM_IP_CPU)
t264_active_ip_info[T264_HWPM_IP_CPU] = &t264_hwpm_ip_cpu;
#endif
if (!tegra_hwpm_validate_primary_hals(hwpm)) {
return -EINVAL;
}
return 0;
}

View File

@@ -0,0 +1,115 @@
/* SPDX-License-Identifier: MIT */
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef T264_HWPM_INTERNAL_H
#define T264_HWPM_INTERNAL_H
#include <hal/t264/ip/vic/t264_vic.h>
#include <hal/t264/ip/pva/t264_pva.h>
#include <hal/t264/ip/mss_channel/t264_mss_channel.h>
#include <hal/t264/ip/mss_hubs/t264_mss_hubs.h>
#include <hal/t264/ip/ocu/t264_ocu.h>
#include <hal/t264/ip/smmu/t264_smmu.h>
#include <hal/t264/ip/ucf_msw/t264_ucf_msw.h>
#include <hal/t264/ip/ucf_psw/t264_ucf_psw.h>
#include <hal/t264/ip/ucf_csw/t264_ucf_csw.h>
#include <hal/t264/ip/cpu/t264_cpu.h>
#include <hal/t264/ip/pma/t264_pma.h>
#include <hal/t264/ip/rtr/t264_rtr.h>
#undef DEFINE_SOC_HWPM_ACTIVE_IP
#define DEFINE_SOC_HWPM_ACTIVE_IP(name) name
#define T264_HWPM_ACTIVE_IP_MAX T264_HWPM_IP_MAX
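/*
 * X-macro style list: each T264_HWPM_ACTIVE_IP_<ip> macro (defined in the
 * per-IP headers included above) expands to "<internal IP index>," when
 * the corresponding CONFIG_T264_HWPM_IP_<ip> option is enabled and to
 * nothing otherwise, so the t264_hwpm_active_ips enum below only numbers
 * the IPs that are actually compiled in.
 */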
#define T264_ACTIVE_IPS \
DEFINE_SOC_HWPM_ACTIVE_IP(T264_HWPM_ACTIVE_IP_PMA) \
DEFINE_SOC_HWPM_ACTIVE_IP(T264_HWPM_ACTIVE_IP_RTR) \
DEFINE_SOC_HWPM_ACTIVE_IP(T264_HWPM_ACTIVE_IP_VIC) \
DEFINE_SOC_HWPM_ACTIVE_IP(T264_HWPM_ACTIVE_IP_PVA) \
DEFINE_SOC_HWPM_ACTIVE_IP(T264_HWPM_ACTIVE_IP_MSS_CHANNEL) \
DEFINE_SOC_HWPM_ACTIVE_IP(T264_HWPM_ACTIVE_IP_MSS_HUBS) \
DEFINE_SOC_HWPM_ACTIVE_IP(T264_HWPM_ACTIVE_IP_OCU) \
DEFINE_SOC_HWPM_ACTIVE_IP(T264_HWPM_ACTIVE_IP_SMMU) \
DEFINE_SOC_HWPM_ACTIVE_IP(T264_HWPM_ACTIVE_IP_UCF_MSW) \
DEFINE_SOC_HWPM_ACTIVE_IP(T264_HWPM_ACTIVE_IP_UCF_PSW) \
DEFINE_SOC_HWPM_ACTIVE_IP(T264_HWPM_ACTIVE_IP_UCF_CSW) \
DEFINE_SOC_HWPM_ACTIVE_IP(T264_HWPM_ACTIVE_IP_CPU) \
DEFINE_SOC_HWPM_ACTIVE_IP(T264_HWPM_ACTIVE_IP_MAX)
enum t264_hwpm_active_ips {
T264_ACTIVE_IPS
};
#undef DEFINE_SOC_HWPM_ACTIVE_IP
enum tegra_soc_hwpm_ip;
enum tegra_soc_hwpm_resource;
struct tegra_soc_hwpm;
struct hwpm_ip_aperture;
bool t264_hwpm_validate_secondary_hals(struct tegra_soc_hwpm *hwpm);
bool t264_hwpm_is_ip_active(struct tegra_soc_hwpm *hwpm,
u32 ip_enum, u32 *config_ip_index);
bool t264_hwpm_is_resource_active(struct tegra_soc_hwpm *hwpm,
u32 res_enum, u32 *config_ip_index);
u32 t264_get_rtr_int_idx(void);
u32 t264_get_ip_max_idx(void);
int t264_hwpm_get_rtr_pma_perfmux_ptr(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip_aperture **rtr_perfmux_ptr,
struct hwpm_ip_aperture **pma_perfmux_ptr);
int t264_hwpm_check_status(struct tegra_soc_hwpm *hwpm);
int t264_hwpm_validate_current_config(struct tegra_soc_hwpm *hwpm);
int t264_hwpm_extract_ip_ops(struct tegra_soc_hwpm *hwpm,
u32 resource_enum, u64 base_address,
struct tegra_hwpm_ip_ops *ip_ops, bool available);
int t264_hwpm_force_enable_ips(struct tegra_soc_hwpm *hwpm);
int t264_hwpm_disable_triggers(struct tegra_soc_hwpm *hwpm);
int t264_hwpm_init_prod_values(struct tegra_soc_hwpm *hwpm);
int t264_hwpm_credit_program(struct tegra_soc_hwpm *hwpm,
u32 *num_credits, u8 cblock_idx, u8 pma_channel_idx,
uint16_t credit_cmd);
int t264_hwpm_setup_trigger(struct tegra_soc_hwpm *hwpm,
u8 enable_cross_trigger, u8 session_type);
int t264_hwpm_perfmon_enable(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip_aperture *perfmon);
int t264_hwpm_perfmon_disable(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip_aperture *perfmon);
int t264_hwpm_disable_cg(struct tegra_soc_hwpm *hwpm);
int t264_hwpm_enable_cg(struct tegra_soc_hwpm *hwpm);
int t264_hwpm_disable_mem_mgmt(struct tegra_soc_hwpm *hwpm);
int t264_hwpm_enable_mem_mgmt(struct tegra_soc_hwpm *hwpm);
int t264_hwpm_invalidate_mem_config(struct tegra_soc_hwpm *hwpm);
int t264_hwpm_stream_mem_bytes(struct tegra_soc_hwpm *hwpm);
int t264_hwpm_disable_pma_streaming(struct tegra_soc_hwpm *hwpm);
int t264_hwpm_update_mem_bytes_get_ptr(struct tegra_soc_hwpm *hwpm,
u64 mem_bump);
int t264_hwpm_get_mem_bytes_put_ptr(struct tegra_soc_hwpm *hwpm,
u64 *mem_head_ptr);
int t264_hwpm_membuf_overflow_status(struct tegra_soc_hwpm *hwpm,
u32 *overflow_status);
#endif /* T264_HWPM_INTERNAL_H */

View File

@@ -0,0 +1,637 @@
// SPDX-License-Identifier: MIT
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <tegra_hwpm_static_analysis.h>
#include <tegra_hwpm_common.h>
#include <tegra_hwpm_soc.h>
#include <tegra_hwpm_log.h>
#include <tegra_hwpm_io.h>
#include <tegra_hwpm.h>
#include <hal/t264/t264_internal.h>
#include <hal/t264/hw/t264_addr_map_soc_hwpm.h>
/*
* This function is invoked by register_ip API.
* Convert the external resource enum to internal IP index.
* Extract given ip_ops and update corresponding IP structure.
*/
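/*
 * Illustrative flow (based on the structures in this driver, not on the
 * register_ip API definition): an IP driver is expected to pass its
 * resource enum, the base address of its perfmux aperture and a
 * tegra_hwpm_ip_ops structure (ip_dev, hwpm_ip_pm, hwpm_ip_reg_op);
 * this function maps the resource to a T264_HWPM_IP_* index and stores
 * the ops in the matching IP instance via tegra_hwpm_set_fs_info_ip_ops().
 */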
int t264_hwpm_extract_ip_ops(struct tegra_soc_hwpm *hwpm,
u32 resource_enum, u64 base_address,
struct tegra_hwpm_ip_ops *ip_ops, bool available)
{
int ret = 0;
u32 ip_idx = 0U;
tegra_hwpm_fn(hwpm, " ");
tegra_hwpm_dbg(hwpm, hwpm_dbg_ip_register,
"Extract IP ops for resource enum %d info", resource_enum);
/* Convert tegra_soc_hwpm_resource to internal enum */
if (!(t264_hwpm_is_resource_active(hwpm, resource_enum, &ip_idx))) {
tegra_hwpm_err(hwpm,
"SOC hwpm resource %d (base 0x%llx) is unconfigured",
resource_enum, (unsigned long long)base_address);
goto fail;
}
switch (ip_idx) {
#if defined(CONFIG_T264_HWPM_IP_VIC)
case T264_HWPM_IP_VIC:
#endif
#if defined(CONFIG_T264_HWPM_IP_PVA)
case T264_HWPM_IP_PVA:
#endif
#if defined(CONFIG_T264_HWPM_IP_OCU)
case T264_HWPM_IP_OCU:
#endif
#if defined(CONFIG_T264_HWPM_IP_SMMU)
case T264_HWPM_IP_SMMU:
#endif
#if defined(CONFIG_T264_HWPM_IP_UCF_MSW)
case T264_HWPM_IP_UCF_MSW:
#endif
#if defined(CONFIG_T264_HWPM_IP_UCF_PSW)
case T264_HWPM_IP_UCF_PSW:
#endif
#if defined(CONFIG_T264_HWPM_IP_UCF_CSW)
case T264_HWPM_IP_UCF_CSW:
#endif
#if defined(CONFIG_T264_HWPM_IP_CPU)
case T264_HWPM_IP_CPU:
#endif
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, ip_ops,
base_address, ip_idx, available);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"Failed to %s fs/ops for IP %d (base 0x%llx)",
available == true ? "set" : "reset",
ip_idx, (unsigned long long)base_address);
goto fail;
}
break;
#if defined(CONFIG_T264_HWPM_IP_MSS_CHANNEL)
case T264_HWPM_IP_MSS_CHANNEL:
#endif
#if defined(CONFIG_T264_HWPM_IP_MSS_HUBS)
case T264_HWPM_IP_MSS_HUBS:
#endif
/* MSS channel and MSS hubs share MC channels */
/* Check base address in T264_HWPM_IP_MSS_CHANNEL */
#if defined(CONFIG_T264_HWPM_IP_MSS_CHANNEL)
ip_idx = T264_HWPM_IP_MSS_CHANNEL;
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, ip_ops,
base_address, ip_idx, available);
if (ret != 0) {
/*
* Return value of ENODEV will indicate that the base
* address doesn't belong to this IP.
*/
if (ret != -ENODEV) {
tegra_hwpm_err(hwpm,
"IP %d base 0x%llx:Failed to %s fs/ops",
ip_idx, (unsigned long long)base_address,
available == true ? "set" : "reset");
goto fail;
}
/*
* ret = -ENODEV indicates given address doesn't belong
* to IP. This means ip_ops will not be set for this IP.
* This shouldn't be a reason to fail this function.
* Hence, reset ret to 0.
*/
ret = 0;
}
#endif
/* Check base address in T264_HWPM_IP_MSS_HUBS */
#if defined(CONFIG_T264_HWPM_IP_MSS_HUBS)
ip_idx = T264_HWPM_IP_MSS_HUBS;
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, ip_ops,
base_address, ip_idx, available);
if (ret != 0) {
/*
* Return value of ENODEV will indicate that the base
* address doesn't belong to this IP.
*/
if (ret != -ENODEV) {
tegra_hwpm_err(hwpm,
"IP %d base 0x%llx:Failed to %s fs/ops",
ip_idx, (unsigned long long)base_address,
available == true ? "set" : "reset");
goto fail;
}
/*
* ret = -ENODEV indicates given address doesn't belong
* to IP. This means ip_ops will not be set for this IP.
* This shouldn't be a reason to fail this function.
* Hence, reset ret to 0.
*/
ret = 0;
}
#endif
break;
case T264_HWPM_IP_PMA:
case T264_HWPM_IP_RTR:
default:
tegra_hwpm_err(hwpm, "Invalid IP %d for ip_ops", ip_idx);
break;
}
fail:
return ret;
}
static int t264_hwpm_validate_emc_config(struct tegra_soc_hwpm *hwpm)
{
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
#if defined(CONFIG_T264_HWPM_IP_MSS_CHANNEL)
struct hwpm_ip *chip_ip = NULL;
struct hwpm_ip_inst *ip_inst = NULL;
u32 inst_idx = 0U;
u32 element_mask_max = 0U;
#endif
u32 mss_disable_fuse_val = 0U;
u32 mss_disable_fuse_val_mask = 0xFU;
u32 mss_disable_fuse_bit_idx = 0U;
u32 emc_element_floorsweep_mask = 0U;
u32 idx = 0U;
int err;
tegra_hwpm_fn(hwpm, " ");
if (!tegra_hwpm_is_platform_silicon()) {
tegra_hwpm_err(hwpm,
"Fuse readl is not implemented yet. Skip for now ");
return 0;
}
#define TEGRA_FUSE_OPT_MSS_DISABLE 0x8c0U
err = tegra_hwpm_fuse_readl(hwpm,
TEGRA_FUSE_OPT_MSS_DISABLE, &mss_disable_fuse_val);
if (err != 0) {
tegra_hwpm_err(hwpm, "emc_disable fuse read failed");
return err;
}
/*
* In floorsweep fuse value,
* each bit corresponds to 4 elements.
* Bit value 0 indicates those elements are
* available and bit value 1 indicates
* corresponding elements are floorswept.
*
* Convert floorsweep fuse value to available EMC elements.
*/
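/*
 * Worked example (hypothetical fuse value): mss_disable_fuse_val = 0x2
 * means the second group of 4 elements is floorswept; the loop below
 * then builds emc_element_floorsweep_mask = 0xFF0F, i.e. the mask of
 * available elements.
 */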
do {
if (!(mss_disable_fuse_val & (0x1U << mss_disable_fuse_bit_idx))) {
emc_element_floorsweep_mask |=
(0xFU << (mss_disable_fuse_bit_idx * 4U));
}
mss_disable_fuse_bit_idx++;
mss_disable_fuse_val_mask = (mss_disable_fuse_val_mask >> 1U);
} while (mss_disable_fuse_val_mask != 0U);
/* Set fuse value in MSS IP instances */
for (idx = 0U; idx < active_chip->get_ip_max_idx(); idx++) {
switch (idx) {
#if defined(CONFIG_T264_HWPM_IP_MSS_CHANNEL)
case T264_HWPM_IP_MSS_CHANNEL:
#endif
#if defined(CONFIG_T264_HWPM_IP_MSS_CHANNEL)
chip_ip = active_chip->chip_ips[idx];
for (inst_idx = 0U; inst_idx < chip_ip->num_instances;
inst_idx++) {
ip_inst = &chip_ip->ip_inst_static_array[
inst_idx];
/*
* The chip-wide available-element mask can contain bits beyond the
* core elements of this instance. Hence use the max element mask to
* get correct fs info to use in HWPM driver.
*/
element_mask_max = tegra_hwpm_safe_sub_u32(
tegra_hwpm_safe_cast_u64_to_u32(BIT(
ip_inst->num_core_elements_per_inst)),
1U);
ip_inst->fuse_fs_mask =
(emc_element_floorsweep_mask &
element_mask_max);
tegra_hwpm_dbg(hwpm, hwpm_info,
"ip %d, fuse_mask 0x%x",
idx, ip_inst->fuse_fs_mask);
}
break;
#endif
default:
continue;
}
}
return 0;
}
int t264_hwpm_validate_current_config(struct tegra_soc_hwpm *hwpm)
{
u32 opt_hwpm_disable = 0U;
u32 fa_mode = 0U;
u32 hwpm_global_disable = 0U;
u32 idx = 0U;
int err;
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
struct hwpm_ip *chip_ip = NULL;
tegra_hwpm_fn(hwpm, " ");
if (!tegra_hwpm_is_platform_silicon()) {
return 0;
}
err = t264_hwpm_validate_emc_config(hwpm);
if (err != 0) {
tegra_hwpm_err(hwpm, "failed to validate emc config");
return err;
}
#define TEGRA_FUSE_OPT_HWPM_DISABLE 0xc18U
/* Read fuse_opt_hwpm_disable_0 fuse */
err = tegra_hwpm_fuse_readl(hwpm,
TEGRA_FUSE_OPT_HWPM_DISABLE, &opt_hwpm_disable);
if (err != 0) {
tegra_hwpm_err(hwpm, "opt_hwpm_disable fuse read failed");
return err;
}
#define TEGRA_FUSE_FA_MODE 0x48U
err = tegra_hwpm_fuse_readl(hwpm, TEGRA_FUSE_FA_MODE, &fa_mode);
if (err != 0) {
tegra_hwpm_err(hwpm, "fa mode fuse read failed");
return err;
}
/*
* Configure global control register to disable PCFIFO interlock
* By writing to MSS_HUB_HUBC_CONFIG_0 register
*/
#define TEGRA_HUB_HUBC_CONFIG0_OFFSET 0x6244U
#define TEGRA_HUB_HUBC_PCFIFO_INTERLOCK_DISABLED 0x1U
err = tegra_hwpm_write_sticky_bits(hwpm, addr_map_mcb_base_r(),
TEGRA_HUB_HUBC_CONFIG0_OFFSET,
TEGRA_HUB_HUBC_PCFIFO_INTERLOCK_DISABLED);
hwpm_assert_print(hwpm, err == 0, return err,
"PCFIFO Interlock disable failed");
#define TEGRA_HWPM_GLOBAL_DISABLE_OFFSET 0x300CU
#define TEGRA_HWPM_GLOBAL_DISABLE_DISABLED 0x0U
err = tegra_hwpm_read_sticky_bits(hwpm, addr_map_pmc_misc_base_r(),
TEGRA_HWPM_GLOBAL_DISABLE_OFFSET, &hwpm_global_disable);
if (err != 0) {
tegra_hwpm_err(hwpm, "hwpm global disable read failed");
return err;
}
/*
* Do not enable override if the FA mode fuse is set. The FA_MODE fuse
* enables all PERFMONs regardless of the fuse, sticky bit or secure
* register settings.
*/
if (fa_mode != 0U) {
tegra_hwpm_dbg(hwpm, hwpm_info,
"fa mode fuse enabled, no override required, enable HWPM");
return 0;
}
/* Override enable depends on opt_hwpm_disable and global hwpm disable */
if ((opt_hwpm_disable == 0U) &&
(hwpm_global_disable == TEGRA_HWPM_GLOBAL_DISABLE_DISABLED)) {
tegra_hwpm_dbg(hwpm, hwpm_info,
"OPT_HWPM_DISABLE fuses are disabled, no override required");
return 0;
}
for (idx = 0U; idx < active_chip->get_ip_max_idx(); idx++) {
chip_ip = active_chip->chip_ips[idx];
if ((hwpm_global_disable !=
TEGRA_HWPM_GLOBAL_DISABLE_DISABLED) ||
(opt_hwpm_disable != 0U)) {
/*
* Both HWPM_GLOBAL_DISABLE and OPT_HWPM_DISABLE disable all
* Perfmons in SOC HWPM. Hence, check whether either of them is set.
*/
if ((chip_ip->dependent_fuse_mask &
TEGRA_HWPM_FUSE_HWPM_GLOBAL_DISABLE_MASK) != 0U) {
/*
* Check to prevent RTR from being overridden
*/
chip_ip->override_enable = true;
} else {
tegra_hwpm_dbg(hwpm, hwpm_info,
"IP %d not overridden", idx);
}
}
}
return 0;
}
int t264_hwpm_force_enable_ips(struct tegra_soc_hwpm *hwpm)
{
int ret = 0;
tegra_hwpm_fn(hwpm, " ");
/* Force enable MSS channel IP for AV+L/Q */
if (tegra_hwpm_is_hypervisor_mode()) {
/*
* MSS CHANNEL
* MSS channel driver cannot implement HWPM <-> IP interface in AV + L, and
* AV + Q configs. Since MSS channel is part of both POR and non-POR IPs,
* this force enable is not limited by minimal config or force enable flags.
*/
#if defined(CONFIG_T264_HWPM_IP_MSS_CHANNEL)
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_mc0_base_r(),
T264_HWPM_IP_MSS_CHANNEL, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_MSS_CHANNEL force enable failed");
return ret;
}
#endif
} else {
#if defined(CONFIG_T264_HWPM_ALLOW_FORCE_ENABLE)
if (tegra_hwpm_is_platform_vsp()) {
/* Static IP instances as per VSP netlist */
}
if (tegra_hwpm_is_platform_silicon()) {
/* Static IP instances corresponding to silicon */
#if defined(CONFIG_T264_HWPM_IP_OCU)
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_ocu_base_r(),
T264_HWPM_IP_OCU, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_OCU force enable failed");
return ret;
}
#endif
#if defined(CONFIG_T264_HWPM_IP_UCF_PSW)
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_ucf_psn0_psw_base_r(),
T264_HWPM_IP_UCF_PSW, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_UCF_PSW force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_ucf_psn1_psw_base_r(),
T264_HWPM_IP_UCF_PSW, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_UCF_PSW force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_ucf_psn2_psw_base_r(),
T264_HWPM_IP_UCF_PSW, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_UCF_PSW force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_ucf_psn3_psw_base_r(),
T264_HWPM_IP_UCF_PSW, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_UCF_PSW force enable failed");
return ret;
}
#endif /* CONFIG_T264_HWPM_IP_UCF_PSW */
#if defined(CONFIG_T264_HWPM_IP_UCF_CSW)
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_ucf_csw0_base_r(),
T264_HWPM_IP_UCF_CSW, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_UCF_CSW force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_ucf_csw1_base_r(),
T264_HWPM_IP_UCF_CSW, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_UCF_CSW force enable failed");
return ret;
}
#endif /* CONFIG_T264_HWPM_IP_UCF_CSW */
#if defined(CONFIG_T264_HWPM_IP_UCF_MSW)
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_mc0_base_r(),
T264_HWPM_IP_UCF_MSW, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_UCF_MSW force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_mc2_base_r(),
T264_HWPM_IP_UCF_MSW, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_UCF_MSW force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_mc4_base_r(),
T264_HWPM_IP_UCF_MSW, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_UCF_MSW force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_mc6_base_r(),
T264_HWPM_IP_UCF_MSW, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_UCF_MSW force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_mc8_base_r(),
T264_HWPM_IP_UCF_MSW, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_UCF_MSW force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_mc10_base_r(),
T264_HWPM_IP_UCF_MSW, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_UCF_MSW force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_mc12_base_r(),
T264_HWPM_IP_UCF_MSW, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_UCF_MSW force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_mc14_base_r(),
T264_HWPM_IP_UCF_MSW, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_UCF_MSW force enable failed");
return ret;
}
#endif /* CONFIG_T264_HWPM_IP_UCF_MSW */
#if defined(CONFIG_T264_HWPM_IP_CPU)
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_cpucore0_base_r(),
T264_HWPM_IP_CPU, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_CPU force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_cpucore1_base_r(),
T264_HWPM_IP_CPU, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_CPU force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_cpucore2_base_r(),
T264_HWPM_IP_CPU, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_CPU force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_cpucore3_base_r(),
T264_HWPM_IP_CPU, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_CPU force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_cpucore4_base_r(),
T264_HWPM_IP_CPU, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_CPU force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_cpucore5_base_r(),
T264_HWPM_IP_CPU, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_CPU force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_cpucore6_base_r(),
T264_HWPM_IP_CPU, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_CPU force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_cpucore7_base_r(),
T264_HWPM_IP_CPU, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_CPU force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_cpucore8_base_r(),
T264_HWPM_IP_CPU, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_CPU force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_cpucore9_base_r(),
T264_HWPM_IP_CPU, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_CPU force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_cpucore10_base_r(),
T264_HWPM_IP_CPU, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_CPU force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_cpucore11_base_r(),
T264_HWPM_IP_CPU, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_CPU force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_cpucore12_base_r(),
T264_HWPM_IP_CPU, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_CPU force enable failed");
return ret;
}
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_cpucore13_base_r(),
T264_HWPM_IP_CPU, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T264_HWPM_IP_CPU force enable failed");
return ret;
}
#endif /* CONFIG_T264_HWPM_IP_CPU */
}
#endif /* CONFIG_T264_HWPM_ALLOW_FORCE_ENABLE */
}
return ret;
}

View File

@@ -0,0 +1,338 @@
// SPDX-License-Identifier: MIT
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <tegra_hwpm_mem_mgmt.h>
#include <tegra_hwpm_timers.h>
#include <tegra_hwpm_log.h>
#include <tegra_hwpm_io.h>
#include <tegra_hwpm.h>
#include <hal/t264/t264_internal.h>
#include <hal/t264/ip/rtr/t264_rtr.h>
#include <hal/t264/hw/t264_pmasys_soc_hwpm.h>
#include <hal/t264/hw/t264_pmmsys_soc_hwpm.h>
int t264_hwpm_disable_mem_mgmt(struct tegra_soc_hwpm *hwpm)
{
int err = 0;
u32 reset_val = 0U;
struct hwpm_ip_aperture *pma_perfmux = NULL;
tegra_hwpm_fn(hwpm, " ");
err = hwpm->active_chip->get_rtr_pma_perfmux_ptr(hwpm, NULL, &pma_perfmux);
hwpm_assert_print(hwpm, err == 0, return err,
"get rtr pma perfmux failed");
/* Reset OUTBASE register */
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_channel_outbase_r(0, 0), &reset_val);
reset_val = set_field(reset_val,
pmasys_channel_outbase_ptr_m(),
pmasys_channel_outbase_ptr_init_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_channel_outbase_r(0, 0), reset_val);
/* Reset OUTBASEUPPER register */
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_channel_outbaseupper_r(0, 0), &reset_val);
reset_val = set_field(reset_val,
pmasys_channel_outbaseupper_ptr_m(),
pmasys_channel_outbaseupper_ptr_init_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_channel_outbaseupper_r(0, 0), reset_val);
/* Reset OUTSIZE register */
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_channel_outsize_r(0, 0), &reset_val);
reset_val = set_field(reset_val,
pmasys_channel_outsize_numbytes_m(),
pmasys_channel_outsize_numbytes_init_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_channel_outsize_r(0, 0), reset_val);
/* Reset MEM_BYTES_ADDR register */
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_channel_mem_bytes_addr_r(0, 0), &reset_val);
reset_val = set_field(reset_val,
pmasys_channel_mem_bytes_addr_ptr_m(),
pmasys_channel_mem_bytes_addr_ptr_init_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_channel_mem_bytes_addr_r(0, 0), reset_val);
/* Reset MEM_HEAD register */
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_channel_mem_head_r(0, 0), &reset_val);
reset_val = set_field(reset_val,
pmasys_channel_mem_head_ptr_m(),
pmasys_channel_mem_head_ptr_init_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_channel_mem_head_r(0, 0), reset_val);
/* Reset MEM_BYTES register */
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_channel_mem_bytes_r(0, 0), &reset_val);
reset_val = set_field(reset_val,
pmasys_channel_mem_bytes_numbytes_m(),
pmasys_channel_mem_bytes_numbytes_init_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_channel_mem_bytes_r(0, 0), reset_val);
/* Reset MEMBUF_STATUS */
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_channel_control_user_r(0, 0), &reset_val);
reset_val = set_field(reset_val,
pmasys_channel_control_user_membuf_clear_status_m(),
pmasys_channel_control_user_membuf_clear_status_doit_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_channel_control_user_r(0, 0), reset_val);
return 0;
}
int t264_hwpm_enable_mem_mgmt(struct tegra_soc_hwpm *hwpm)
{
int err = 0;
u32 outbase_lo = 0U;
u32 outbase_hi = 0U;
u32 outsize = 0U;
u32 mem_bytes_addr = 0U;
u32 membuf_status = 0U;
u32 mem_head = 0U;
u32 bpc_mem_block = 0U;
struct hwpm_ip_aperture *pma_perfmux = NULL;
struct tegra_hwpm_mem_mgmt *mem_mgmt = hwpm->mem_mgmt;
tegra_hwpm_fn(hwpm, " ");
err = hwpm->active_chip->get_rtr_pma_perfmux_ptr(hwpm, NULL, &pma_perfmux);
hwpm_assert_print(hwpm, err == 0, return err,
"get rtr pma perfmux failed");
outbase_lo = mem_mgmt->stream_buf_va & pmasys_channel_outbase_ptr_m();
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_channel_outbase_r(0, 0), outbase_lo);
tegra_hwpm_dbg(hwpm, hwpm_dbg_alloc_pma_stream, "OUTBASE = 0x%x", outbase_lo);
outbase_hi = (mem_mgmt->stream_buf_va >> 32) &
pmasys_channel_outbaseupper_ptr_m();
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_channel_outbaseupper_r(0, 0), outbase_hi);
tegra_hwpm_dbg(hwpm, hwpm_dbg_alloc_pma_stream, "OUTBASEUPPER = 0x%x", outbase_hi);
outsize = mem_mgmt->stream_buf_size &
pmasys_channel_outsize_numbytes_m();
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_channel_outsize_r(0, 0), outsize);
tegra_hwpm_dbg(hwpm, hwpm_dbg_alloc_pma_stream, "OUTSIZE = 0x%x", outsize);
mem_bytes_addr = mem_mgmt->mem_bytes_buf_va &
pmasys_channel_mem_bytes_addr_ptr_m();
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_channel_mem_bytes_addr_r(0, 0), mem_bytes_addr);
tegra_hwpm_dbg(hwpm, hwpm_dbg_alloc_pma_stream,
"MEM_BYTES_ADDR = 0x%x", mem_bytes_addr);
/* Update MEM_HEAD to OUTBASE */
mem_head = mem_mgmt->stream_buf_va & pmasys_channel_mem_head_ptr_m();
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_channel_mem_head_r(0, 0), mem_head);
tegra_hwpm_dbg(hwpm, hwpm_dbg_alloc_pma_stream, "MEM_HEAD = 0x%x", mem_head);
/* Reset MEMBUF_STATUS */
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_channel_control_user_r(0, 0), &membuf_status);
membuf_status = set_field(membuf_status,
pmasys_channel_control_user_membuf_clear_status_m(),
pmasys_channel_control_user_membuf_clear_status_doit_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_channel_control_user_r(0, 0), membuf_status);
/* Update CBLOCK_BPC_MEM_BLOCK to OUTBASE to ensure BPC is bound */
bpc_mem_block = mem_mgmt->stream_buf_va &
pmasys_cblock_bpc_mem_block_base_m();
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_cblock_bpc_mem_block_r(0), bpc_mem_block);
tegra_hwpm_dbg(hwpm, hwpm_dbg_alloc_pma_stream, "bpc_mem_block = 0x%x",
bpc_mem_block);
/* Mark mem block valid */
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_cblock_bpc_mem_blockupper_r(0),
pmasys_cblock_bpc_mem_blockupper_valid_f(
pmasys_cblock_bpc_mem_blockupper_valid_true_v()));
return 0;
}
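/*
 * Worked example (illustrative only): assuming the OUTBASE ptr field covers
 * the full low 32 bits, a stream buffer IOVA of 0x0000000123456000ULL is
 * split by the code above as
 *
 *	outbase_lo = 0x23456000U;	stream_buf_va & outbase_ptr_m()
 *	outbase_hi = 0x00000001U;	(stream_buf_va >> 32) & outbaseupper_ptr_m()
 *
 * so OUTBASE and OUTBASEUPPER together hold the 64-bit buffer base address.
 */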
int t264_hwpm_invalidate_mem_config(struct tegra_soc_hwpm *hwpm)
{
int err = 0;
struct hwpm_ip_aperture *pma_perfmux = NULL;
tegra_hwpm_fn(hwpm, " ");
err = hwpm->active_chip->get_rtr_pma_perfmux_ptr(hwpm, NULL,
&pma_perfmux);
hwpm_assert_print(hwpm, err == 0, return err,
"get rtr pma perfmux failed");
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_cblock_bpc_mem_blockupper_r(0),
pmasys_cblock_bpc_mem_blockupper_valid_f(
pmasys_cblock_bpc_mem_blockupper_valid_false_v()));
return 0;
}
int t264_hwpm_stream_mem_bytes(struct tegra_soc_hwpm *hwpm)
{
int err = 0;
u32 reg_val = 0U;
u32 *mem_bytes_kernel_u32 =
(u32 *)(hwpm->mem_mgmt->mem_bytes_kernel);
struct hwpm_ip_aperture *pma_perfmux = NULL;
tegra_hwpm_fn(hwpm, " ");
err = hwpm->active_chip->get_rtr_pma_perfmux_ptr(hwpm, NULL,
&pma_perfmux);
hwpm_assert_print(hwpm, err == 0, return err,
"get rtr pma perfmux failed");
*mem_bytes_kernel_u32 = TEGRA_HWPM_MEM_BYTES_INVALID;
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_channel_control_user_r(0, 0), &reg_val);
reg_val = set_field(reg_val,
pmasys_channel_control_user_update_bytes_m(),
pmasys_channel_control_user_update_bytes_doit_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_channel_control_user_r(0, 0), reg_val);
return 0;
}
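/*
 * Illustrative caller-side sketch (assumed, not part of this file): after
 * the UPDATE_BYTES trigger above, PMA overwrites the INVALID marker in the
 * mem_bytes buffer with the number of streamed bytes, so a consumer polls
 * roughly as follows (the sleep helper name is an assumption):
 *
 *	u32 *mem_bytes = (u32 *)hwpm->mem_mgmt->mem_bytes_kernel;
 *	int retries = 10;
 *
 *	while ((*mem_bytes == TEGRA_HWPM_MEM_BYTES_INVALID) && (retries-- > 0))
 *		osal_msleep(1);		assumed OS sleep wrapper
 */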
int t264_hwpm_disable_pma_streaming(struct tegra_soc_hwpm *hwpm)
{
int err = 0;
u32 reg_val = 0U;
struct hwpm_ip_aperture *pma_perfmux = NULL;
tegra_hwpm_fn(hwpm, " ");
err = hwpm->active_chip->get_rtr_pma_perfmux_ptr(hwpm, NULL,
&pma_perfmux);
hwpm_assert_print(hwpm, err == 0, return err,
"get rtr pma perfmux failed");
/* Disable PMA streaming */
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_command_slice_trigger_config_user_r(0), &reg_val);
reg_val = set_field(reg_val,
pmasys_command_slice_trigger_config_user_record_stream_m(),
pmasys_command_slice_trigger_config_user_record_stream_disable_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_command_slice_trigger_config_user_r(0), reg_val);
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_channel_control_user_r(0, 0), &reg_val);
reg_val = set_field(reg_val,
pmasys_channel_config_user_stream_m(),
pmasys_channel_config_user_stream_disable_f());
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_channel_control_user_r(0, 0), reg_val);
return 0;
}
int t264_hwpm_update_mem_bytes_get_ptr(struct tegra_soc_hwpm *hwpm,
u64 mem_bump)
{
int err = 0;
struct hwpm_ip_aperture *pma_perfmux = NULL;
tegra_hwpm_fn(hwpm, " ");
err = hwpm->active_chip->get_rtr_pma_perfmux_ptr(hwpm, NULL,
&pma_perfmux);
hwpm_assert_print(hwpm, err == 0, return err,
"get rtr pma perfmux failed");
if (mem_bump > (u64)U32_MAX) {
tegra_hwpm_err(hwpm, "mem_bump is out of bounds");
return -EINVAL;
}
tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_channel_mem_bump_r(0, 0), (u32)mem_bump);
return 0;
}
int t264_hwpm_get_mem_bytes_put_ptr(struct tegra_soc_hwpm *hwpm,
u64 *mem_head_ptr)
{
int err = 0;
u32 reg_val = 0U;
struct hwpm_ip_aperture *pma_perfmux = NULL;
tegra_hwpm_fn(hwpm, " ");
err = hwpm->active_chip->get_rtr_pma_perfmux_ptr(hwpm, NULL,
&pma_perfmux);
hwpm_assert_print(hwpm, err == 0, return err,
"get rtr pma perfmux failed");
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_channel_mem_head_r(0, 0), &reg_val);
*mem_head_ptr = (u64)reg_val;
return err;
}
int t264_hwpm_membuf_overflow_status(struct tegra_soc_hwpm *hwpm,
u32 *overflow_status)
{
int err = 0;
u32 reg_val = 0U;
u32 field_val = 0U;
struct hwpm_ip_aperture *pma_perfmux = NULL;
tegra_hwpm_fn(hwpm, " ");
err = hwpm->active_chip->get_rtr_pma_perfmux_ptr(hwpm, NULL,
&pma_perfmux);
hwpm_assert_print(hwpm, err == 0, return err,
"get rtr pma perfmux failed");
tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_channel_status_r(0, 0), &reg_val);
field_val = pmasys_channel_status_membuf_status_v(
reg_val);
*overflow_status = (field_val ==
pmasys_channel_status_membuf_status_overflowed_v()) ?
TEGRA_HWPM_MEMBUF_OVERFLOWED : TEGRA_HWPM_MEMBUF_NOT_OVERFLOWED;
return err;
}
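/*
 * Illustrative consumer flow (assumed caller, not part of this file),
 * combining the helpers in this file: read the PUT pointer, check for
 * overflow, copy out the valid data and return the space to PMA by bumping
 * the GET pointer. Here bytes_consumed stands for however many bytes the
 * caller actually copied out.
 *
 *	u64 put = 0ULL;
 *	u32 overflow = TEGRA_HWPM_MEMBUF_NOT_OVERFLOWED;
 *
 *	if ((t264_hwpm_get_mem_bytes_put_ptr(hwpm, &put) == 0) &&
 *	    (t264_hwpm_membuf_overflow_status(hwpm, &overflow) == 0)) {
 *		if (overflow == TEGRA_HWPM_MEMBUF_OVERFLOWED)
 *			tegra_hwpm_err(hwpm, "PMA stream buffer overflowed");
 *		... copy bytes_consumed bytes out of the stream buffer ...
 *		(void) t264_hwpm_update_mem_bytes_get_ptr(hwpm, bytes_consumed);
 *	}
 */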


@@ -0,0 +1,106 @@
/* SPDX-License-Identifier: MIT */
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef T264_HWPM_PERFMON_DEVICE_INDEX_H
#define T264_HWPM_PERFMON_DEVICE_INDEX_H
enum t264_hwpm_perfmon_device_index {
T264_SYSTEM_MSS_HUB0_PERFMON_DEVICE_NODE_INDEX,
T264_HWPM_PERFMON_DEVICE_NODE_INDEX,
T264_CPU_CORE0_PERFMON_DEVICE_NODE_INDEX,
T264_CPU_CORE1_PERFMON_DEVICE_NODE_INDEX,
T264_CPU_CORE2_PERFMON_DEVICE_NODE_INDEX,
T264_CPU_CORE3_PERFMON_DEVICE_NODE_INDEX,
T264_CPU_CORE4_PERFMON_DEVICE_NODE_INDEX,
T264_CPU_CORE5_PERFMON_DEVICE_NODE_INDEX,
T264_CPU_CORE6_PERFMON_DEVICE_NODE_INDEX,
T264_CPU_CORE7_PERFMON_DEVICE_NODE_INDEX,
T264_CPU_CORE8_PERFMON_DEVICE_NODE_INDEX,
T264_CPU_CORE9_PERFMON_DEVICE_NODE_INDEX,
T264_CPU_CORE10_PERFMON_DEVICE_NODE_INDEX,
T264_CPU_CORE11_PERFMON_DEVICE_NODE_INDEX,
T264_CPU_CORE12_PERFMON_DEVICE_NODE_INDEX,
T264_CPU_CORE13_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_MSW0_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_MSW1_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_MSW2_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_MSW3_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_MSW4_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_MSW5_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_MSW6_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_MSW7_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_MSW8_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_MSW9_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_MSW10_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_MSW11_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_MSW12_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_MSW13_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_MSW14_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_MSW15_PERFMON_DEVICE_NODE_INDEX,
T264_MSS_CHANNEL_PARTA0_PERFMON_DEVICE_NODE_INDEX,
T264_MSS_CHANNEL_PARTA1_PERFMON_DEVICE_NODE_INDEX,
T264_MSS_CHANNEL_PARTA2_PERFMON_DEVICE_NODE_INDEX,
T264_MSS_CHANNEL_PARTA3_PERFMON_DEVICE_NODE_INDEX,
T264_MSS_CHANNEL_PARTB0_PERFMON_DEVICE_NODE_INDEX,
T264_MSS_CHANNEL_PARTB1_PERFMON_DEVICE_NODE_INDEX,
T264_MSS_CHANNEL_PARTB2_PERFMON_DEVICE_NODE_INDEX,
T264_MSS_CHANNEL_PARTB3_PERFMON_DEVICE_NODE_INDEX,
T264_MSS_CHANNEL_PARTC0_PERFMON_DEVICE_NODE_INDEX,
T264_MSS_CHANNEL_PARTC1_PERFMON_DEVICE_NODE_INDEX,
T264_MSS_CHANNEL_PARTC2_PERFMON_DEVICE_NODE_INDEX,
T264_MSS_CHANNEL_PARTC3_PERFMON_DEVICE_NODE_INDEX,
T264_MSS_CHANNEL_PARTD0_PERFMON_DEVICE_NODE_INDEX,
T264_MSS_CHANNEL_PARTD1_PERFMON_DEVICE_NODE_INDEX,
T264_MSS_CHANNEL_PARTD2_PERFMON_DEVICE_NODE_INDEX,
T264_MSS_CHANNEL_PARTD3_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_CSW0_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_CSW1_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_MSS_HUB0_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_MSS_HUB1_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_TCU0_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_TCU1_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_PSW0_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_PSW1_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_PSW2_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_PSW3_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_TCU3_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_MSS_HUB2_PERFMON_DEVICE_NODE_INDEX,
T264_UCF_TCU2_PERFMON_DEVICE_NODE_INDEX,
T264_VICA0_PERFMON_DEVICE_NODE_INDEX,
T264_PVAC0_PERFMON_DEVICE_NODE_INDEX,
T264_PVAV0_PERFMON_DEVICE_NODE_INDEX,
T264_PVAV1_PERFMON_DEVICE_NODE_INDEX,
T264_VISION_MSS_HUB0_PERFMON_DEVICE_NODE_INDEX,
T264_VISION_MSS_HUB1_PERFMON_DEVICE_NODE_INDEX,
T264_PVAP0_PERFMON_DEVICE_NODE_INDEX,
T264_PVAP1_PERFMON_DEVICE_NODE_INDEX,
T264_DISP_USB_MSS_HUB0_PERFMON_DEVICE_NODE_INDEX,
T264_DISP_USB_TCU0_PERFMON_DEVICE_NODE_INDEX,
T264_OCU0_PERFMON_DEVICE_NODE_INDEX,
T264_UPHY0_MSS_HUB0_PERFMON_DEVICE_NODE_INDEX,
T264_UPHY0_MSS_HUB1_PERFMON_DEVICE_NODE_INDEX,
T264_PMA_DEVICE_NODE_INDEX,
T264_RTR_DEVICE_NODE_INDEX
};
#endif


@@ -0,0 +1,225 @@
// SPDX-License-Identifier: MIT
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "t264_regops_allowlist.h"
struct allowlist t264_perfmon_alist[67] = {
{0x00000000, true},
{0x00000004, true},
{0x00000008, true},
{0x0000000c, true},
{0x00000010, true},
{0x00000014, true},
{0x00000020, true},
{0x00000024, true},
{0x00000028, true},
{0x0000002c, true},
{0x00000030, true},
{0x00000034, true},
{0x00000040, true},
{0x00000044, true},
{0x00000048, true},
{0x0000004c, true},
{0x00000050, true},
{0x00000054, true},
{0x00000058, true},
{0x0000005c, true},
{0x00000060, true},
{0x00000064, true},
{0x00000068, true},
{0x0000006c, true},
{0x00000070, true},
{0x00000074, true},
{0x00000078, true},
{0x0000007c, true},
{0x00000080, true},
{0x00000084, true},
{0x00000088, true},
{0x0000008c, true},
{0x00000090, true},
{0x00000098, true},
{0x0000009c, true},
{0x000000a0, true},
{0x000000a4, true},
{0x000000a8, true},
{0x000000ac, true},
{0x000000b0, true},
{0x000000b4, true},
{0x000000b8, true},
{0x000000bc, true},
{0x000000c0, true},
{0x000000c4, true},
{0x000000c8, true},
{0x000000cc, true},
{0x000000d0, true},
{0x000000d4, true},
{0x000000d8, true},
{0x000000dc, true},
{0x000000e0, true},
{0x000000e4, true},
{0x000000e8, true},
{0x000000ec, true},
{0x000000f8, true},
{0x000000fc, true},
{0x00000100, true},
{0x00000108, true},
{0x00000110, true},
{0x00000114, true},
{0x00000118, true},
{0x0000011c, true},
{0x00000120, true},
{0x00000124, true},
{0x00000128, true},
{0x00000130, true},
};
struct allowlist t264_pma_res_cmd_slice_rtr_alist[41] = {
{0x00000858, false},
{0x00000a00, false},
{0x00000a10, false},
{0x00000a14, false},
{0x00000a20, false},
{0x00000a24, false},
{0x00000a28, false},
{0x00000a2c, false},
{0x00000a30, false},
{0x00000a34, false},
{0x00000a38, false},
{0x00000a3c, false},
{0x00001104, false},
{0x00001110, false},
{0x00001114, false},
{0x0000111c, false},
{0x00001120, false},
{0x00001124, false},
{0x00001128, false},
{0x0000112c, false},
{0x00001130, false},
{0x00001134, false},
{0x00001138, false},
{0x0000113c, false},
{0x00001140, false},
{0x00001144, false},
{0x00001148, false},
{0x0000114c, false},
{0x00001150, false},
{0x00001154, false},
{0x00001158, false},
{0x0000115c, false},
{0x00001160, false},
{0x00001164, false},
{0x00001168, false},
{0x0000116c, false},
{0x00001170, false},
{0x00001174, false},
{0x00001178, false},
{0x0000117c, false},
{0x00000818, false},
};
struct allowlist t264_pma_res_pma_alist[1] = {
{0x00000858, true},
};
struct allowlist t264_rtr_alist[2] = {
{0x00000080, false},
{0x000000a4, false},
};
struct allowlist t264_vic_alist[8] = {
{0x00001088, true},
{0x000010a8, true},
{0x0000cb94, true},
{0x0000cb80, true},
{0x0000cb84, true},
{0x0000cb88, true},
{0x0000cb8c, true},
{0x0000cb90, true},
};
struct allowlist t264_pva_pm_alist[10] = {
{0x0000800c, true},
{0x00008010, true},
{0x00008014, true},
{0x00008018, true},
{0x0000801c, true},
{0x00008020, true},
{0x00008024, true},
{0x00008028, true},
{0x0000802c, true},
{0x00008030, true},
};
struct allowlist t264_mss_channel_alist[2] = {
{0x00008914, true},
{0x00008918, true},
};
struct allowlist t264_mss_hub_alist[3] = {
{0x00006f3c, true},
{0x00006f34, true},
{0x00006f38, true},
};
struct allowlist t264_ocu_alist[1] = {
{0x00000058, true},
};
struct allowlist t264_smmu_alist[1] = {
{0x00005000, true},
};
struct allowlist t264_ucf_msw_cbridge_alist[1] = {
{0x0000891c, true},
};
struct allowlist t264_ucf_msn_msw0_alist[2] = {
{0x00000000, true},
{0x00000008, true},
};
struct allowlist t264_ucf_msn_msw1_alist[2] = {
{0x00000010, true},
{0x00000018, true},
};
struct allowlist t264_ucf_msw_slc_alist[1] = {
{0x00000000, true},
};
struct allowlist t264_ucf_psn_psw_alist[2] = {
{0x00000000, true},
{0x00000008, true},
};
struct allowlist t264_ucf_csw_alist[2] = {
{0x00000000, true},
{0x00000008, true},
};
struct allowlist t264_cpucore_alist[4] = {
{0x00000000, true},
{0x00000008, true},
{0x00000010, true},
{0x00000018, true},
};
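/*
 * Illustrative lookup sketch (assumed helper, not part of this file): a
 * REG_OPS request is accepted only if the requested offset appears in the
 * allowlist attached to the target aperture. The field name reg_offset is
 * an assumption about struct allowlist.
 *
 *	static bool t264_alist_contains(struct allowlist *alist,
 *		u64 alist_size, u64 offset)
 *	{
 *		u64 i;
 *
 *		for (i = 0ULL; i < alist_size; i++) {
 *			if (alist[i].reg_offset == offset)
 *				return true;
 *		}
 *		return false;
 *	}
 */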


@@ -0,0 +1,47 @@
/* SPDX-License-Identifier: MIT */
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef T264_HWPM_REGOPS_ALLOWLIST_H
#define T264_HWPM_REGOPS_ALLOWLIST_H
#include <tegra_hwpm.h>
extern struct allowlist t264_perfmon_alist[67];
extern struct allowlist t264_pma_res_cmd_slice_rtr_alist[41];
extern struct allowlist t264_pma_res_pma_alist[1];
extern struct allowlist t264_rtr_alist[2];
extern struct allowlist t264_vic_alist[8];
extern struct allowlist t264_pva_pm_alist[10];
extern struct allowlist t264_mss_channel_alist[2];
extern struct allowlist t264_mss_hub_alist[3];
extern struct allowlist t264_ocu_alist[1];
extern struct allowlist t264_smmu_alist[1];
extern struct allowlist t264_ucf_msw_cbridge_alist[1];
extern struct allowlist t264_ucf_msn_msw0_alist[2];
extern struct allowlist t264_ucf_msn_msw1_alist[2];
extern struct allowlist t264_ucf_msw_slc_alist[1];
extern struct allowlist t264_ucf_psn_psw_alist[2];
extern struct allowlist t264_ucf_csw_alist[2];
extern struct allowlist t264_cpucore_alist[4];
#endif /* T264_HWPM_REGOPS_ALLOWLIST_H */


@@ -0,0 +1,214 @@
// SPDX-License-Identifier: MIT
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <tegra_hwpm_static_analysis.h>
#include <tegra_hwpm_timers.h>
#include <tegra_hwpm_log.h>
#include <tegra_hwpm_io.h>
#include <tegra_hwpm.h>
#include <hal/t264/t264_internal.h>
#include <hal/t264/hw/t264_pmasys_soc_hwpm.h>
#include <hal/t264/hw/t264_pmmsys_soc_hwpm.h>
#define TEGRA_HWPM_CBLOCK_CHANNEL_TO_CMD_SLICE(cblock, channel) \
(((cblock) * pmmsys_num_channels_per_cblock_v()) + (channel))
#define TEGRA_HWPM_MAX_SUPPORTED_DGS 256U
#define TEGRA_HWPM_NUM_DG_STATUS_PER_REG \
(TEGRA_HWPM_MAX_SUPPORTED_DGS / \
pmmsys_router_user_dgmap_status_secure__size_1_v())
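/*
 * Worked example (illustrative): if the router exposed 8 DGMAP status
 * registers, TEGRA_HWPM_NUM_DG_STATUS_PER_REG would be 256 / 8 = 32, so a
 * perfmon with dg_idx = 70 would be polled in status register index
 * 70 / 32 = 2 at bit position 70 % 32 = 6.
 */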
int t264_hwpm_perfmon_enable(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip_aperture *perfmon)
{
u32 reg_val;
u32 cblock = 0U;
u32 channel = 0U;
u32 dg_idx = 0U;
u32 config_dgmap = 0U;
u32 dgmap_status_reg_idx = 0U, dgmap_status_reg_dgidx = 0U;
u32 retries = 10U;
u32 sleep_msecs = 100U;
int err = 0;
struct hwpm_ip_aperture *rtr_perfmux = NULL;
tegra_hwpm_fn(hwpm, " ");
err = hwpm->active_chip->get_rtr_pma_perfmux_ptr(hwpm, &rtr_perfmux,
NULL);
hwpm_assert_print(hwpm, err == 0, return err,
"get rtr pma perfmux failed");
/* Enable */
tegra_hwpm_dbg(hwpm, hwpm_dbg_bind,
"Enabling PERFMON(0x%llx - 0x%llx)",
(unsigned long long)perfmon->start_abs_pa,
(unsigned long long)perfmon->end_abs_pa);
/*
 * The HWPM readl/writel helpers expect the register address relative to
 * the perfmon group base address.
 * Hence use the enginestatus offset + perfmon base_pa as the register
 * address.
 */
tegra_hwpm_readl(hwpm, perfmon,
tegra_hwpm_safe_add_u64(pmmsys_enginestatus_o(),
perfmon->base_pa), &reg_val);
reg_val = set_field(reg_val, pmmsys_enginestatus_enable_m(),
pmmsys_enginestatus_enable_out_f());
tegra_hwpm_writel(hwpm, perfmon,
tegra_hwpm_safe_add_u64(pmmsys_enginestatus_o(),
perfmon->base_pa), reg_val);
/*
 * The HWPM readl/writel helpers expect the register address relative to
 * the perfmon group base address.
 * Hence use the secure_config offset + perfmon base_pa as the register
 * address. The register also contains the dg_idx programmed by HW,
 * which is used below to poll the DG mapping status in the router.
 */
tegra_hwpm_readl(hwpm, perfmon,
tegra_hwpm_safe_add_u64(pmmsys_secure_config_o(),
perfmon->base_pa), &config_dgmap);
dg_idx = pmmsys_secure_config_dg_idx_v(config_dgmap);
/* Configure DG map for this perfmon */
config_dgmap = set_field(config_dgmap,
pmmsys_secure_config_cmd_slice_id_m() |
pmmsys_secure_config_channel_id_m() |
pmmsys_secure_config_cblock_id_m() |
pmmsys_secure_config_mapped_m() |
pmmsys_secure_config_use_prog_dg_idx_m() |
pmmsys_secure_config_command_pkt_decoder_m(),
pmmsys_secure_config_cmd_slice_id_f(
TEGRA_HWPM_CBLOCK_CHANNEL_TO_CMD_SLICE(
cblock, channel)) |
pmmsys_secure_config_channel_id_f(channel) |
pmmsys_secure_config_cblock_id_f(cblock) |
pmmsys_secure_config_mapped_true_f() |
pmmsys_secure_config_use_prog_dg_idx_false_f() |
pmmsys_secure_config_command_pkt_decoder_enable_f());
tegra_hwpm_writel(hwpm, perfmon,
tegra_hwpm_safe_add_u64(pmmsys_secure_config_o(),
perfmon->base_pa), config_dgmap);
/* Make sure that the DG map status is propagated to the router */
dgmap_status_reg_idx = dg_idx / TEGRA_HWPM_NUM_DG_STATUS_PER_REG;
dgmap_status_reg_dgidx = dg_idx % TEGRA_HWPM_NUM_DG_STATUS_PER_REG;
tegra_hwpm_timeout_print(hwpm, retries, sleep_msecs, rtr_perfmux,
pmmsys_router_user_dgmap_status_secure_r(dgmap_status_reg_idx),
&reg_val,
(((reg_val >> dgmap_status_reg_dgidx) &
pmmsys_router_user_dgmap_status_secure_dg_s()) !=
pmmsys_router_user_dgmap_status_secure_dg_mapped_v()),
"Perfmon(0x%llx - 0x%llx) dgmap %d status update timed out",
(unsigned long long)perfmon->start_abs_pa,
(unsigned long long)perfmon->end_abs_pa, dg_idx);
return 0;
}
int t264_hwpm_perfmon_disable(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip_aperture *perfmon)
{
u32 reg_val;
u32 dg_idx = 0U;
u32 config_dgmap = 0U;
u32 dgmap_status_reg_idx = 0U, dgmap_status_reg_dgidx = 0U;
u32 retries = 10U;
u32 sleep_msecs = 100U;
int err = 0;
struct hwpm_ip_aperture *rtr_perfmux = NULL;
tegra_hwpm_fn(hwpm, " ");
if (perfmon->element_type == HWPM_ELEMENT_PERFMUX) {
/*
* Since HWPM elements use perfmon functions,
* skip disabling HWPM PERFMUX elements
*/
return 0;
}
err = hwpm->active_chip->get_rtr_pma_perfmux_ptr(hwpm, &rtr_perfmux,
NULL);
hwpm_assert_print(hwpm, err == 0, return err,
"get rtr pma perfmux failed");
/* Disable */
tegra_hwpm_dbg(hwpm, hwpm_dbg_release_resource,
"Disabling PERFMON(0x%llx - 0x%llx)",
(unsigned long long)perfmon->start_abs_pa,
(unsigned long long)perfmon->end_abs_pa);
/*
 * The HWPM readl/writel helpers expect the register address relative to
 * the perfmon group base address.
 * Hence use the control offset + perfmon base_pa as the register
 * address.
 */
tegra_hwpm_readl(hwpm, perfmon,
tegra_hwpm_safe_add_u64(pmmsys_control_o(),
perfmon->base_pa), &reg_val);
reg_val = set_field(reg_val, pmmsys_control_mode_m(),
pmmsys_control_mode_disable_f());
tegra_hwpm_writel(hwpm, perfmon,
tegra_hwpm_safe_add_u64(pmmsys_control_o(),
perfmon->base_pa), reg_val);
/*
 * The HWPM readl/writel helpers expect the register address relative to
 * the perfmon group base address.
 * Hence use the secure_config offset + perfmon base_pa as the register
 * address. The register also contains the dg_idx programmed by HW,
 * which is used below to poll the DG mapping status in the router.
 */
tegra_hwpm_readl(hwpm, perfmon,
tegra_hwpm_safe_add_u64(pmmsys_secure_config_o(),
perfmon->base_pa), &config_dgmap);
dg_idx = pmmsys_secure_config_dg_idx_v(config_dgmap);
/* Reset DG map for this perfmon */
config_dgmap = set_field(config_dgmap,
pmmsys_secure_config_mapped_m(),
pmmsys_secure_config_mapped_false_f());
tegra_hwpm_writel(hwpm, perfmon,
tegra_hwpm_safe_add_u64(pmmsys_secure_config_o(),
perfmon->base_pa), config_dgmap);
/* Make sure that the DG map status is propagated to the router */
dgmap_status_reg_idx = dg_idx / TEGRA_HWPM_NUM_DG_STATUS_PER_REG;
dgmap_status_reg_dgidx = dg_idx % TEGRA_HWPM_NUM_DG_STATUS_PER_REG;
tegra_hwpm_timeout_print(hwpm, retries, sleep_msecs, rtr_perfmux,
pmmsys_router_user_dgmap_status_secure_r(dgmap_status_reg_idx),
&reg_val,
(((reg_val >> dgmap_status_reg_dgidx) &
pmmsys_router_user_dgmap_status_secure_dg_s()) !=
pmmsys_router_user_dgmap_status_secure_dg_not_mapped_v()),
"Perfmon(0x%llx - 0x%llx) dgmap %d status update timed out",
(unsigned long long)perfmon->start_abs_pa,
(unsigned long long)perfmon->end_abs_pa, dg_idx);
return 0;
}
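/*
 * Illustrative call pattern (assumed caller, not part of this file): the
 * common layer is expected to walk the reserved perfmon apertures and call
 * the enable op at bind time, with resource release mirroring it through
 * the disable op.
 *
 *	err = t264_hwpm_perfmon_enable(hwpm, perfmon);
 *	...
 *	err = t264_hwpm_perfmon_disable(hwpm, perfmon);
 */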


@@ -40,11 +40,11 @@ static const struct of_device_id tegra_soc_hwpm_of_match[] = {
{
.compatible = "nvidia,th500-soc-hwpm",
},
{
.compatible = "nvidia,t264-soc-hwpm",
},
#ifdef CONFIG_TEGRA_NEXT1_HWPM
#include <os/linux/next1_driver.h>
#endif
#ifdef CONFIG_TEGRA_NEXT3_HWPM
#include <os/linux/next3_driver.h>
#endif
{ },
};


@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -31,9 +31,6 @@
#if defined(CONFIG_TEGRA_NEXT1_HWPM)
#include <os/linux/next1_soc_utils.h>
#endif
#if defined(CONFIG_TEGRA_NEXT3_HWPM)
#include <os/linux/next3_soc_utils.h>
#endif
static struct hwpm_soc_chip_info chip_info = {
.chip_id = CHIP_ID_UNKNOWN,
@@ -56,6 +53,12 @@ const struct hwpm_soc_chip_info th500_chip_info = {
};
#endif
const struct hwpm_soc_chip_info t264_soc_chip_info = {
.chip_id = 0x26,
.chip_id_rev = 0x4,
.platform = PLAT_SI,
};
/* This function should be invoked only once before retrieving soc chip info */
int tegra_hwpm_init_chip_info(struct tegra_hwpm_os_linux *hwpm_linux)
{
@@ -96,13 +99,16 @@ int tegra_hwpm_init_chip_info(struct tegra_hwpm_os_linux *hwpm_linux)
goto complete;
}
#endif /* CONFIG_ACPI */
#if defined(CONFIG_TEGRA_NEXT1_HWPM)
if (tegra_hwpm_next1_get_chip_compatible(&chip_info) == 0) {
if (of_machine_is_compatible("nvidia,tegra264")) {
chip_info.chip_id = t264_soc_chip_info.chip_id;
chip_info.chip_id_rev = t264_soc_chip_info.chip_id_rev;
chip_info.platform = t264_soc_chip_info.platform;
goto complete;
}
#endif
#if defined(CONFIG_TEGRA_NEXT3_HWPM)
if (tegra_hwpm_next3_get_chip_compatible(&chip_info) == 0) {
#if defined(CONFIG_TEGRA_NEXT1_HWPM)
if (tegra_hwpm_next1_get_chip_compatible(&chip_info) == 0) {
goto complete;
}
#endif