diff --git a/Makefile b/Makefile index 346166c..bf7e15b 100644 --- a/Makefile +++ b/Makefile @@ -5,15 +5,68 @@ GCOV_PROFILE := y ccflags-y += -I$(srctree.nvidia)/drivers/platform/tegra/hwpm +ccflags-y += -I$(srctree.nvidia)/drivers/platform/tegra/hwpm/include ccflags-y += -I$(srctree.nvidia)/include -obj-y += tegra-soc-hwpm.o -obj-y += tegra-soc-hwpm-io.o -obj-y += tegra-soc-hwpm-ioctl.o -obj-y += tegra-soc-hwpm-log.o -obj-y += tegra-soc-hwpm-ip.o -obj-y += hal/tegra_soc_hwpm_init.o -obj-y += hal/t234/t234_soc_hwpm_init.o -obj-y += hal/t234/t234_soc_hwpm_mem_buf_utils.o -obj-y += hal/t234/t234_soc_hwpm_resource_utils.o -obj-$(CONFIG_DEBUG_FS) += tegra-soc-hwpm-debugfs.o +# +# Control IP config +# TODO: Set IP flag and include IP source file as per build config +# +ccflags-y += -DCONFIG_SOC_HWPM_IP_VI=1 +ccflags-y += -DCONFIG_SOC_HWPM_IP_ISP +ccflags-y += -DCONFIG_SOC_HWPM_IP_VIC +ccflags-y += -DCONFIG_SOC_HWPM_IP_OFA +ccflags-y += -DCONFIG_SOC_HWPM_IP_PVA +ccflags-y += -DCONFIG_SOC_HWPM_IP_NVDLA +ccflags-y += -DCONFIG_SOC_HWPM_IP_MGBE +ccflags-y += -DCONFIG_SOC_HWPM_IP_SCF +ccflags-y += -DCONFIG_SOC_HWPM_IP_NVDEC +ccflags-y += -DCONFIG_SOC_HWPM_IP_NVENC +ccflags-y += -DCONFIG_SOC_HWPM_IP_PCIE +ccflags-y += -DCONFIG_SOC_HWPM_IP_DISPLAY=1 +ccflags-y += -DCONFIG_SOC_HWPM_IP_MSS_CHANNEL +ccflags-y += -DCONFIG_SOC_HWPM_IP_MSS_GPU_HUB +ccflags-y += -DCONFIG_SOC_HWPM_IP_MSS_ISO_NISO_HUBS +ccflags-y += -DCONFIG_SOC_HWPM_IP_MSS_MCF + +obj-$(CONFIG_DEBUG_FS) += os/linux/tegra_hwpm_debugfs.o +obj-y += os/linux/tegra_hwpm_linux.o +obj-y += os/linux/tegra_hwpm_io.o +obj-y += os/linux/tegra_hwpm_ip.o +obj-y += os/linux/tegra_hwpm_ioctl.o +obj-y += os/linux/tegra_hwpm_log.o + +obj-y += common/tegra_hwpm_alist_utils.o +obj-y += common/tegra_hwpm_mem_buf_utils.o +obj-y += common/tegra_hwpm_regops_utils.o +obj-y += common/tegra_hwpm_resource_utils.o +obj-y += common/tegra_hwpm_init.o + +obj-y += hal/t234/t234_hwpm_alist_utils.o +obj-y += hal/t234/t234_hwpm_aperture_utils.o +obj-y += hal/t234/t234_hwpm_interface_utils.o +obj-y += hal/t234/t234_hwpm_ip_utils.o +obj-y += hal/t234/t234_hwpm_mem_buf_utils.o +obj-y += hal/t234/t234_hwpm_regops_utils.o +obj-y += hal/t234/t234_hwpm_regops_allowlist.o +obj-y += hal/t234/t234_hwpm_resource_utils.o + +obj-y += hal/t234/ip/pma/t234_hwpm_ip_pma.o +obj-y += hal/t234/ip/rtr/t234_hwpm_ip_rtr.o + +obj-y += hal/t234/ip/display/t234_hwpm_ip_display.o +obj-y += hal/t234/ip/isp/t234_hwpm_ip_isp.o +obj-y += hal/t234/ip/mgbe/t234_hwpm_ip_mgbe.o +obj-y += hal/t234/ip/mss_channel/t234_hwpm_ip_mss_channel.o +obj-y += hal/t234/ip/mss_gpu_hub/t234_hwpm_ip_mss_gpu_hub.o +obj-y += hal/t234/ip/mss_iso_niso_hubs/t234_hwpm_ip_mss_iso_niso_hubs.o +obj-y += hal/t234/ip/mss_mcf/t234_hwpm_ip_mss_mcf.o +obj-y += hal/t234/ip/nvdec/t234_hwpm_ip_nvdec.o +obj-y += hal/t234/ip/nvdla/t234_hwpm_ip_nvdla.o +obj-y += hal/t234/ip/nvenc/t234_hwpm_ip_nvenc.o +obj-y += hal/t234/ip/ofa/t234_hwpm_ip_ofa.o +obj-y += hal/t234/ip/pcie/t234_hwpm_ip_pcie.o +obj-y += hal/t234/ip/pva/t234_hwpm_ip_pva.o +obj-y += hal/t234/ip/scf/t234_hwpm_ip_scf.o +obj-y += hal/t234/ip/vi/t234_hwpm_ip_vi.o +obj-y += hal/t234/ip/vic/t234_hwpm_ip_vic.o diff --git a/common/tegra_hwpm_alist_utils.c b/common/tegra_hwpm_alist_utils.c new file mode 100644 index 0000000..6a13a05 --- /dev/null +++ b/common/tegra_hwpm_alist_utils.c @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +int tegra_soc_hwpm_get_allowlist_size(struct tegra_soc_hwpm *hwpm) +{ + int ret = 0; + + hwpm->full_alist_size = 0ULL; + + tegra_hwpm_fn(hwpm, " "); + + if (hwpm->active_chip->get_alist_size == NULL) { + tegra_hwpm_err(hwpm, "get_alist_size uninitialized"); + return -ENODEV; + } + ret = hwpm->active_chip->get_alist_size(hwpm); + if (ret != 0) { + tegra_hwpm_err(hwpm, "get_alist_size failed"); + return ret; + } + + return 0; +} + +int tegra_soc_hwpm_update_allowlist(struct tegra_soc_hwpm *hwpm, + void *ioctl_struct) +{ + int err = 0; + long pinned_pages = 0; + long page_idx = 0; + u64 alist_buf_size = 0; + u64 num_pages = 0; + u64 *full_alist_u64 = NULL; + void *full_alist = NULL; + struct page **pages = NULL; + struct tegra_soc_hwpm_query_allowlist *query_allowlist = + (struct tegra_soc_hwpm_query_allowlist *)ioctl_struct; + unsigned long user_va = (unsigned long)(query_allowlist->allowlist); + unsigned long offset = user_va & ~PAGE_MASK; + + tegra_hwpm_fn(hwpm, " "); + + if (hwpm->full_alist_size < 0) { + tegra_hwpm_err(hwpm, "Invalid allowlist size"); + return -EINVAL; + } + if (hwpm->active_chip->get_alist_buf_size == NULL) { + tegra_hwpm_err(hwpm, "alist_buf_size uninitialized"); + return -ENODEV; + } + alist_buf_size = hwpm->full_alist_size * + hwpm->active_chip->get_alist_buf_size(hwpm); + + /* Memory map user buffer into kernel address space */ + num_pages = DIV_ROUND_UP(offset + alist_buf_size, PAGE_SIZE); + pages = (struct page **)kzalloc(sizeof(*pages) * num_pages, GFP_KERNEL); + if (!pages) { + tegra_hwpm_err(hwpm, + "Couldn't allocate memory for pages array"); + err = -ENOMEM; + goto alist_unmap; + } + + pinned_pages = get_user_pages(user_va & PAGE_MASK, num_pages, 0, + pages, NULL); + if (pinned_pages != num_pages) { + tegra_hwpm_err(hwpm, "Requested %llu pages / Got %ld pages", + num_pages, pinned_pages); + err = -ENOMEM; + goto alist_unmap; + } + + full_alist = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL); + if (!full_alist) { + tegra_hwpm_err(hwpm, "Couldn't map allowlist buffer into" + " kernel address space"); + err = -ENOMEM; + goto alist_unmap; + } + full_alist_u64 = (u64 *)(full_alist + offset); + + if (hwpm->active_chip->combine_alist == NULL) { + tegra_hwpm_err(hwpm, "combine_alist uninitialized"); + return -ENODEV; + } + err = hwpm->active_chip->combine_alist(hwpm, full_alist_u64); + if (err != 0) { + goto alist_unmap; + } + + query_allowlist->allowlist_size = hwpm->full_alist_size; + return 0; + +alist_unmap: + if (full_alist) + vunmap(full_alist); + if (pinned_pages > 0) { + for (page_idx = 0; page_idx < pinned_pages; page_idx++) { + set_page_dirty(pages[page_idx]); + put_page(pages[page_idx]); + } + } + if (pages) { + kfree(pages); + } + + return err; +} \ No newline at end of file diff --git a/common/tegra_hwpm_init.c b/common/tegra_hwpm_init.c new file mode 100644 index 0000000..81e6117 --- /dev/null +++ b/common/tegra_hwpm_init.c @@ -0,0 +1,235 @@ +/* + * Copyright (c) 
2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +int tegra_soc_hwpm_init_chip_info(struct tegra_soc_hwpm *hwpm) +{ + int err = -EINVAL; + + tegra_hwpm_fn(hwpm, " "); + + hwpm->device_info.chip = tegra_get_chip_id(); + hwpm->device_info.chip_revision = tegra_get_major_rev(); + hwpm->device_info.revision = tegra_chip_get_revision(); + hwpm->device_info.platform = tegra_get_platform(); + + hwpm->dbg_mask = TEGRA_HWPM_DEFAULT_DBG_MASK; + + switch (hwpm->device_info.chip) { + case 0x23: + switch (hwpm->device_info.chip_revision) { + case 0x4: + err = t234_hwpm_init_chip_info(hwpm); + break; + default: + tegra_hwpm_err(hwpm, "Chip 0x%x rev 0x%x not supported", + hwpm->device_info.chip, + hwpm->device_info.chip_revision); + break; + } + break; + default: + tegra_hwpm_err(hwpm, "Chip 0x%x not supported", + hwpm->device_info.chip); + break; + } + + if (err != 0) { + tegra_hwpm_err(hwpm, "init_chip_info failed"); + } + + return err; +} + +int tegra_soc_hwpm_setup_sw(struct tegra_soc_hwpm *hwpm) +{ + int ret = 0; + + tegra_hwpm_fn(hwpm, " "); + + if (hwpm->active_chip->init_fs_info == NULL) { + tegra_hwpm_err(hwpm, "init_fs_info uninitialized"); + goto enodev; + } + ret = hwpm->active_chip->init_fs_info(hwpm); + if (ret < 0) { + tegra_hwpm_err(hwpm, "Unable to initialize chip fs_info"); + goto fail; + } + + /* Initialize SW state */ + hwpm->bind_completed = false; + hwpm->full_alist_size = 0; + + return 0; +enodev: + ret = -ENODEV; +fail: + return ret; +} + +int tegra_soc_hwpm_setup_hw(struct tegra_soc_hwpm *hwpm) +{ + int ret = 0; + + tegra_hwpm_fn(hwpm, " "); + + /* + * Map PMA and RTR apertures + * PMA and RTR are hwpm apertures which include hwpm config registers. + * Map/reserve these apertures to get MMIO address required for hwpm + * configuration (following steps). 
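+ * Each of the steps below goes through the active chip HAL function
+ * pointers; a missing pointer is reported and the setup returns -ENODEV.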
+ */ + if (hwpm->active_chip->reserve_pma == NULL) { + tegra_hwpm_err(hwpm, "reserve_pma uninitialized"); + goto enodev; + } + ret = hwpm->active_chip->reserve_pma(hwpm); + if (ret < 0) { + tegra_hwpm_err(hwpm, "Unable to reserve PMA aperture"); + goto fail; + } + + if (hwpm->active_chip->reserve_rtr == NULL) { + tegra_hwpm_err(hwpm, "reserve_rtr uninitialized"); + goto enodev; + } + ret = hwpm->active_chip->reserve_rtr(hwpm); + if (ret < 0) { + tegra_hwpm_err(hwpm, "Unable to reserve RTR aperture"); + goto fail; + } + + /* Disable SLCG */ + if (hwpm->active_chip->disable_slcg == NULL) { + tegra_hwpm_err(hwpm, "disable_slcg uninitialized"); + goto enodev; + } + ret = hwpm->active_chip->disable_slcg(hwpm); + if (ret < 0) { + tegra_hwpm_err(hwpm, "Unable to disable SLCG"); + goto fail; + } + + /* Program PROD values */ + if (hwpm->active_chip->init_prod_values == NULL) { + tegra_hwpm_err(hwpm, "init_prod_values uninitialized"); + goto enodev; + } + ret = hwpm->active_chip->init_prod_values(hwpm); + if (ret < 0) { + tegra_hwpm_err(hwpm, "Unable to set PROD values"); + goto fail; + } + + return 0; +enodev: + ret = -ENODEV; +fail: + return ret; +} + +int tegra_hwpm_disable_triggers(struct tegra_soc_hwpm *hwpm) +{ + tegra_hwpm_fn(hwpm, " "); + + if (hwpm->active_chip->disable_triggers == NULL) { + tegra_hwpm_err(hwpm, "disable_triggers uninitialized"); + return -ENODEV; + } + return hwpm->active_chip->disable_triggers(hwpm); +} + +int tegra_soc_hwpm_release_hw(struct tegra_soc_hwpm *hwpm) +{ + int ret = 0; + + tegra_hwpm_fn(hwpm, " "); + + /* Enable SLCG */ + if (hwpm->active_chip->enable_slcg == NULL) { + tegra_hwpm_err(hwpm, "enable_slcg uninitialized"); + goto enodev; + } + ret = hwpm->active_chip->enable_slcg(hwpm); + if (ret < 0) { + tegra_hwpm_err(hwpm, "Unable to enable SLCG"); + goto fail; + } + + /* + * Unmap PMA and RTR apertures + * Since, PMA and RTR hwpm apertures consist of hwpm config registers, + * these aperture mappings are required to reset hwpm config. + * Hence, explicitly unmap/release these apertures as a last step. + */ + if (hwpm->active_chip->release_rtr == NULL) { + tegra_hwpm_err(hwpm, "release_rtr uninitialized"); + goto enodev; + } + ret = hwpm->active_chip->release_rtr(hwpm); + if (ret < 0) { + tegra_hwpm_err(hwpm, "Unable to release RTR aperture"); + goto fail; + } + + if (hwpm->active_chip->release_pma == NULL) { + tegra_hwpm_err(hwpm, "release_pma uninitialized"); + goto enodev; + } + ret = hwpm->active_chip->release_pma(hwpm); + if (ret < 0) { + tegra_hwpm_err(hwpm, "Unable to release PMA aperture"); + goto fail; + } + + return 0; +enodev: + ret = -ENODEV; +fail: + return ret; +} + +void tegra_soc_hwpm_release_sw_components(struct tegra_soc_hwpm *hwpm) +{ + tegra_hwpm_fn(hwpm, " "); + + if (hwpm->active_chip->release_sw_setup == NULL) { + tegra_hwpm_err(hwpm, "release_sw_setup uninitialized"); + } else { + hwpm->active_chip->release_sw_setup(hwpm); + } + + kfree(hwpm->active_chip->chip_ips); + kfree(hwpm); + tegra_soc_hwpm_pdev = NULL; +} diff --git a/common/tegra_hwpm_mem_buf_utils.c b/common/tegra_hwpm_mem_buf_utils.c new file mode 100644 index 0000000..e08d5e8 --- /dev/null +++ b/common/tegra_hwpm_mem_buf_utils.c @@ -0,0 +1,329 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +static int tegra_hwpm_dma_map_stream_buffer(struct tegra_soc_hwpm *hwpm, + struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream) +{ + tegra_hwpm_fn(hwpm, " "); + + hwpm->stream_dma_buf = dma_buf_get(alloc_pma_stream->stream_buf_fd); + if (IS_ERR(hwpm->stream_dma_buf)) { + tegra_hwpm_err(hwpm, "Unable to get stream dma_buf"); + return PTR_ERR(hwpm->stream_dma_buf); + } + hwpm->stream_attach = dma_buf_attach(hwpm->stream_dma_buf, hwpm->dev); + if (IS_ERR(hwpm->stream_attach)) { + tegra_hwpm_err(hwpm, "Unable to attach stream dma_buf"); + return PTR_ERR(hwpm->stream_attach); + } + hwpm->stream_sgt = dma_buf_map_attachment(hwpm->stream_attach, + DMA_FROM_DEVICE); + if (IS_ERR(hwpm->stream_sgt)) { + tegra_hwpm_err(hwpm, "Unable to map stream attachment"); + return PTR_ERR(hwpm->stream_sgt); + } + + return 0; +} + +static int tegra_hwpm_dma_map_mem_bytes_buffer(struct tegra_soc_hwpm *hwpm, + struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream) +{ + tegra_hwpm_fn(hwpm, " "); + + hwpm->mem_bytes_dma_buf = + dma_buf_get(alloc_pma_stream->mem_bytes_buf_fd); + if (IS_ERR(hwpm->mem_bytes_dma_buf)) { + tegra_hwpm_err(hwpm, "Unable to get mem bytes dma_buf"); + return PTR_ERR(hwpm->mem_bytes_dma_buf); + } + + hwpm->mem_bytes_attach = dma_buf_attach(hwpm->mem_bytes_dma_buf, + hwpm->dev); + if (IS_ERR(hwpm->mem_bytes_attach)) { + tegra_hwpm_err(hwpm, "Unable to attach mem bytes dma_buf"); + return PTR_ERR(hwpm->mem_bytes_attach); + } + + hwpm->mem_bytes_sgt = dma_buf_map_attachment(hwpm->mem_bytes_attach, + DMA_FROM_DEVICE); + if (IS_ERR(hwpm->mem_bytes_sgt)) { + tegra_hwpm_err(hwpm, "Unable to map mem bytes attachment"); + return PTR_ERR(hwpm->mem_bytes_sgt); + } + + hwpm->mem_bytes_kernel = dma_buf_vmap(hwpm->mem_bytes_dma_buf); + if (!hwpm->mem_bytes_kernel) { + tegra_hwpm_err(hwpm, + "Unable to map mem_bytes buffer into kernel VA space"); + return -ENOMEM; + } + memset(hwpm->mem_bytes_kernel, 0, 32); + + return 0; +} + +static int tegra_hwpm_reset_stream_buf(struct tegra_soc_hwpm *hwpm) +{ + tegra_hwpm_fn(hwpm, " "); + + if (hwpm->stream_sgt && (!IS_ERR(hwpm->stream_sgt))) { + dma_buf_unmap_attachment(hwpm->stream_attach, + hwpm->stream_sgt, + DMA_FROM_DEVICE); + } + hwpm->stream_sgt = NULL; + + if (hwpm->stream_attach && (!IS_ERR(hwpm->stream_attach))) { + dma_buf_detach(hwpm->stream_dma_buf, hwpm->stream_attach); + } + hwpm->stream_attach = NULL; + + if (hwpm->stream_dma_buf && (!IS_ERR(hwpm->stream_dma_buf))) { + dma_buf_put(hwpm->stream_dma_buf); + } + hwpm->stream_dma_buf = NULL; + + if (hwpm->mem_bytes_kernel) { + dma_buf_vunmap(hwpm->mem_bytes_dma_buf, + hwpm->mem_bytes_kernel); + hwpm->mem_bytes_kernel = NULL; + } + + if (hwpm->mem_bytes_sgt && (!IS_ERR(hwpm->mem_bytes_sgt))) { + dma_buf_unmap_attachment(hwpm->mem_bytes_attach, + hwpm->mem_bytes_sgt, + DMA_FROM_DEVICE); + } + hwpm->mem_bytes_sgt = NULL; + + if (hwpm->mem_bytes_attach && (!IS_ERR(hwpm->mem_bytes_attach))) { + dma_buf_detach(hwpm->mem_bytes_dma_buf, hwpm->mem_bytes_attach); + } + hwpm->mem_bytes_attach = NULL; + + if (hwpm->mem_bytes_dma_buf && (!IS_ERR(hwpm->mem_bytes_dma_buf))) { + dma_buf_put(hwpm->mem_bytes_dma_buf); + } + 
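/* Clear the cached handle so a repeated reset pass skips the already-released dma_buf */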
hwpm->mem_bytes_dma_buf = NULL; + + return 0; +} + +int tegra_hwpm_map_stream_buffer(struct tegra_soc_hwpm *hwpm, + struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream) +{ + int ret = 0; + + tegra_hwpm_fn(hwpm, " "); + + /* Memory map stream buffer */ + ret = tegra_hwpm_dma_map_stream_buffer(hwpm, alloc_pma_stream); + if (ret != 0) { + tegra_hwpm_err(hwpm, "Failed to map stream buffer"); + goto fail; + } + + alloc_pma_stream->stream_buf_pma_va = + sg_dma_address(hwpm->stream_sgt->sgl); + if (alloc_pma_stream->stream_buf_pma_va == 0) { + tegra_hwpm_err(hwpm, "Invalid stream buffer SMMU IOVA"); + ret = -ENXIO; + goto fail; + } + tegra_hwpm_dbg(hwpm, hwpm_verbose, "stream_buf_pma_va = 0x%llx", + alloc_pma_stream->stream_buf_pma_va); + + /* Memory map mem bytes buffer */ + ret = tegra_hwpm_dma_map_mem_bytes_buffer(hwpm, alloc_pma_stream); + if (ret != 0) { + tegra_hwpm_err(hwpm, "Failed to map mem bytes buffer"); + goto fail; + } + + /* Configure memory management */ + if (hwpm->active_chip->enable_mem_mgmt == NULL) { + tegra_hwpm_err(hwpm, "enable memory mgmt HAL uninitialized"); + return -ENODEV; + } + ret = hwpm->active_chip->enable_mem_mgmt(hwpm, alloc_pma_stream); + if (ret != 0) { + tegra_hwpm_err(hwpm, "Failed to configure stream memory"); + goto fail; + } + + return 0; + +fail: + /* Invalidate memory config */ + if (hwpm->active_chip->invalidate_mem_config == NULL) { + tegra_hwpm_err(hwpm, "invalidate_mem_config HAL uninitialized"); + return -ENODEV; + } + ret = hwpm->active_chip->invalidate_mem_config(hwpm); + if (ret != 0) { + tegra_hwpm_err(hwpm, "Failed to invalidate memory config"); + } + + /* Disable memory management */ + if (hwpm->active_chip->disable_mem_mgmt == NULL) { + tegra_hwpm_err(hwpm, "disable_mem_mgmt HAL uninitialized"); + return -ENODEV; + } + ret = hwpm->active_chip->disable_mem_mgmt(hwpm); + if (ret != 0) { + tegra_hwpm_err(hwpm, "Failed to disable memory management"); + } + + alloc_pma_stream->stream_buf_pma_va = 0; + + /* Reset stream buffer */ + ret = tegra_hwpm_reset_stream_buf(hwpm); + if (ret != 0) { + tegra_hwpm_err(hwpm, "Failed to reset stream buffer"); + } + + return ret; +} + +int tegra_hwpm_clear_mem_pipeline(struct tegra_soc_hwpm *hwpm) +{ + int ret = 0; + + tegra_hwpm_fn(hwpm, " "); + + /* Stream MEM_BYTES to clear pipeline */ + if (hwpm->mem_bytes_kernel) { + bool timeout = false; + u32 *mem_bytes_kernel_u32 = (u32 *)(hwpm->mem_bytes_kernel); + + if (hwpm->active_chip->stream_mem_bytes == NULL) { + tegra_hwpm_err(hwpm, "stream_mem_bytes uninitialized"); + return -ENODEV; + } + ret = hwpm->active_chip->stream_mem_bytes(hwpm); + if (ret != 0) { + tegra_hwpm_err(hwpm, + "Failed to trigger mem_bytes streaming"); + } + timeout = HWPM_TIMEOUT(*mem_bytes_kernel_u32 != + TEGRA_SOC_HWPM_MEM_BYTES_INVALID, + "MEM_BYTES streaming"); + if (timeout && ret == 0) { + ret = -EIO; + } + } + + if (hwpm->active_chip->disable_pma_streaming == NULL) { + tegra_hwpm_err(hwpm, "disable_pma_streaming uninitialized"); + return -ENODEV; + } + ret = hwpm->active_chip->disable_pma_streaming(hwpm); + if (ret != 0) { + tegra_hwpm_err(hwpm, "Failed to disable pma streaming"); + } + + /* Disable memory management */ + if (hwpm->active_chip->disable_mem_mgmt == NULL) { + tegra_hwpm_err(hwpm, "disable_mem_mgmt HAL uninitialized"); + return -ENODEV; + } + ret = hwpm->active_chip->disable_mem_mgmt(hwpm); + if (ret != 0) { + tegra_hwpm_err(hwpm, "Failed to disable memory management"); + } + + /* Reset stream buffer */ + ret = tegra_hwpm_reset_stream_buf(hwpm); + if (ret != 0) { + 
tegra_hwpm_err(hwpm, "Failed to reset stream buffer"); + } + + return ret; +} + +int tegra_hwpm_update_mem_bytes(struct tegra_soc_hwpm *hwpm, + struct tegra_soc_hwpm_update_get_put *update_get_put) +{ + int ret; + + tegra_hwpm_fn(hwpm, " "); + + /* Update SW get pointer */ + if (hwpm->active_chip->update_mem_bytes_get_ptr == NULL) { + tegra_hwpm_err(hwpm, "update_mem_bytes_get_ptr uninitialized"); + return -ENODEV; + } + ret = hwpm->active_chip->update_mem_bytes_get_ptr(hwpm, + update_get_put->mem_bump); + if (ret != 0) { + tegra_hwpm_err(hwpm, "Failed to update mem_bytes get ptr"); + return -EINVAL; + } + + /* Stream MEM_BYTES value to MEM_BYTES buffer */ + if (update_get_put->b_stream_mem_bytes) { + if (hwpm->active_chip->stream_mem_bytes == NULL) { + tegra_hwpm_err(hwpm, "stream_mem_bytes uninitialized"); + return -ENODEV; + } + ret = hwpm->active_chip->stream_mem_bytes(hwpm); + if (ret != 0) { + tegra_hwpm_err(hwpm, + "Failed to trigger mem_bytes streaming"); + } + } + + /* Read HW put pointer */ + if (update_get_put->b_read_mem_head) { + if (hwpm->active_chip->get_mem_bytes_put_ptr == NULL) { + tegra_hwpm_err(hwpm, + "get_mem_bytes_put_ptr uninitialized"); + return -ENODEV; + } + update_get_put->mem_head = + hwpm->active_chip->get_mem_bytes_put_ptr(hwpm); + tegra_hwpm_dbg(hwpm, hwpm_verbose, + "MEM_HEAD = 0x%llx", update_get_put->mem_head); + } + + /* Check overflow error status */ + if (update_get_put->b_check_overflow) { + if (hwpm->active_chip->membuf_overflow_status == NULL) { + tegra_hwpm_err(hwpm, + "membuf_overflow_status uninitialized"); + return -ENODEV; + } + update_get_put->b_overflowed = + hwpm->active_chip->membuf_overflow_status(hwpm); + tegra_hwpm_dbg(hwpm, hwpm_verbose, "OVERFLOWED = %u", + update_get_put->b_overflowed); + } + + return 0; +} diff --git a/common/tegra_hwpm_regops_utils.c b/common/tegra_hwpm_regops_utils.c new file mode 100644 index 0000000..4506d59 --- /dev/null +++ b/common/tegra_hwpm_regops_utils.c @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include + +#include + +#include +#include +#include + +int tegra_soc_hwpm_exec_regops(struct tegra_soc_hwpm *hwpm, + struct tegra_soc_hwpm_exec_reg_ops *exec_reg_ops) +{ + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + int op_idx = 0; + struct tegra_soc_hwpm_reg_op *reg_op = NULL; + int ret = 0; + + tegra_hwpm_fn(hwpm, " "); + + switch (exec_reg_ops->mode) { + case TEGRA_SOC_HWPM_REG_OP_MODE_FAIL_ON_FIRST: + case TEGRA_SOC_HWPM_REG_OP_MODE_CONT_ON_ERR: + break; + + default: + tegra_hwpm_err(hwpm, "Invalid reg ops mode(%u)", + exec_reg_ops->mode); + return -EINVAL; + } + + if (exec_reg_ops->op_count > TEGRA_SOC_HWPM_REG_OPS_SIZE) { + tegra_hwpm_err(hwpm, "Reg_op count=%d exceeds max count", + exec_reg_ops->op_count); + return -EINVAL; + } + + if (active_chip->exec_reg_ops == NULL) { + tegra_hwpm_err(hwpm, "exec_reg_ops uninitialized"); + return -ENODEV; + } + + /* + * Initialize flag to true assuming all regops will pass + * If any regop fails, the flag will be reset to false. 
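+ * In FAIL_ON_FIRST mode the loop stops at the first failing op and
+ * returns -EINVAL; in CONT_ON_ERR mode the remaining ops are still run.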
+ */ + exec_reg_ops->b_all_reg_ops_passed = true; + + for (op_idx = 0; op_idx < exec_reg_ops->op_count; op_idx++) { + reg_op = &(exec_reg_ops->ops[op_idx]); + tegra_hwpm_dbg(hwpm, hwpm_verbose, + "reg op: idx(%d), phys(0x%llx), cmd(%u)", + op_idx, reg_op->phys_addr, reg_op->cmd); + + ret = active_chip->exec_reg_ops(hwpm, reg_op); + if (ret < 0) { + tegra_hwpm_err(hwpm, "exec_reg_ops %d failed", op_idx); + exec_reg_ops->b_all_reg_ops_passed = false; + if (exec_reg_ops->mode == + TEGRA_SOC_HWPM_REG_OP_MODE_FAIL_ON_FIRST) { + return -EINVAL; + } + } + } + + return 0; +} diff --git a/common/tegra_hwpm_resource_utils.c b/common/tegra_hwpm_resource_utils.c new file mode 100644 index 0000000..0faf73c --- /dev/null +++ b/common/tegra_hwpm_resource_utils.c @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include + +#include +#include +#include + +int tegra_soc_hwpm_reserve_resource(struct tegra_soc_hwpm *hwpm, u32 resource) +{ + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + struct hwpm_ip *chip_ip = NULL; + u32 ip_idx = TEGRA_SOC_HWPM_IP_INACTIVE; + int ret = 0; + + tegra_hwpm_fn(hwpm, " "); + + tegra_hwpm_dbg(hwpm, hwpm_info, + "User requesting to reserve resource %d", resource); + + /* Translate resource to ip_idx */ + if (!active_chip->is_resource_active(hwpm, resource, &ip_idx)) { + tegra_hwpm_err(hwpm, "Requested resource %d is unavailable", + resource); + /* Remove after uapi update */ + if (resource == TEGRA_SOC_HWPM_RESOURCE_MSS_NVLINK) { + tegra_hwpm_dbg(hwpm, hwpm_verbose, + "ignoring resource %d", resource); + return 0; + } + return -EINVAL; + } + + /* Get IP structure from ip_idx */ + chip_ip = active_chip->chip_ips[ip_idx]; + + /* Skip IPs which are already reserved (covers PMA and RTR case) */ + if (chip_ip->reserved) { + tegra_hwpm_dbg(hwpm, hwpm_info, + "Chip IP %d already reserved", ip_idx); + return 0; + } + + /* Make sure IP override is not enabled */ + if (chip_ip->override_enable) { + tegra_hwpm_dbg(hwpm, hwpm_info, + "Chip IP %d not available", ip_idx); + return 0; + } + + if (active_chip->reserve_given_resource == NULL) { + tegra_hwpm_err(hwpm, + "reserve_given_resource HAL uninitialized"); + return -ENODEV; + } + ret = active_chip->reserve_given_resource(hwpm, ip_idx); + if (ret != 0) { + tegra_hwpm_err(hwpm, "Failed to reserve resource %d", resource); + return ret; + } + + return 0; +} + +int tegra_soc_hwpm_release_resources(struct tegra_soc_hwpm *hwpm) +{ + int ret = 0; + + tegra_hwpm_fn(hwpm, " "); + + if (hwpm->active_chip->release_all_resources == NULL) { + tegra_hwpm_err(hwpm, "release_resources HAL uninitialized"); + return -ENODEV; + } + ret = hwpm->active_chip->release_all_resources(hwpm); + if (ret != 0) { + tegra_hwpm_err(hwpm, "failed to release resources"); + return ret; + } + + return 0; +} + +int tegra_soc_hwpm_bind_resources(struct tegra_soc_hwpm *hwpm) +{ + int ret = 0; + + tegra_hwpm_fn(hwpm, " "); + + if (hwpm->active_chip->bind_reserved_resources == NULL) { + tegra_hwpm_err(hwpm, + "bind_reserved_resources HAL 
uninitialized"); + return -ENODEV; + } + ret = hwpm->active_chip->bind_reserved_resources(hwpm); + if (ret != 0) { + tegra_hwpm_err(hwpm, "failed to bind resources"); + return ret; + } + + return 0; +} diff --git a/hal/t234/hw/t234_addr_map_soc_hwpm.h b/hal/t234/hw/t234_addr_map_soc_hwpm.h index ac72d46..b750f62 100644 --- a/hal/t234/hw/t234_addr_map_soc_hwpm.h +++ b/hal/t234/hw/t234_addr_map_soc_hwpm.h @@ -1,23 +1,17 @@ /* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . */ /* * Function/Macro naming determines intended use: @@ -53,31 +47,71 @@ * comparison with unshifted values appropriate for use in field * of register . 
*/ -#ifndef T234_ADDR_MAP_SOC_HWPM_H -#define T234_ADDR_MAP_SOC_HWPM_H +#ifndef TEGRA_T234_ADDR_MAP_SOC_HWPM_H +#define TEGRA_T234_ADDR_MAP_SOC_HWPM_H #define addr_map_rpg_pm_base_r() (0x0f100000U) #define addr_map_rpg_pm_limit_r() (0x0f149fffU) +#define addr_map_rpg_pm_pma_base_r() (0x0f10b000U) +#define addr_map_rpg_pm_pma_limit_r() (0x0f10bfffU) #define addr_map_pma_base_r() (0x0f14a000U) #define addr_map_pma_limit_r() (0x0f14bfffU) #define addr_map_rtr_base_r() (0x0f14d000U) #define addr_map_rtr_limit_r() (0x0f14dfffU) +#define addr_map_rpg_pm_disp_base_r() (0x0f10a000U) +#define addr_map_rpg_pm_disp_limit_r() (0x0f10afffU) #define addr_map_disp_base_r() (0x13800000U) #define addr_map_disp_limit_r() (0x138effffU) +#define addr_map_rpg_pm_vi0_base_r() (0x0f100000U) +#define addr_map_rpg_pm_vi0_limit_r() (0x0f100fffU) +#define addr_map_rpg_pm_vi1_base_r() (0x0f101000U) +#define addr_map_rpg_pm_vi1_limit_r() (0x0f101fffU) #define addr_map_vi_thi_base_r() (0x15f00000U) #define addr_map_vi_thi_limit_r() (0x15ffffffU) #define addr_map_vi2_thi_base_r() (0x14f00000U) #define addr_map_vi2_thi_limit_r() (0x14ffffffU) +#define addr_map_rpg_pm_vic_base_r() (0x0f103000U) +#define addr_map_rpg_pm_vic_limit_r() (0x0f103fffU) #define addr_map_vic_base_r() (0x15340000U) #define addr_map_vic_limit_r() (0x1537ffffU) +#define addr_map_rpg_pm_nvdec0_base_r() (0x0f111000U) +#define addr_map_rpg_pm_nvdec0_limit_r() (0x0f111fffU) #define addr_map_nvdec_base_r() (0x15480000U) #define addr_map_nvdec_limit_r() (0x154bffffU) +#define addr_map_rpg_pm_nvenc0_base_r() (0x0f112000U) +#define addr_map_rpg_pm_nvenc0_limit_r() (0x0f112fffU) #define addr_map_nvenc_base_r() (0x154c0000U) #define addr_map_nvenc_limit_r() (0x154fffffU) +#define addr_map_rpg_pm_ofa_base_r() (0x0f104000U) +#define addr_map_rpg_pm_ofa_limit_r() (0x0f104fffU) #define addr_map_ofa_base_r() (0x15a50000U) #define addr_map_ofa_limit_r() (0x15a5ffffU) +#define addr_map_rpg_pm_isp0_base_r() (0x0f102000U) +#define addr_map_rpg_pm_isp0_limit_r() (0x0f102fffU) #define addr_map_isp_thi_base_r() (0x14b00000U) #define addr_map_isp_thi_limit_r() (0x14bfffffU) +#define addr_map_rpg_pm_pcie_c0_base_r() (0x0f114000U) +#define addr_map_rpg_pm_pcie_c0_limit_r() (0x0f114fffU) +#define addr_map_rpg_pm_pcie_c1_base_r() (0x0f115000U) +#define addr_map_rpg_pm_pcie_c1_limit_r() (0x0f115fffU) +#define addr_map_rpg_pm_pcie_c2_base_r() (0x0f116000U) +#define addr_map_rpg_pm_pcie_c2_limit_r() (0x0f116fffU) +#define addr_map_rpg_pm_pcie_c3_base_r() (0x0f117000U) +#define addr_map_rpg_pm_pcie_c3_limit_r() (0x0f117fffU) +#define addr_map_rpg_pm_pcie_c4_base_r() (0x0f118000U) +#define addr_map_rpg_pm_pcie_c4_limit_r() (0x0f118fffU) +#define addr_map_rpg_pm_pcie_c5_base_r() (0x0f119000U) +#define addr_map_rpg_pm_pcie_c5_limit_r() (0x0f119fffU) +#define addr_map_rpg_pm_pcie_c6_base_r() (0x0f11a000U) +#define addr_map_rpg_pm_pcie_c6_limit_r() (0x0f11afffU) +#define addr_map_rpg_pm_pcie_c7_base_r() (0x0f11b000U) +#define addr_map_rpg_pm_pcie_c7_limit_r() (0x0f11bfffU) +#define addr_map_rpg_pm_pcie_c8_base_r() (0x0f11c000U) +#define addr_map_rpg_pm_pcie_c8_limit_r() (0x0f11cfffU) +#define addr_map_rpg_pm_pcie_c9_base_r() (0x0f11d000U) +#define addr_map_rpg_pm_pcie_c9_limit_r() (0x0f11dfffU) +#define addr_map_rpg_pm_pcie_c10_base_r() (0x0f11e000U) +#define addr_map_rpg_pm_pcie_c10_limit_r() (0x0f11efffU) #define addr_map_pcie_c0_ctl_base_r() (0x14180000U) #define addr_map_pcie_c0_ctl_limit_r() (0x1419ffffU) #define addr_map_pcie_c1_ctl_base_r() (0x14100000U) @@ -100,22 +134,70 @@ 
#define addr_map_pcie_c9_ctl_limit_r() (0x140dffffU) #define addr_map_pcie_c10_ctl_base_r() (0x140e0000U) #define addr_map_pcie_c10_ctl_limit_r() (0x140fffffU) +#define addr_map_rpg_pm_pva0_0_base_r() (0x0f105000U) +#define addr_map_rpg_pm_pva0_0_limit_r() (0x0f105fffU) +#define addr_map_rpg_pm_pva0_1_base_r() (0x0f106000U) +#define addr_map_rpg_pm_pva0_1_limit_r() (0x0f106fffU) +#define addr_map_rpg_pm_pva0_2_base_r() (0x0f107000U) +#define addr_map_rpg_pm_pva0_2_limit_r() (0x0f107fffU) #define addr_map_pva0_pm_base_r() (0x16200000U) #define addr_map_pva0_pm_limit_r() (0x1620ffffU) +#define addr_map_rpg_pm_nvdla0_base_r() (0x0f108000U) +#define addr_map_rpg_pm_nvdla0_limit_r() (0x0f108fffU) +#define addr_map_rpg_pm_nvdla1_base_r() (0x0f109000U) +#define addr_map_rpg_pm_nvdla1_limit_r() (0x0f109fffU) #define addr_map_nvdla0_base_r() (0x15880000U) #define addr_map_nvdla0_limit_r() (0x158bffffU) #define addr_map_nvdla1_base_r() (0x158c0000U) #define addr_map_nvdla1_limit_r() (0x158fffffU) -#define addr_map_mgbe0_base_r() (0x06800000U) -#define addr_map_mgbe0_limit_r() (0x068fffffU) -#define addr_map_mgbe1_base_r() (0x06900000U) -#define addr_map_mgbe1_limit_r() (0x069fffffU) -#define addr_map_mgbe2_base_r() (0x06a00000U) -#define addr_map_mgbe2_limit_r() (0x06afffffU) -#define addr_map_mgbe3_base_r() (0x06b00000U) -#define addr_map_mgbe3_limit_r() (0x06bfffffU) -#define addr_map_mcb_base_r() (0x02c10000U) -#define addr_map_mcb_limit_r() (0x02c1ffffU) +#define addr_map_rpg_pm_mgbe0_base_r() (0x0f10c000U) +#define addr_map_rpg_pm_mgbe0_limit_r() (0x0f10cfffU) +#define addr_map_rpg_pm_mgbe1_base_r() (0x0f10d000U) +#define addr_map_rpg_pm_mgbe1_limit_r() (0x0f10dfffU) +#define addr_map_rpg_pm_mgbe2_base_r() (0x0f10e000U) +#define addr_map_rpg_pm_mgbe2_limit_r() (0x0f10efffU) +#define addr_map_rpg_pm_mgbe3_base_r() (0x0f10f000U) +#define addr_map_rpg_pm_mgbe3_limit_r() (0x0f10ffffU) +#define addr_map_mgbe0_mac_rm_base_r() (0x06810000U) +#define addr_map_mgbe0_mac_rm_limit_r() (0x0681ffffU) +#define addr_map_mgbe1_mac_rm_base_r() (0x06910000U) +#define addr_map_mgbe1_mac_rm_limit_r() (0x0691ffffU) +#define addr_map_mgbe2_mac_rm_base_r() (0x06a10000U) +#define addr_map_mgbe2_mac_rm_limit_r() (0x06a1ffffU) +#define addr_map_mgbe3_mac_rm_base_r() (0x06b10000U) +#define addr_map_mgbe3_mac_rm_limit_r() (0x06b1ffffU) +#define addr_map_rpg_pm_mss0_base_r() (0x0f11f000U) +#define addr_map_rpg_pm_mss0_limit_r() (0x0f11ffffU) +#define addr_map_rpg_pm_mss1_base_r() (0x0f120000U) +#define addr_map_rpg_pm_mss1_limit_r() (0x0f120fffU) +#define addr_map_rpg_pm_mss2_base_r() (0x0f121000U) +#define addr_map_rpg_pm_mss2_limit_r() (0x0f121fffU) +#define addr_map_rpg_pm_mss3_base_r() (0x0f122000U) +#define addr_map_rpg_pm_mss3_limit_r() (0x0f122fffU) +#define addr_map_rpg_pm_mss4_base_r() (0x0f123000U) +#define addr_map_rpg_pm_mss4_limit_r() (0x0f123fffU) +#define addr_map_rpg_pm_mss5_base_r() (0x0f124000U) +#define addr_map_rpg_pm_mss5_limit_r() (0x0f124fffU) +#define addr_map_rpg_pm_mss6_base_r() (0x0f125000U) +#define addr_map_rpg_pm_mss6_limit_r() (0x0f125fffU) +#define addr_map_rpg_pm_mss7_base_r() (0x0f126000U) +#define addr_map_rpg_pm_mss7_limit_r() (0x0f126fffU) +#define addr_map_rpg_pm_mss8_base_r() (0x0f127000U) +#define addr_map_rpg_pm_mss8_limit_r() (0x0f127fffU) +#define addr_map_rpg_pm_mss9_base_r() (0x0f128000U) +#define addr_map_rpg_pm_mss9_limit_r() (0x0f128fffU) +#define addr_map_rpg_pm_mss10_base_r() (0x0f129000U) +#define addr_map_rpg_pm_mss10_limit_r() (0x0f129fffU) +#define 
addr_map_rpg_pm_mss11_base_r() (0x0f12a000U) +#define addr_map_rpg_pm_mss11_limit_r() (0x0f12afffU) +#define addr_map_rpg_pm_mss12_base_r() (0x0f12b000U) +#define addr_map_rpg_pm_mss12_limit_r() (0x0f12bfffU) +#define addr_map_rpg_pm_mss13_base_r() (0x0f12c000U) +#define addr_map_rpg_pm_mss13_limit_r() (0x0f12cfffU) +#define addr_map_rpg_pm_mss14_base_r() (0x0f12d000U) +#define addr_map_rpg_pm_mss14_limit_r() (0x0f12dfffU) +#define addr_map_rpg_pm_mss15_base_r() (0x0f12e000U) +#define addr_map_rpg_pm_mss15_limit_r() (0x0f12efffU) #define addr_map_mc0_base_r() (0x02c20000U) #define addr_map_mc0_limit_r() (0x02c2ffffU) #define addr_map_mc1_base_r() (0x02c30000U) @@ -148,6 +230,20 @@ #define addr_map_mc14_limit_r() (0x0176ffffU) #define addr_map_mc15_base_r() (0x01770000U) #define addr_map_mc15_limit_r() (0x0177ffffU) +#define addr_map_mcb_base_r() (0x02c10000U) +#define addr_map_mcb_limit_r() (0x02c1ffffU) +#define addr_map_rpg_pm_msshub0_base_r() (0x0f12f000U) +#define addr_map_rpg_pm_msshub0_limit_r() (0x0f12ffffU) +#define addr_map_rpg_pm_msshub1_base_r() (0x0f130000U) +#define addr_map_rpg_pm_msshub1_limit_r() (0x0f130fffU) +#define addr_map_rpg_pm_mcf0_base_r() (0x0f131000U) +#define addr_map_rpg_pm_mcf0_limit_r() (0x0f131fffU) +#define addr_map_rpg_pm_mcf1_base_r() (0x0f132000U) +#define addr_map_rpg_pm_mcf1_limit_r() (0x0f132fffU) +#define addr_map_rpg_pm_mcf2_base_r() (0x0f133000U) +#define addr_map_rpg_pm_mcf2_limit_r() (0x0f133fffU) +#define addr_map_rpg_pm_mssnvl_base_r() (0x0f113000U) +#define addr_map_rpg_pm_mssnvl_limit_r() (0x0f113fffU) #define addr_map_mss_nvlink_1_base_r() (0x01f20000U) #define addr_map_mss_nvlink_1_limit_r() (0x01f3ffffU) #define addr_map_mss_nvlink_2_base_r() (0x01f40000U) @@ -164,4 +260,6 @@ #define addr_map_mss_nvlink_7_limit_r() (0x01ffffffU) #define addr_map_mss_nvlink_8_base_r() (0x01e00000U) #define addr_map_mss_nvlink_8_limit_r() (0x01e1ffffU) +#define addr_map_rpg_pm_scf_base_r() (0x0f110000U) +#define addr_map_rpg_pm_scf_limit_r() (0x0f110fffU) #endif diff --git a/hal/t234/hw/t234_pmasys_soc_hwpm.h b/hal/t234/hw/t234_pmasys_soc_hwpm.h index 6ba519c..f8762d3 100644 --- a/hal/t234/hw/t234_pmasys_soc_hwpm.h +++ b/hal/t234/hw/t234_pmasys_soc_hwpm.h @@ -1,23 +1,17 @@ /* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
* - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . */ /* * Function/Macro naming determines intended use: @@ -53,8 +47,8 @@ * comparison with unshifted values appropriate for use in field * of register . */ -#ifndef T234_PMASYS_SOC_HWPM_H -#define T234_PMASYS_SOC_HWPM_H +#ifndef TEGRA_T234_PMASYS_SOC_HWPM_H +#define TEGRA_T234_PMASYS_SOC_HWPM_H #define pmasys_cg2_r() (0x0f14a044U) #define pmasys_cg2_slcg_f(v) (((v) & 0x1U) << 0U) diff --git a/hal/t234/hw/t234_pmmsys_soc_hwpm.h b/hal/t234/hw/t234_pmmsys_soc_hwpm.h index c751795..394b067 100644 --- a/hal/t234/hw/t234_pmmsys_soc_hwpm.h +++ b/hal/t234/hw/t234_pmmsys_soc_hwpm.h @@ -1,23 +1,17 @@ /* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . */ /* * Function/Macro naming determines intended use: @@ -53,8 +47,8 @@ * comparison with unshifted values appropriate for use in field * of register . 
*/ -#ifndef T234_PMMSYS_SOC_HWPM_H -#define T234_PMMSYS_SOC_HWPM_H +#ifndef TEGRA_T234_PMMSYS_SOC_HWPM_H +#define TEGRA_T234_PMMSYS_SOC_HWPM_H #define pmmsys_perdomain_offset_v() (0x00001000U) #define pmmsys_control_r(i)\ diff --git a/hal/t234/ip/display/t234_hwpm_ip_display.c b/hal/t234/ip/display/t234_hwpm_ip_display.c new file mode 100644 index 0000000..216cca5 --- /dev/null +++ b/hal/t234/ip/display/t234_hwpm_ip_display.c @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include "t234_hwpm_ip_display.h" + +#include +#include +#include + +hwpm_ip_perfmon t234_display_perfmon_static_array[ + T234_HWPM_IP_DISPLAY_NUM_PERFMON_PER_INST * + T234_HWPM_IP_DISPLAY_NUM_INSTANCES] = { + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_nvdisplay0", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_disp_base_r(), + .end_abs_pa = addr_map_rpg_pm_disp_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, +}; + +hwpm_ip_perfmux t234_display_perfmux_static_array[ + T234_HWPM_IP_DISPLAY_NUM_PERFMUX_PER_INST * + T234_HWPM_IP_DISPLAY_NUM_INSTANCES] = { + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_disp_base_r(), + .end_abs_pa = addr_map_disp_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_disp_alist, + .alist_size = ARRAY_SIZE(t234_disp_alist), + .fake_registers = NULL, + }, +}; + +struct hwpm_ip t234_hwpm_ip_display = { + .num_instances = T234_HWPM_IP_DISPLAY_NUM_INSTANCES, + .num_perfmon_per_inst = T234_HWPM_IP_DISPLAY_NUM_PERFMON_PER_INST, + .num_perfmux_per_inst = T234_HWPM_IP_DISPLAY_NUM_PERFMUX_PER_INST, + + .perfmon_range_start = addr_map_rpg_pm_disp_base_r(), + .perfmon_range_end = addr_map_rpg_pm_disp_limit_r(), + .inst_perfmon_stride = addr_map_rpg_pm_disp_limit_r() - + addr_map_rpg_pm_disp_base_r() + 1ULL, + .num_perfmon_slots = 0U, + .ip_perfmon = NULL, + + .perfmux_range_start = addr_map_disp_base_r(), + .perfmux_range_end = addr_map_disp_limit_r(), + .inst_perfmux_stride = addr_map_disp_limit_r() - + addr_map_disp_base_r() + 1ULL, + .num_perfmux_slots = 0U, + .ip_perfmux = NULL, + + .override_enable = false, + .fs_mask = 0U, + .perfmon_static_array = t234_display_perfmon_static_array, + .perfmux_static_array = t234_display_perfmux_static_array, + .reserved = false, +}; diff --git a/hal/t234/ip/display/t234_hwpm_ip_display.h b/hal/t234/ip/display/t234_hwpm_ip_display.h new file mode 100644 index 0000000..711032c --- /dev/null +++ b/hal/t234/ip/display/t234_hwpm_ip_display.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & 
AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef T234_HWPM_IP_DISPLAY_H +#define T234_HWPM_IP_DISPLAY_H + +#if defined(CONFIG_SOC_HWPM_IP_DISPLAY) +#define T234_HWPM_ACTIVE_IP_DISPLAY \ + DEFINE_SOC_HWPM_ACTIVE_IP(T234_HWPM_IP_DISPLAY), + +/* This data should ideally be available in HW headers */ +#define T234_HWPM_IP_DISPLAY_NUM_INSTANCES 1U +#define T234_HWPM_IP_DISPLAY_NUM_PERFMON_PER_INST 1U +#define T234_HWPM_IP_DISPLAY_NUM_PERFMUX_PER_INST 1U + +extern struct hwpm_ip t234_hwpm_ip_display; + +#else +#define T234_HWPM_ACTIVE_IP_DISPLAY +#endif + +#endif /* T234_HWPM_IP_DISPLAY_H */ diff --git a/hal/t234/ip/isp/t234_hwpm_ip_isp.c b/hal/t234/ip/isp/t234_hwpm_ip_isp.c new file mode 100644 index 0000000..d522619 --- /dev/null +++ b/hal/t234/ip/isp/t234_hwpm_ip_isp.c @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#include "t234_hwpm_ip_isp.h" + +#include +#include +#include + +hwpm_ip_perfmon t234_isp_perfmon_static_array[ + T234_HWPM_IP_ISP_NUM_PERFMON_PER_INST * + T234_HWPM_IP_ISP_NUM_INSTANCES] = { + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_isp0", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_isp0_base_r(), + .end_abs_pa = addr_map_rpg_pm_isp0_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, +}; + +hwpm_ip_perfmux t234_isp_perfmux_static_array[ + T234_HWPM_IP_ISP_NUM_PERFMUX_PER_INST * + T234_HWPM_IP_ISP_NUM_INSTANCES] = { + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_isp_thi_base_r(), + .end_abs_pa = addr_map_isp_thi_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = 0ULL, + .alist = t234_isp_thi_alist, + .alist_size = ARRAY_SIZE(t234_isp_thi_alist), + .fake_registers = NULL, + }, +}; + +struct hwpm_ip t234_hwpm_ip_isp = { + .num_instances = T234_HWPM_IP_ISP_NUM_INSTANCES, + .num_perfmon_per_inst = T234_HWPM_IP_ISP_NUM_PERFMON_PER_INST, + .num_perfmux_per_inst = T234_HWPM_IP_ISP_NUM_PERFMUX_PER_INST, + + .perfmon_range_start = addr_map_rpg_pm_isp0_base_r(), + .perfmon_range_end = addr_map_rpg_pm_isp0_limit_r(), + .inst_perfmon_stride = addr_map_rpg_pm_isp0_limit_r() - + addr_map_rpg_pm_isp0_base_r() + 1ULL, + .num_perfmon_slots = 0U, + .ip_perfmon = NULL, + + .perfmux_range_start = addr_map_isp_thi_base_r(), + .perfmux_range_end = addr_map_isp_thi_limit_r(), + .inst_perfmux_stride = addr_map_isp_thi_limit_r() - + addr_map_isp_thi_base_r() + 1ULL, + .num_perfmux_slots = 0U, + .ip_perfmux = NULL, + + .override_enable = false, + .fs_mask = 0U, + .perfmon_static_array = t234_isp_perfmon_static_array, + .perfmux_static_array = t234_isp_perfmux_static_array, + .reserved = false, +}; diff --git a/hal/t234/ip/isp/t234_hwpm_ip_isp.h b/hal/t234/ip/isp/t234_hwpm_ip_isp.h new file mode 100644 index 0000000..4107cc0 --- /dev/null +++ b/hal/t234/ip/isp/t234_hwpm_ip_isp.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef T234_HWPM_IP_ISP_H +#define T234_HWPM_IP_ISP_H + +#if defined(CONFIG_SOC_HWPM_IP_ISP) +#define T234_HWPM_ACTIVE_IP_ISP \ + DEFINE_SOC_HWPM_ACTIVE_IP(T234_HWPM_IP_ISP), + +/* This data should ideally be available in HW headers */ +#define T234_HWPM_IP_ISP_NUM_INSTANCES 1U +#define T234_HWPM_IP_ISP_NUM_PERFMON_PER_INST 1U +#define T234_HWPM_IP_ISP_NUM_PERFMUX_PER_INST 1U + +extern struct hwpm_ip t234_hwpm_ip_isp; + +#else +#define T234_HWPM_ACTIVE_IP_ISP +#endif + +#endif /* T234_HWPM_IP_ISP_H */ diff --git a/hal/t234/ip/mgbe/t234_hwpm_ip_mgbe.c b/hal/t234/ip/mgbe/t234_hwpm_ip_mgbe.c new file mode 100644 index 0000000..a5327db --- /dev/null +++ b/hal/t234/ip/mgbe/t234_hwpm_ip_mgbe.c @@ -0,0 +1,222 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include "t234_hwpm_ip_mgbe.h" + +#include +#include +#include + +hwpm_ip_perfmon t234_mgbe_perfmon_static_array[ + T234_HWPM_IP_MGBE_NUM_PERFMON_PER_INST * + T234_HWPM_IP_MGBE_NUM_INSTANCES] = { + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_mgbe0", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mgbe0_base_r(), + .end_abs_pa = addr_map_rpg_pm_mgbe0_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(1), + .dt_mmio = NULL, + .name = "perfmon_mgbe1", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mgbe1_base_r(), + .end_abs_pa = addr_map_rpg_pm_mgbe1_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(2), + .dt_mmio = NULL, + .name = "perfmon_mgbe2", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mgbe2_base_r(), + .end_abs_pa = addr_map_rpg_pm_mgbe2_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(3), + .dt_mmio = NULL, + .name = "perfmon_mgbe3", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mgbe3_base_r(), + .end_abs_pa = addr_map_rpg_pm_mgbe3_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, +}; + +hwpm_ip_perfmux 
t234_mgbe_perfmux_static_array[ + T234_HWPM_IP_MGBE_NUM_PERFMUX_PER_INST * + T234_HWPM_IP_MGBE_NUM_INSTANCES] = { + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mgbe0_mac_rm_base_r(), + .end_abs_pa = addr_map_mgbe0_mac_rm_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = 0ULL, + .alist = t234_mgbe_alist, + .alist_size = ARRAY_SIZE(t234_mgbe_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(1), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mgbe1_mac_rm_base_r(), + .end_abs_pa = addr_map_mgbe1_mac_rm_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = 0ULL, + .alist = t234_mgbe_alist, + .alist_size = ARRAY_SIZE(t234_mgbe_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(2), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mgbe2_mac_rm_base_r(), + .end_abs_pa = addr_map_mgbe2_mac_rm_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = 0ULL, + .alist = t234_mgbe_alist, + .alist_size = ARRAY_SIZE(t234_mgbe_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(3), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mgbe3_mac_rm_base_r(), + .end_abs_pa = addr_map_mgbe3_mac_rm_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = 0ULL, + .alist = t234_mgbe_alist, + .alist_size = ARRAY_SIZE(t234_mgbe_alist), + .fake_registers = NULL, + }, +}; + +struct hwpm_ip t234_hwpm_ip_mgbe = { + .num_instances = T234_HWPM_IP_MGBE_NUM_INSTANCES, + .num_perfmon_per_inst = T234_HWPM_IP_MGBE_NUM_PERFMON_PER_INST, + .num_perfmux_per_inst = T234_HWPM_IP_MGBE_NUM_PERFMUX_PER_INST, + + .perfmon_range_start = addr_map_rpg_pm_mgbe0_base_r(), + .perfmon_range_end = addr_map_rpg_pm_mgbe3_limit_r(), + .inst_perfmon_stride = addr_map_rpg_pm_mgbe0_limit_r() - + addr_map_rpg_pm_mgbe0_base_r() + 1ULL, + .num_perfmon_slots = 0U, + .ip_perfmon = NULL, + + .perfmux_range_start = addr_map_mgbe0_mac_rm_base_r(), + .perfmux_range_end = addr_map_mgbe3_mac_rm_limit_r(), + .inst_perfmux_stride = addr_map_mgbe0_mac_rm_limit_r() - + addr_map_mgbe0_mac_rm_base_r() + 1ULL, + .num_perfmux_slots = 0U, + .ip_perfmux = NULL, + + .override_enable = false, + .fs_mask = 0U, + .perfmon_static_array = t234_mgbe_perfmon_static_array, + .perfmux_static_array = t234_mgbe_perfmux_static_array, + .reserved = false, +}; diff --git a/hal/t234/ip/mgbe/t234_hwpm_ip_mgbe.h b/hal/t234/ip/mgbe/t234_hwpm_ip_mgbe.h new file mode 100644 index 0000000..6fac4bc --- /dev/null +++ b/hal/t234/ip/mgbe/t234_hwpm_ip_mgbe.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef T234_HWPM_IP_MGBE_H +#define T234_HWPM_IP_MGBE_H + +#if defined(CONFIG_SOC_HWPM_IP_MGBE) +#define T234_HWPM_ACTIVE_IP_MGBE \ + DEFINE_SOC_HWPM_ACTIVE_IP(T234_HWPM_IP_MGBE), + +/* This data should ideally be available in HW headers */ +#define T234_HWPM_IP_MGBE_NUM_INSTANCES 4U +#define T234_HWPM_IP_MGBE_NUM_PERFMON_PER_INST 1U +#define T234_HWPM_IP_MGBE_NUM_PERFMUX_PER_INST 1U + +extern struct hwpm_ip t234_hwpm_ip_mgbe; + +#else +#define T234_HWPM_ACTIVE_IP_MGBE +#endif + +#endif /* T234_HWPM_IP_MGBE_H */ diff --git a/hal/t234/ip/mss_channel/t234_hwpm_ip_mss_channel.c b/hal/t234/ip/mss_channel/t234_hwpm_ip_mss_channel.c new file mode 100644 index 0000000..3f11334 --- /dev/null +++ b/hal/t234/ip/mss_channel/t234_hwpm_ip_mss_channel.c @@ -0,0 +1,725 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include "t234_hwpm_ip_mss_channel.h" + +#include +#include +#include + +hwpm_ip_perfmon t234_mss_channel_perfmon_static_array[ + T234_HWPM_IP_MSS_CHANNEL_NUM_PERFMON_PER_INST * + T234_HWPM_IP_MSS_CHANNEL_NUM_INSTANCES] = { + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_msschannel_parta0", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mss0_base_r(), + .end_abs_pa = addr_map_rpg_pm_mss0_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_msschannel_parta1", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mss1_base_r(), + .end_abs_pa = addr_map_rpg_pm_mss1_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_msschannel_parta2", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mss2_base_r(), + .end_abs_pa = addr_map_rpg_pm_mss2_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_msschannel_parta3", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + 
.ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mss3_base_r(), + .end_abs_pa = addr_map_rpg_pm_mss3_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(1), + .dt_mmio = NULL, + .name = "perfmon_msschannel_partb0", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mss4_base_r(), + .end_abs_pa = addr_map_rpg_pm_mss4_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(1), + .dt_mmio = NULL, + .name = "perfmon_msschannel_partb1", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mss5_base_r(), + .end_abs_pa = addr_map_rpg_pm_mss5_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(1), + .dt_mmio = NULL, + .name = "perfmon_msschannel_partb2", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mss6_base_r(), + .end_abs_pa = addr_map_rpg_pm_mss6_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(1), + .dt_mmio = NULL, + .name = "perfmon_msschannel_partb3", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mss7_base_r(), + .end_abs_pa = addr_map_rpg_pm_mss7_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(2), + .dt_mmio = NULL, + .name = "perfmon_msschannel_partc0", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mss8_base_r(), + .end_abs_pa = addr_map_rpg_pm_mss8_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(2), + .dt_mmio = NULL, + .name = "perfmon_msschannel_partc1", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mss9_base_r(), + .end_abs_pa = addr_map_rpg_pm_mss9_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(2), + .dt_mmio =
NULL, + .name = "perfmon_msschannel_partc2", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mss10_base_r(), + .end_abs_pa = addr_map_rpg_pm_mss10_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(2), + .dt_mmio = NULL, + .name = "perfmon_msschannel_partc3", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mss11_base_r(), + .end_abs_pa = addr_map_rpg_pm_mss11_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(3), + .dt_mmio = NULL, + .name = "perfmon_msschannel_partd0", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mss12_base_r(), + .end_abs_pa = addr_map_rpg_pm_mss12_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(3), + .dt_mmio = NULL, + .name = "perfmon_msschannel_partd1", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mss13_base_r(), + .end_abs_pa = addr_map_rpg_pm_mss13_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(3), + .dt_mmio = NULL, + .name = "perfmon_msschannel_partd2", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mss14_base_r(), + .end_abs_pa = addr_map_rpg_pm_mss14_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(3), + .dt_mmio = NULL, + .name = "perfmon_msschannel_partd3", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mss15_base_r(), + .end_abs_pa = addr_map_rpg_pm_mss15_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, +}; + +hwpm_ip_perfmux t234_mss_channel_perfmux_static_array[ + T234_HWPM_IP_MSS_CHANNEL_NUM_PERFMUX_PER_INST * + T234_HWPM_IP_MSS_CHANNEL_NUM_INSTANCES] = { + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(2), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc8_base_r(), + .end_abs_pa = addr_map_mc8_limit_r(), + 
.start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mss_channel_alist, + .alist_size = ARRAY_SIZE(t234_mss_channel_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(2), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc9_base_r(), + .end_abs_pa = addr_map_mc9_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mss_channel_alist, + .alist_size = ARRAY_SIZE(t234_mss_channel_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(2), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc10_base_r(), + .end_abs_pa = addr_map_mc10_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mss_channel_alist, + .alist_size = ARRAY_SIZE(t234_mss_channel_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(2), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc11_base_r(), + .end_abs_pa = addr_map_mc11_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mss_channel_alist, + .alist_size = ARRAY_SIZE(t234_mss_channel_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(3), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc12_base_r(), + .end_abs_pa = addr_map_mc12_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mss_channel_alist, + .alist_size = ARRAY_SIZE(t234_mss_channel_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(3), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc13_base_r(), + .end_abs_pa = addr_map_mc13_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mss_channel_alist, + .alist_size = ARRAY_SIZE(t234_mss_channel_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(3), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc14_base_r(), + .end_abs_pa = addr_map_mc14_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mss_channel_alist, + .alist_size = ARRAY_SIZE(t234_mss_channel_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(3), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc15_base_r(), + .end_abs_pa = addr_map_mc15_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mss_channel_alist, + .alist_size = ARRAY_SIZE(t234_mss_channel_alist), + .fake_registers = NULL, + }, + { +
.is_hwpm_element = false, + .hw_inst_mask = BIT(1), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc4_base_r(), + .end_abs_pa = addr_map_mc4_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mss_channel_alist, + .alist_size = ARRAY_SIZE(t234_mss_channel_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(1), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc5_base_r(), + .end_abs_pa = addr_map_mc5_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mss_channel_alist, + .alist_size = ARRAY_SIZE(t234_mss_channel_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(1), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc6_base_r(), + .end_abs_pa = addr_map_mc6_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mss_channel_alist, + .alist_size = ARRAY_SIZE(t234_mss_channel_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(1), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc7_base_r(), + .end_abs_pa = addr_map_mc7_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mss_channel_alist, + .alist_size = ARRAY_SIZE(t234_mss_channel_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc0_base_r(), + .end_abs_pa = addr_map_mc0_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mss_channel_alist, + .alist_size = ARRAY_SIZE(t234_mss_channel_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc1_base_r(), + .end_abs_pa = addr_map_mc1_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mss_channel_alist, + .alist_size = ARRAY_SIZE(t234_mss_channel_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc2_base_r(), + .end_abs_pa = addr_map_mc2_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mss_channel_alist, + .alist_size = ARRAY_SIZE(t234_mss_channel_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm =
NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc3_base_r(), + .end_abs_pa = addr_map_mc3_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mss_channel_alist, + .alist_size = ARRAY_SIZE(t234_mss_channel_alist), + .fake_registers = NULL, + }, +}; + +struct hwpm_ip t234_hwpm_ip_mss_channel = { + .num_instances = T234_HWPM_IP_MSS_CHANNEL_NUM_INSTANCES, + .num_perfmon_per_inst = T234_HWPM_IP_MSS_CHANNEL_NUM_PERFMON_PER_INST, + .num_perfmux_per_inst = T234_HWPM_IP_MSS_CHANNEL_NUM_PERFMUX_PER_INST, + + .perfmon_range_start = addr_map_rpg_pm_mss0_base_r(), + .perfmon_range_end = addr_map_rpg_pm_mss15_limit_r(), + .inst_perfmon_stride = addr_map_rpg_pm_mss0_limit_r() - + addr_map_rpg_pm_mss0_base_r() + 1ULL, + .num_perfmon_slots = 0U, + .ip_perfmon = NULL, + + .perfmux_range_start = addr_map_mc8_base_r(), + .perfmux_range_end = addr_map_mc3_limit_r(), + .inst_perfmux_stride = addr_map_mc0_limit_r() - + addr_map_mc0_base_r() + 1ULL, + .num_perfmux_slots = 0U, + .ip_perfmux = NULL, + + .override_enable = false, + .fs_mask = 0U, + .perfmon_static_array = t234_mss_channel_perfmon_static_array, + .perfmux_static_array = t234_mss_channel_perfmux_static_array, + .reserved = false, +}; diff --git a/hal/t234/ip/mss_channel/t234_hwpm_ip_mss_channel.h b/hal/t234/ip/mss_channel/t234_hwpm_ip_mss_channel.h new file mode 100644 index 0000000..7ac672a --- /dev/null +++ b/hal/t234/ip/mss_channel/t234_hwpm_ip_mss_channel.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef T234_HWPM_IP_MSS_CHANNEL_H +#define T234_HWPM_IP_MSS_CHANNEL_H + +#if defined(CONFIG_SOC_HWPM_IP_MSS_CHANNEL) +#define T234_HWPM_ACTIVE_IP_MSS_CHANNEL \ + DEFINE_SOC_HWPM_ACTIVE_IP(T234_HWPM_IP_MSS_CHANNEL), + +/* This data should ideally be available in HW headers */ +#define T234_HWPM_IP_MSS_CHANNEL_NUM_INSTANCES 4U +#define T234_HWPM_IP_MSS_CHANNEL_NUM_PERFMON_PER_INST 4U +#define T234_HWPM_IP_MSS_CHANNEL_NUM_PERFMUX_PER_INST 4U + +extern struct hwpm_ip t234_hwpm_ip_mss_channel; + +#else +#define T234_HWPM_ACTIVE_IP_MSS_CHANNEL +#endif + +#endif /* T234_HWPM_IP_MSS_CHANNEL_H */ diff --git a/hal/t234/ip/mss_gpu_hub/t234_hwpm_ip_mss_gpu_hub.c b/hal/t234/ip/mss_gpu_hub/t234_hwpm_ip_mss_gpu_hub.c new file mode 100644 index 0000000..ee8c8aa --- /dev/null +++ b/hal/t234/ip/mss_gpu_hub/t234_hwpm_ip_mss_gpu_hub.c @@ -0,0 +1,243 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
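/*
 * [Editorial sketch, not part of this patch] t234_hwpm_ip_mss_channel above
 * describes its perfmux space as one range (addr_map_mc8_base_r() ..
 * addr_map_mc3_limit_r()) carved into inst_perfmux_stride-sized windows,
 * which suggests an absolute address can be turned into a slot index without
 * scanning the static array. The helper name below is hypothetical; only the
 * struct hwpm_ip fields it reads come from this file, and the u64/u32 kernel
 * types are assumed to be available.
 */
static inline int t234_hwpm_perfmux_slot(const struct hwpm_ip *ip,
	u64 abs_pa, u32 *slot)
{
	if ((abs_pa < ip->perfmux_range_start) ||
	    (abs_pa > ip->perfmux_range_end))
		return -EINVAL;

	*slot = (u32)((abs_pa - ip->perfmux_range_start) /
		ip->inst_perfmux_stride);
	return 0;
}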
+ */ + +#include "t234_hwpm_ip_mss_gpu_hub.h" + +#include +#include +#include + +hwpm_ip_perfmon t234_mss_gpu_hub_perfmon_static_array[ + T234_HWPM_IP_MSS_GPU_HUB_NUM_PERFMON_PER_INST * + T234_HWPM_IP_MSS_GPU_HUB_NUM_INSTANCES] = { + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_mssnvlhsh0", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mssnvl_base_r(), + .end_abs_pa = addr_map_rpg_pm_mssnvl_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, +}; + +hwpm_ip_perfmux t234_mss_gpu_hub_perfmux_static_array[ + T234_HWPM_IP_MSS_GPU_HUB_NUM_PERFMUX_PER_INST * + T234_HWPM_IP_MSS_GPU_HUB_NUM_INSTANCES] = { + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mss_nvlink_8_base_r(), + .end_abs_pa = addr_map_mss_nvlink_8_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = 0ULL, + .alist = t234_mss_nvlink_alist, + .alist_size = ARRAY_SIZE(t234_mss_nvlink_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mss_nvlink_1_base_r(), + .end_abs_pa = addr_map_mss_nvlink_1_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = 0ULL, + .alist = t234_mss_nvlink_alist, + .alist_size = ARRAY_SIZE(t234_mss_nvlink_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mss_nvlink_2_base_r(), + .end_abs_pa = addr_map_mss_nvlink_2_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = 0ULL, + .alist = t234_mss_nvlink_alist, + .alist_size = ARRAY_SIZE(t234_mss_nvlink_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mss_nvlink_3_base_r(), + .end_abs_pa = addr_map_mss_nvlink_3_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = 0ULL, + .alist = t234_mss_nvlink_alist, + .alist_size = ARRAY_SIZE(t234_mss_nvlink_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mss_nvlink_4_base_r(), + .end_abs_pa = addr_map_mss_nvlink_4_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = 0ULL, + .alist = t234_mss_nvlink_alist, + .alist_size = ARRAY_SIZE(t234_mss_nvlink_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = 
NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mss_nvlink_5_base_r(), + .end_abs_pa = addr_map_mss_nvlink_5_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = 0ULL, + .alist = t234_mss_nvlink_alist, + .alist_size = ARRAY_SIZE(t234_mss_nvlink_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mss_nvlink_6_base_r(), + .end_abs_pa = addr_map_mss_nvlink_6_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = 0ULL, + .alist = t234_mss_nvlink_alist, + .alist_size = ARRAY_SIZE(t234_mss_nvlink_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mss_nvlink_7_base_r(), + .end_abs_pa = addr_map_mss_nvlink_7_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = 0ULL, + .alist = t234_mss_nvlink_alist, + .alist_size = ARRAY_SIZE(t234_mss_nvlink_alist), + .fake_registers = NULL, + }, +}; + +struct hwpm_ip t234_hwpm_ip_mss_gpu_hub = { + .num_instances = T234_HWPM_IP_MSS_GPU_HUB_NUM_INSTANCES, + .num_perfmon_per_inst = T234_HWPM_IP_MSS_GPU_HUB_NUM_PERFMON_PER_INST, + .num_perfmux_per_inst = T234_HWPM_IP_MSS_GPU_HUB_NUM_PERFMUX_PER_INST, + + .perfmon_range_start = addr_map_rpg_pm_mssnvl_base_r(), + .perfmon_range_end = addr_map_rpg_pm_mssnvl_limit_r(), + .inst_perfmon_stride = addr_map_rpg_pm_mssnvl_limit_r() - + addr_map_rpg_pm_mssnvl_base_r() + 1ULL, + .num_perfmon_slots = 0U, + .ip_perfmon = NULL, + + .perfmux_range_start = addr_map_mss_nvlink_8_base_r(), + .perfmux_range_end = addr_map_mss_nvlink_7_limit_r(), + .inst_perfmux_stride = addr_map_mss_nvlink_8_limit_r() - + addr_map_mss_nvlink_8_base_r() + 1ULL, + .num_perfmux_slots = 0U, + .ip_perfmux = NULL, + + .override_enable = false, + .fs_mask = 0U, + .perfmon_static_array = t234_mss_gpu_hub_perfmon_static_array, + .perfmux_static_array = t234_mss_gpu_hub_perfmux_static_array, + .reserved = false, +}; diff --git a/hal/t234/ip/mss_gpu_hub/t234_hwpm_ip_mss_gpu_hub.h b/hal/t234/ip/mss_gpu_hub/t234_hwpm_ip_mss_gpu_hub.h new file mode 100644 index 0000000..38e5c54 --- /dev/null +++ b/hal/t234/ip/mss_gpu_hub/t234_hwpm_ip_mss_gpu_hub.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
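/*
 * [Editorial sketch, not part of this patch] num_perfmon_slots and
 * num_perfmux_slots are left 0U in every static descriptor in this series,
 * while ip_perfmon/ip_perfmux stay NULL; presumably a common init step sizes
 * those lookup tables from the range and stride, along these lines:
 */
static inline u32 t234_hwpm_num_slots(u64 range_start, u64 range_end,
	u64 stride)
{
	/* number of stride-sized windows covering [range_start, range_end] */
	return (u32)(((range_end + 1ULL) - range_start) / stride);
}
/*
 * For t234_hwpm_ip_mss_gpu_hub this would yield one perfmon slot and, if the
 * eight MSS NVLINK apertures are contiguous and equally sized, eight perfmux
 * slots.
 */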
+ */ + +#ifndef T234_HWPM_IP_MSS_GPU_HUB_H +#define T234_HWPM_IP_MSS_GPU_HUB_H + +#if defined(CONFIG_SOC_HWPM_IP_MSS_GPU_HUB) +#define T234_HWPM_ACTIVE_IP_MSS_GPU_HUB \ + DEFINE_SOC_HWPM_ACTIVE_IP(T234_HWPM_IP_MSS_GPU_HUB), + +/* This data should ideally be available in HW headers */ +#define T234_HWPM_IP_MSS_GPU_HUB_NUM_INSTANCES 1U +#define T234_HWPM_IP_MSS_GPU_HUB_NUM_PERFMON_PER_INST 1U +#define T234_HWPM_IP_MSS_GPU_HUB_NUM_PERFMUX_PER_INST 8U + +extern struct hwpm_ip t234_hwpm_ip_mss_gpu_hub; + +#else +#define T234_HWPM_ACTIVE_IP_MSS_GPU_HUB +#endif + +#endif /* T234_HWPM_IP_MSS_GPU_HUB_H */ diff --git a/hal/t234/ip/mss_iso_niso_hubs/t234_hwpm_ip_mss_iso_niso_hubs.c b/hal/t234/ip/mss_iso_niso_hubs/t234_hwpm_ip_mss_iso_niso_hubs.c new file mode 100644 index 0000000..3c95ebd --- /dev/null +++ b/hal/t234/ip/mss_iso_niso_hubs/t234_hwpm_ip_mss_iso_niso_hubs.c @@ -0,0 +1,296 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include "t234_hwpm_ip_mss_iso_niso_hubs.h" + +#include +#include +#include + +hwpm_ip_perfmon t234_mss_iso_niso_hubs_perfmon_static_array[ + T234_HWPM_IP_MSS_ISO_NISO_HUBS_NUM_PERFMON_PER_INST * + T234_HWPM_IP_MSS_ISO_NISO_HUBS_NUM_INSTANCES] = { + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_msshub0", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_msshub0_base_r(), + .end_abs_pa = addr_map_rpg_pm_msshub0_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_msshub1", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_msshub1_base_r(), + .end_abs_pa = addr_map_rpg_pm_msshub1_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, +}; + +hwpm_ip_perfmux t234_mss_iso_niso_hubs_perfmux_static_array[ + T234_HWPM_IP_MSS_ISO_NISO_HUBS_NUM_PERFMUX_PER_INST * + T234_HWPM_IP_MSS_ISO_NISO_HUBS_NUM_INSTANCES] = { + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc8_base_r(), + .end_abs_pa = addr_map_mc8_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mc8_res_mss_iso_niso_hub_alist, + .alist_size = ARRAY_SIZE(t234_mc8_res_mss_iso_niso_hub_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + 
.ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc4_base_r(), + .end_abs_pa = addr_map_mc4_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mc0to7_res_mss_iso_niso_hub_alist, + .alist_size = + ARRAY_SIZE(t234_mc0to7_res_mss_iso_niso_hub_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc5_base_r(), + .end_abs_pa = addr_map_mc5_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mc0to7_res_mss_iso_niso_hub_alist, + .alist_size = + ARRAY_SIZE(t234_mc0to7_res_mss_iso_niso_hub_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc6_base_r(), + .end_abs_pa = addr_map_mc6_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mc0to7_res_mss_iso_niso_hub_alist, + .alist_size = + ARRAY_SIZE(t234_mc0to7_res_mss_iso_niso_hub_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc7_base_r(), + .end_abs_pa = addr_map_mc7_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mc0to7_res_mss_iso_niso_hub_alist, + .alist_size = + ARRAY_SIZE(t234_mc0to7_res_mss_iso_niso_hub_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc0_base_r(), + .end_abs_pa = addr_map_mc0_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mc0to7_res_mss_iso_niso_hub_alist, + .alist_size = + ARRAY_SIZE(t234_mc0to7_res_mss_iso_niso_hub_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc1_base_r(), + .end_abs_pa = addr_map_mc1_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mc0to7_res_mss_iso_niso_hub_alist, + .alist_size = + ARRAY_SIZE(t234_mc0to7_res_mss_iso_niso_hub_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc2_base_r(), + .end_abs_pa = addr_map_mc2_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mc0to7_res_mss_iso_niso_hub_alist, + .alist_size = + ARRAY_SIZE(t234_mc0to7_res_mss_iso_niso_hub_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = 
{'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc3_base_r(), + .end_abs_pa = addr_map_mc3_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mc0to7_res_mss_iso_niso_hub_alist, + .alist_size = + ARRAY_SIZE(t234_mc0to7_res_mss_iso_niso_hub_alist), + .fake_registers = NULL, + }, +}; + +struct hwpm_ip t234_hwpm_ip_mss_iso_niso_hubs = { + .num_instances = T234_HWPM_IP_MSS_ISO_NISO_HUBS_NUM_INSTANCES, + .num_perfmon_per_inst = + T234_HWPM_IP_MSS_ISO_NISO_HUBS_NUM_PERFMON_PER_INST, + .num_perfmux_per_inst = + T234_HWPM_IP_MSS_ISO_NISO_HUBS_NUM_PERFMUX_PER_INST, + + .perfmon_range_start = addr_map_rpg_pm_msshub0_base_r(), + .perfmon_range_end = addr_map_rpg_pm_msshub1_limit_r(), + .inst_perfmon_stride = addr_map_rpg_pm_msshub0_limit_r() - + addr_map_rpg_pm_msshub0_base_r() + 1ULL, + .num_perfmon_slots = 0U, + .ip_perfmon = NULL, + + .perfmux_range_start = addr_map_mc8_base_r(), + .perfmux_range_end = addr_map_mc3_limit_r(), + .inst_perfmux_stride = addr_map_mc8_limit_r() - + addr_map_mc8_base_r() + 1ULL, + .num_perfmux_slots = 0U, + .ip_perfmux = NULL, + + .override_enable = false, + .fs_mask = 0U, + .perfmon_static_array = t234_mss_iso_niso_hubs_perfmon_static_array, + .perfmux_static_array = t234_mss_iso_niso_hubs_perfmux_static_array, + .reserved = false, +}; + diff --git a/hal/t234/ip/mss_iso_niso_hubs/t234_hwpm_ip_mss_iso_niso_hubs.h b/hal/t234/ip/mss_iso_niso_hubs/t234_hwpm_ip_mss_iso_niso_hubs.h new file mode 100644 index 0000000..a6e1e40 --- /dev/null +++ b/hal/t234/ip/mss_iso_niso_hubs/t234_hwpm_ip_mss_iso_niso_hubs.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef T234_HWPM_IP_MSS_ISO_NISO_HUBS_H +#define T234_HWPM_IP_MSS_ISO_NISO_HUBS_H + +#if defined(CONFIG_SOC_HWPM_IP_MSS_ISO_NISO_HUBS) +#define T234_HWPM_ACTIVE_IP_MSS_ISO_NISO_HUBS \ + DEFINE_SOC_HWPM_ACTIVE_IP(T234_HWPM_IP_MSS_ISO_NISO_HUBS), + +/* This data should ideally be available in HW headers */ +#define T234_HWPM_IP_MSS_ISO_NISO_HUBS_NUM_INSTANCES 1U +#define T234_HWPM_IP_MSS_ISO_NISO_HUBS_NUM_PERFMON_PER_INST 2U +#define T234_HWPM_IP_MSS_ISO_NISO_HUBS_NUM_PERFMUX_PER_INST 9U + +extern struct hwpm_ip t234_hwpm_ip_mss_iso_niso_hubs; + +#else +#define T234_HWPM_ACTIVE_IP_MSS_ISO_NISO_HUBS +#endif + +#endif /* T234_HWPM_IP_MSS_ISO_NISO_HUBS_H */ diff --git a/hal/t234/ip/mss_mcf/t234_hwpm_ip_mss_mcf.c b/hal/t234/ip/mss_mcf/t234_hwpm_ip_mss_mcf.c new file mode 100644 index 0000000..2bbc7e4 --- /dev/null +++ b/hal/t234/ip/mss_mcf/t234_hwpm_ip_mss_mcf.c @@ -0,0 +1,305 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
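/*
 * [Editorial sketch, not part of this patch] The MC apertures listed for
 * MSS ISO/NISO hubs overlap with those claimed by the MSS channel and MSS
 * MCF descriptors, each time with a resource-specific allowlist (for example
 * t234_mc8_res_mss_iso_niso_hub_alist here vs t234_mss_channel_alist above).
 * Resolving an address against one resource therefore has to go through that
 * resource's own perfmux entry; the helper below is an illustration, not
 * driver code.
 */
static const hwpm_ip_perfmux *t234_hwpm_find_perfmux(const struct hwpm_ip *ip,
	u64 abs_pa)
{
	u32 i;
	u32 count = ip->num_instances * ip->num_perfmux_per_inst;

	for (i = 0U; i < count; i++) {
		const hwpm_ip_perfmux *pmux = &ip->perfmux_static_array[i];

		if ((abs_pa >= pmux->start_abs_pa) &&
		    (abs_pa <= pmux->end_abs_pa))
			return pmux;	/* pmux->alist is the list to enforce */
	}
	return NULL;
}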
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include "t234_hwpm_ip_mss_mcf.h" + +#include +#include +#include + +hwpm_ip_perfmon t234_mss_mcf_perfmon_static_array[ + T234_HWPM_IP_MSS_MCF_NUM_PERFMON_PER_INST * + T234_HWPM_IP_MSS_MCF_NUM_INSTANCES] = { + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_mssmcfclient0", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mcf0_base_r(), + .end_abs_pa = addr_map_rpg_pm_mcf0_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_mssmcfmem0", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mcf1_base_r(), + .end_abs_pa = addr_map_rpg_pm_mcf1_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_mssmcfmem1", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_mcf2_base_r(), + .end_abs_pa = addr_map_rpg_pm_mcf2_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, +}; + +hwpm_ip_perfmux t234_mss_mcf_perfmux_static_array[ + T234_HWPM_IP_MSS_MCF_NUM_PERFMUX_PER_INST * + T234_HWPM_IP_MSS_MCF_NUM_INSTANCES] = { + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc4_base_r(), + .end_abs_pa = addr_map_mc4_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mc2to7_mss_mcf_alist, + .alist_size = ARRAY_SIZE(t234_mc2to7_mss_mcf_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc5_base_r(), + .end_abs_pa = addr_map_mc5_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mc2to7_mss_mcf_alist, + .alist_size = ARRAY_SIZE(t234_mc2to7_mss_mcf_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc6_base_r(), + .end_abs_pa = addr_map_mc6_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mc2to7_mss_mcf_alist, + .alist_size = 
ARRAY_SIZE(t234_mc2to7_mss_mcf_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc7_base_r(), + .end_abs_pa = addr_map_mc7_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mc2to7_mss_mcf_alist, + .alist_size = ARRAY_SIZE(t234_mc2to7_mss_mcf_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mcb_base_r(), + .end_abs_pa = addr_map_mcb_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mcb_mss_mcf_alist, + .alist_size = ARRAY_SIZE(t234_mcb_mss_mcf_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc0_base_r(), + .end_abs_pa = addr_map_mc0_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mc0to1_mss_mcf_alist, + .alist_size = ARRAY_SIZE(t234_mc0to1_mss_mcf_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc1_base_r(), + .end_abs_pa = addr_map_mc1_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mc0to1_mss_mcf_alist, + .alist_size = ARRAY_SIZE(t234_mc0to1_mss_mcf_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc2_base_r(), + .end_abs_pa = addr_map_mc2_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mc2to7_mss_mcf_alist, + .alist_size = ARRAY_SIZE(t234_mc2to7_mss_mcf_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_mc3_base_r(), + .end_abs_pa = addr_map_mc3_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_mc2to7_mss_mcf_alist, + .alist_size = ARRAY_SIZE(t234_mc2to7_mss_mcf_alist), + .fake_registers = NULL, + }, +}; + +struct hwpm_ip t234_hwpm_ip_mss_mcf = { + .num_instances = T234_HWPM_IP_MSS_MCF_NUM_INSTANCES, + .num_perfmon_per_inst = T234_HWPM_IP_MSS_MCF_NUM_PERFMON_PER_INST, + .num_perfmux_per_inst = T234_HWPM_IP_MSS_MCF_NUM_PERFMUX_PER_INST, + + .perfmon_range_start = addr_map_rpg_pm_mcf0_base_r(), + .perfmon_range_end = addr_map_rpg_pm_mcf2_limit_r(), + .inst_perfmon_stride = addr_map_rpg_pm_mcf0_limit_r() - + addr_map_rpg_pm_mcf0_base_r() + 1ULL, + .num_perfmon_slots = 0U, + .ip_perfmon = NULL, + + .perfmux_range_start = addr_map_mc4_base_r(), + 
.perfmux_range_end = addr_map_mc3_limit_r(), + .inst_perfmux_stride = addr_map_mc4_limit_r() - + addr_map_mc4_base_r() + 1ULL, + .num_perfmux_slots = 0U, + .ip_perfmux = NULL, + + .override_enable = false, + .fs_mask = 0U, + .perfmon_static_array = t234_mss_mcf_perfmon_static_array, + .perfmux_static_array = t234_mss_mcf_perfmux_static_array, + .reserved = false, +}; diff --git a/hal/t234/ip/mss_mcf/t234_hwpm_ip_mss_mcf.h b/hal/t234/ip/mss_mcf/t234_hwpm_ip_mss_mcf.h new file mode 100644 index 0000000..b7ecb51 --- /dev/null +++ b/hal/t234/ip/mss_mcf/t234_hwpm_ip_mss_mcf.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef T234_HWPM_IP_MSS_MCF_H +#define T234_HWPM_IP_MSS_MCF_H + +#if defined(CONFIG_SOC_HWPM_IP_MSS_MCF) +#define T234_HWPM_ACTIVE_IP_MSS_MCF \ + DEFINE_SOC_HWPM_ACTIVE_IP(T234_HWPM_IP_MSS_MCF), + +/* This data should ideally be available in HW headers */ +#define T234_HWPM_IP_MSS_MCF_NUM_INSTANCES 1U +#define T234_HWPM_IP_MSS_MCF_NUM_PERFMON_PER_INST 3U +#define T234_HWPM_IP_MSS_MCF_NUM_PERFMUX_PER_INST 9U + +extern struct hwpm_ip t234_hwpm_ip_mss_mcf; + +#else +#define T234_HWPM_ACTIVE_IP_MSS_MCF +#endif + +#endif /* T234_HWPM_IP_MSS_MCF_H */ diff --git a/hal/t234/ip/nvdec/t234_hwpm_ip_nvdec.c b/hal/t234/ip/nvdec/t234_hwpm_ip_nvdec.c new file mode 100644 index 0000000..3c01440 --- /dev/null +++ b/hal/t234/ip/nvdec/t234_hwpm_ip_nvdec.c @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
+ */ + +#include "t234_hwpm_ip_nvdec.h" + +#include +#include +#include + +hwpm_ip_perfmon t234_nvdec_perfmon_static_array[ + T234_HWPM_IP_NVDEC_NUM_PERFMON_PER_INST * + T234_HWPM_IP_NVDEC_NUM_INSTANCES] = { + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_nvdeca0", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_nvdec0_base_r(), + .end_abs_pa = addr_map_rpg_pm_nvdec0_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, +}; + +hwpm_ip_perfmux t234_nvdec_perfmux_static_array[ + T234_HWPM_IP_NVDEC_NUM_PERFMUX_PER_INST * + T234_HWPM_IP_NVDEC_NUM_INSTANCES] = { + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_nvdec_base_r(), + .end_abs_pa = addr_map_nvdec_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = 0ULL, + .alist = t234_nvdec_alist, + .alist_size = ARRAY_SIZE(t234_nvdec_alist), + .fake_registers = NULL, + }, +}; + +struct hwpm_ip t234_hwpm_ip_nvdec = { + .num_instances = T234_HWPM_IP_NVDEC_NUM_INSTANCES, + .num_perfmon_per_inst = T234_HWPM_IP_NVDEC_NUM_PERFMON_PER_INST, + .num_perfmux_per_inst = T234_HWPM_IP_NVDEC_NUM_PERFMUX_PER_INST, + + .perfmon_range_start = addr_map_rpg_pm_nvdec0_base_r(), + .perfmon_range_end = addr_map_rpg_pm_nvdec0_limit_r(), + .inst_perfmon_stride = addr_map_rpg_pm_nvdec0_limit_r() - + addr_map_rpg_pm_nvdec0_base_r() + 1ULL, + .num_perfmon_slots = 0U, + .ip_perfmon = NULL, + + .perfmux_range_start = addr_map_nvdec_base_r(), + .perfmux_range_end = addr_map_nvdec_limit_r(), + .inst_perfmux_stride = addr_map_nvdec_limit_r() - + addr_map_nvdec_base_r() + 1ULL, + .num_perfmux_slots = 0U, + .ip_perfmux = NULL, + + .override_enable = false, + .fs_mask = 0U, + .perfmon_static_array = t234_nvdec_perfmon_static_array, + .perfmux_static_array = t234_nvdec_perfmux_static_array, + .reserved = false, +}; diff --git a/hal/t234/ip/nvdec/t234_hwpm_ip_nvdec.h b/hal/t234/ip/nvdec/t234_hwpm_ip_nvdec.h new file mode 100644 index 0000000..906c375 --- /dev/null +++ b/hal/t234/ip/nvdec/t234_hwpm_ip_nvdec.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
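/*
 * [Editorial sketch, not part of this patch] Every perfmon and perfmux entry
 * carries its own register allowlist (alist/alist_size). For a
 * single-instance IP such as NVDEC, the IP's total allowlist contribution is
 * simply the sum over both static arrays; the helper below only illustrates
 * that accounting and is not the driver's actual implementation.
 */
static u64 t234_hwpm_ip_alist_entries(const struct hwpm_ip *ip)
{
	u64 total = 0ULL;
	u32 i;

	for (i = 0U; i < ip->num_instances * ip->num_perfmon_per_inst; i++)
		total += ip->perfmon_static_array[i].alist_size;

	for (i = 0U; i < ip->num_instances * ip->num_perfmux_per_inst; i++)
		total += ip->perfmux_static_array[i].alist_size;

	return total;
}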
+ */ + +#ifndef T234_HWPM_IP_NVDEC_H +#define T234_HWPM_IP_NVDEC_H + +#if defined(CONFIG_SOC_HWPM_IP_NVDEC) +#define T234_HWPM_ACTIVE_IP_NVDEC \ + DEFINE_SOC_HWPM_ACTIVE_IP(T234_HWPM_IP_NVDEC), + +/* This data should ideally be available in HW headers */ +#define T234_HWPM_IP_NVDEC_NUM_INSTANCES 1U +#define T234_HWPM_IP_NVDEC_NUM_PERFMON_PER_INST 1U +#define T234_HWPM_IP_NVDEC_NUM_PERFMUX_PER_INST 1U + +extern struct hwpm_ip t234_hwpm_ip_nvdec; + +#else +#define T234_HWPM_ACTIVE_IP_NVDEC +#endif + +#endif /* T234_HWPM_IP_NVDEC_H */ diff --git a/hal/t234/ip/nvdla/t234_hwpm_ip_nvdla.c b/hal/t234/ip/nvdla/t234_hwpm_ip_nvdla.c new file mode 100644 index 0000000..e57c139 --- /dev/null +++ b/hal/t234/ip/nvdla/t234_hwpm_ip_nvdla.c @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include "t234_hwpm_ip_nvdla.h" + +#include +#include +#include + +hwpm_ip_perfmon t234_nvdla_perfmon_static_array[ + T234_HWPM_IP_NVDLA_NUM_PERFMON_PER_INST * + T234_HWPM_IP_NVDLA_NUM_INSTANCES] = { + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_nvdlab0", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_nvdla0_base_r(), + .end_abs_pa = addr_map_rpg_pm_nvdla0_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(1), + .dt_mmio = NULL, + .name = "perfmon_nvdlab1", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_nvdla1_base_r(), + .end_abs_pa = addr_map_rpg_pm_nvdla1_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, +}; + +hwpm_ip_perfmux t234_nvdla_perfmux_static_array[ + T234_HWPM_IP_NVDLA_NUM_PERFMUX_PER_INST * + T234_HWPM_IP_NVDLA_NUM_INSTANCES] = { + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_nvdla0_base_r(), + .end_abs_pa = addr_map_nvdla0_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = 0ULL, + .alist = t234_nvdla_alist, + .alist_size = ARRAY_SIZE(t234_nvdla_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(1), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_nvdla1_base_r(), + .end_abs_pa = addr_map_nvdla1_limit_r(), + .start_pa = 0, + .end_pa = 0, + .base_pa = 0ULL, + .alist = t234_nvdla_alist, + .alist_size 
= ARRAY_SIZE(t234_nvdla_alist), + .fake_registers = NULL, + }, +}; + +struct hwpm_ip t234_hwpm_ip_nvdla = { + .num_instances = T234_HWPM_IP_NVDLA_NUM_INSTANCES, + .num_perfmon_per_inst = T234_HWPM_IP_NVDLA_NUM_PERFMON_PER_INST, + .num_perfmux_per_inst = T234_HWPM_IP_NVDLA_NUM_PERFMUX_PER_INST, + + .perfmon_range_start = addr_map_rpg_pm_nvdla0_base_r(), + .perfmon_range_end = addr_map_rpg_pm_nvdla1_limit_r(), + .inst_perfmon_stride = addr_map_rpg_pm_nvdla0_limit_r() - + addr_map_rpg_pm_nvdla0_base_r() + 1ULL, + .num_perfmon_slots = 0U, + .ip_perfmon = NULL, + + .perfmux_range_start = addr_map_nvdla0_base_r(), + .perfmux_range_end = addr_map_nvdla1_limit_r(), + .inst_perfmux_stride = addr_map_nvdla0_limit_r() - + addr_map_nvdla0_base_r() + 1ULL, + .num_perfmux_slots = 0U, + .ip_perfmux = NULL, + + .override_enable = false, + .fs_mask = 0U, + .perfmon_static_array = t234_nvdla_perfmon_static_array, + .perfmux_static_array = t234_nvdla_perfmux_static_array, + .reserved = false, +}; diff --git a/hal/t234/ip/nvdla/t234_hwpm_ip_nvdla.h b/hal/t234/ip/nvdla/t234_hwpm_ip_nvdla.h new file mode 100644 index 0000000..7f312c2 --- /dev/null +++ b/hal/t234/ip/nvdla/t234_hwpm_ip_nvdla.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef T234_HWPM_IP_NVDLA_H +#define T234_HWPM_IP_NVDLA_H + +#if defined(CONFIG_SOC_HWPM_IP_NVDLA) +#define T234_HWPM_ACTIVE_IP_NVDLA \ + DEFINE_SOC_HWPM_ACTIVE_IP(T234_HWPM_IP_NVDLA), + +/* This data should ideally be available in HW headers */ +#define T234_HWPM_IP_NVDLA_NUM_INSTANCES 2U +#define T234_HWPM_IP_NVDLA_NUM_PERFMON_PER_INST 1U +#define T234_HWPM_IP_NVDLA_NUM_PERFMUX_PER_INST 1U + +extern struct hwpm_ip t234_hwpm_ip_nvdla; + +#else +#define T234_HWPM_ACTIVE_IP_NVDLA +#endif + +#endif /* T234_HWPM_IP_NVDLA_H */ diff --git a/hal/t234/ip/nvenc/t234_hwpm_ip_nvenc.c b/hal/t234/ip/nvenc/t234_hwpm_ip_nvenc.c new file mode 100644 index 0000000..0b598b4 --- /dev/null +++ b/hal/t234/ip/nvenc/t234_hwpm_ip_nvenc.c @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
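/*
 * [Editorial sketch, not part of this patch] Each element tags itself with
 * hw_inst_mask = BIT(instance) and every IP descriptor starts with
 * fs_mask = 0U. A natural reading is floorsweeping-style bookkeeping:
 * instances found present at init set their bit in fs_mask, and an element
 * is usable only if its bit is set. This is an interpretation of the fields,
 * not code from this series; bool/u32 kernel types are assumed.
 */
static inline bool t234_hwpm_element_available(const struct hwpm_ip *ip,
	u32 hw_inst_mask)
{
	return (ip->fs_mask & hw_inst_mask) != 0U;
}
/* e.g. with both NVDLA instances present, fs_mask would be BIT(0) | BIT(1). */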
+ */ + +#include "t234_hwpm_ip_nvenc.h" + +#include +#include +#include + +hwpm_ip_perfmon t234_nvenc_perfmon_static_array[ + T234_HWPM_IP_NVENC_NUM_PERFMON_PER_INST * + T234_HWPM_IP_NVENC_NUM_INSTANCES] = { + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_nvenca0", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_nvenc0_base_r(), + .end_abs_pa = addr_map_rpg_pm_nvenc0_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, +}; + +hwpm_ip_perfmux t234_nvenc_perfmux_static_array[ + T234_HWPM_IP_NVENC_NUM_PERFMUX_PER_INST * + T234_HWPM_IP_NVENC_NUM_INSTANCES] = { + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_nvenc_base_r(), + .end_abs_pa = addr_map_nvenc_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_nvenc_alist, + .alist_size = ARRAY_SIZE(t234_nvenc_alist), + .fake_registers = NULL, + }, +}; + +struct hwpm_ip t234_hwpm_ip_nvenc = { + .num_instances = T234_HWPM_IP_NVENC_NUM_INSTANCES, + .num_perfmon_per_inst = T234_HWPM_IP_NVENC_NUM_PERFMON_PER_INST, + .num_perfmux_per_inst = T234_HWPM_IP_NVENC_NUM_PERFMUX_PER_INST, + + .perfmon_range_start = addr_map_rpg_pm_nvenc0_base_r(), + .perfmon_range_end = addr_map_rpg_pm_nvenc0_limit_r(), + .inst_perfmon_stride = addr_map_rpg_pm_nvenc0_limit_r() - + addr_map_rpg_pm_nvenc0_base_r() + 1ULL, + .num_perfmon_slots = 0U, + .ip_perfmon = NULL, + + .perfmux_range_start = addr_map_nvenc_base_r(), + .perfmux_range_end = addr_map_nvenc_limit_r(), + .inst_perfmux_stride = addr_map_nvenc_limit_r() - + addr_map_nvenc_base_r() + 1ULL, + .num_perfmux_slots = 0U, + .ip_perfmux = NULL, + + .override_enable = false, + .fs_mask = 0U, + .perfmon_static_array = t234_nvenc_perfmon_static_array, + .perfmux_static_array = t234_nvenc_perfmux_static_array, + .reserved = false, +}; diff --git a/hal/t234/ip/nvenc/t234_hwpm_ip_nvenc.h b/hal/t234/ip/nvenc/t234_hwpm_ip_nvenc.h new file mode 100644 index 0000000..135e74c --- /dev/null +++ b/hal/t234/ip/nvenc/t234_hwpm_ip_nvenc.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef T234_HWPM_IP_NVENC_H +#define T234_HWPM_IP_NVENC_H + +#if defined(CONFIG_SOC_HWPM_IP_NVENC) +#define T234_HWPM_ACTIVE_IP_NVENC \ + DEFINE_SOC_HWPM_ACTIVE_IP(T234_HWPM_IP_NVENC), + +/* This data should ideally be available in HW headers */ +#define T234_HWPM_IP_NVENC_NUM_INSTANCES 1U +#define T234_HWPM_IP_NVENC_NUM_PERFMON_PER_INST 1U +#define T234_HWPM_IP_NVENC_NUM_PERFMUX_PER_INST 1U + +extern struct hwpm_ip t234_hwpm_ip_nvenc; + +#else +#define T234_HWPM_ACTIVE_IP_NVENC +#endif + +#endif /* T234_HWPM_IP_NVENC_H */ diff --git a/hal/t234/ip/ofa/t234_hwpm_ip_ofa.c b/hal/t234/ip/ofa/t234_hwpm_ip_ofa.c new file mode 100644 index 0000000..4d4e765 --- /dev/null +++ b/hal/t234/ip/ofa/t234_hwpm_ip_ofa.c @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include "t234_hwpm_ip_ofa.h" + +#include +#include +#include + +hwpm_ip_perfmon t234_ofa_perfmon_static_array[ + T234_HWPM_IP_OFA_NUM_PERFMON_PER_INST * + T234_HWPM_IP_OFA_NUM_INSTANCES] = { + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_ofaa0", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_ofa_base_r(), + .end_abs_pa = addr_map_rpg_pm_ofa_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, +}; + +hwpm_ip_perfmux t234_ofa_perfmux_static_array[ + T234_HWPM_IP_OFA_NUM_PERFMUX_PER_INST * + T234_HWPM_IP_OFA_NUM_INSTANCES] = { + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_ofa_base_r(), + .end_abs_pa = addr_map_ofa_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_ofa_alist, + .alist_size = ARRAY_SIZE(t234_ofa_alist), + .fake_registers = NULL, + }, +}; + +struct hwpm_ip t234_hwpm_ip_ofa = { + .num_instances = T234_HWPM_IP_OFA_NUM_INSTANCES, + .num_perfmon_per_inst = T234_HWPM_IP_OFA_NUM_PERFMON_PER_INST, + .num_perfmux_per_inst = T234_HWPM_IP_OFA_NUM_PERFMUX_PER_INST, + + .perfmon_range_start = addr_map_rpg_pm_ofa_base_r(), + .perfmon_range_end = addr_map_rpg_pm_ofa_limit_r(), + .inst_perfmon_stride = addr_map_rpg_pm_ofa_limit_r() - + addr_map_rpg_pm_ofa_base_r() + 1ULL, + .num_perfmon_slots = 0U, + .ip_perfmon = NULL, + + .perfmux_range_start = addr_map_ofa_base_r(), + .perfmux_range_end = addr_map_ofa_limit_r(), + .inst_perfmux_stride = addr_map_ofa_limit_r() - + addr_map_ofa_base_r() + 1ULL, + .num_perfmux_slots = 0U, + .ip_perfmux = NULL, + + .override_enable = false, + .fs_mask = 0U, + .perfmon_static_array = t234_ofa_perfmon_static_array, + .perfmux_static_array = t234_ofa_perfmux_static_array, + .reserved = false, +}; diff --git a/hal/t234/ip/ofa/t234_hwpm_ip_ofa.h 
b/hal/t234/ip/ofa/t234_hwpm_ip_ofa.h new file mode 100644 index 0000000..4c43fea --- /dev/null +++ b/hal/t234/ip/ofa/t234_hwpm_ip_ofa.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef T234_HWPM_IP_OFA_H +#define T234_HWPM_IP_OFA_H + +#if defined(CONFIG_SOC_HWPM_IP_OFA) +#define T234_HWPM_ACTIVE_IP_OFA \ + DEFINE_SOC_HWPM_ACTIVE_IP(T234_HWPM_IP_OFA), + +/* This data should ideally be available in HW headers */ +#define T234_HWPM_IP_OFA_NUM_INSTANCES 1U +#define T234_HWPM_IP_OFA_NUM_PERFMON_PER_INST 1U +#define T234_HWPM_IP_OFA_NUM_PERFMUX_PER_INST 1U + +extern struct hwpm_ip t234_hwpm_ip_ofa; + +#else +#define T234_HWPM_ACTIVE_IP_OFA +#endif + +#endif /* T234_HWPM_IP_OFA_H */ diff --git a/hal/t234/ip/pcie/t234_hwpm_ip_pcie.c b/hal/t234/ip/pcie/t234_hwpm_ip_pcie.c new file mode 100644 index 0000000..7bad441 --- /dev/null +++ b/hal/t234/ip/pcie/t234_hwpm_ip_pcie.c @@ -0,0 +1,516 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#include "t234_hwpm_ip_pcie.h" + +#include +#include +#include + +hwpm_ip_perfmon t234_pcie_perfmon_static_array[ + T234_HWPM_IP_PCIE_NUM_PERFMON_PER_INST * + T234_HWPM_IP_PCIE_NUM_INSTANCES] = { + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_pcie0", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_pcie_c0_base_r(), + .end_abs_pa = addr_map_rpg_pm_pcie_c0_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(1), + .dt_mmio = NULL, + .name = "perfmon_pcie1", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_pcie_c1_base_r(), + .end_abs_pa = addr_map_rpg_pm_pcie_c1_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(2), + .dt_mmio = NULL, + .name = "perfmon_pcie2", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_pcie_c2_base_r(), + .end_abs_pa = addr_map_rpg_pm_pcie_c2_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(3), + .dt_mmio = NULL, + .name = "perfmon_pcie3", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_pcie_c3_base_r(), + .end_abs_pa = addr_map_rpg_pm_pcie_c3_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(4), + .dt_mmio = NULL, + .name = "perfmon_pcie4", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_pcie_c4_base_r(), + .end_abs_pa = addr_map_rpg_pm_pcie_c4_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(5), + .dt_mmio = NULL, + .name = "perfmon_pcie5", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_pcie_c5_base_r(), + .end_abs_pa = addr_map_rpg_pm_pcie_c5_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(6), + .dt_mmio = NULL, + .name = "perfmon_pcie6", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + 
.hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_pcie_c6_base_r(), + .end_abs_pa = addr_map_rpg_pm_pcie_c6_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(7), + .dt_mmio = NULL, + .name = "perfmon_pcie7", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_pcie_c7_base_r(), + .end_abs_pa = addr_map_rpg_pm_pcie_c7_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(8), + .dt_mmio = NULL, + .name = "perfmon_pcie8", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_pcie_c8_base_r(), + .end_abs_pa = addr_map_rpg_pm_pcie_c8_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(9), + .dt_mmio = NULL, + .name = "perfmon_pcie9", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_pcie_c9_base_r(), + .end_abs_pa = addr_map_rpg_pm_pcie_c9_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(10), + .dt_mmio = NULL, + .name = "perfmon_pcie10", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_pcie_c10_base_r(), + .end_abs_pa = addr_map_rpg_pm_pcie_c10_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, +}; + +hwpm_ip_perfmux t234_pcie_perfmux_static_array[ + T234_HWPM_IP_PCIE_NUM_PERFMUX_PER_INST * + T234_HWPM_IP_PCIE_NUM_INSTANCES] = { + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(8), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_pcie_c8_ctl_base_r(), + .end_abs_pa = addr_map_pcie_c8_ctl_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_pcie_ctl_alist, + .alist_size = ARRAY_SIZE(t234_pcie_ctl_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(9), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_pcie_c9_ctl_base_r(), + .end_abs_pa = addr_map_pcie_c9_ctl_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_pcie_ctl_alist, + .alist_size = ARRAY_SIZE(t234_pcie_ctl_alist), + .fake_registers = NULL, + }, + { + 
.is_hwpm_element = false, + .hw_inst_mask = BIT(10), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_pcie_c10_ctl_base_r(), + .end_abs_pa = addr_map_pcie_c10_ctl_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_pcie_ctl_alist, + .alist_size = ARRAY_SIZE(t234_pcie_ctl_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(1), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_pcie_c1_ctl_base_r(), + .end_abs_pa = addr_map_pcie_c1_ctl_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_pcie_ctl_alist, + .alist_size = ARRAY_SIZE(t234_pcie_ctl_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(2), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_pcie_c2_ctl_base_r(), + .end_abs_pa = addr_map_pcie_c2_ctl_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_pcie_ctl_alist, + .alist_size = ARRAY_SIZE(t234_pcie_ctl_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(3), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_pcie_c3_ctl_base_r(), + .end_abs_pa = addr_map_pcie_c3_ctl_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_pcie_ctl_alist, + .alist_size = ARRAY_SIZE(t234_pcie_ctl_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(4), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_pcie_c4_ctl_base_r(), + .end_abs_pa = addr_map_pcie_c4_ctl_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_pcie_ctl_alist, + .alist_size = ARRAY_SIZE(t234_pcie_ctl_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_pcie_c0_ctl_base_r(), + .end_abs_pa = addr_map_pcie_c0_ctl_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_pcie_ctl_alist, + .alist_size = ARRAY_SIZE(t234_pcie_ctl_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(5), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_pcie_c5_ctl_base_r(), + .end_abs_pa = addr_map_pcie_c5_ctl_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_pcie_ctl_alist, + .alist_size = ARRAY_SIZE(t234_pcie_ctl_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(6), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + 
.ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_pcie_c6_ctl_base_r(), + .end_abs_pa = addr_map_pcie_c6_ctl_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_pcie_ctl_alist, + .alist_size = ARRAY_SIZE(t234_pcie_ctl_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(7), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_pcie_c7_ctl_base_r(), + .end_abs_pa = addr_map_pcie_c7_ctl_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_pcie_ctl_alist, + .alist_size = ARRAY_SIZE(t234_pcie_ctl_alist), + .fake_registers = NULL, + }, +}; + +struct hwpm_ip t234_hwpm_ip_pcie = { + .num_instances = T234_HWPM_IP_PCIE_NUM_INSTANCES, + .num_perfmon_per_inst = T234_HWPM_IP_PCIE_NUM_PERFMON_PER_INST, + .num_perfmux_per_inst = T234_HWPM_IP_PCIE_NUM_PERFMUX_PER_INST, + + .perfmon_range_start = addr_map_rpg_pm_pcie_c0_base_r(), + .perfmon_range_end = addr_map_rpg_pm_pcie_c10_limit_r(), + .inst_perfmon_stride = addr_map_rpg_pm_pcie_c0_limit_r() - + addr_map_rpg_pm_pcie_c0_base_r() + 1ULL, + .num_perfmon_slots = 0U, + .ip_perfmon = NULL, + + .perfmux_range_start = addr_map_pcie_c8_ctl_base_r(), + .perfmux_range_end = addr_map_pcie_c7_ctl_limit_r(), + .inst_perfmux_stride = addr_map_pcie_c8_ctl_limit_r() - + addr_map_pcie_c8_ctl_base_r() + 1ULL, + .num_perfmux_slots = 0U, + .ip_perfmux = NULL, + + .override_enable = false, + .fs_mask = 0U, + .perfmon_static_array = t234_pcie_perfmon_static_array, + .perfmux_static_array = t234_pcie_perfmux_static_array, + .reserved = false, +}; diff --git a/hal/t234/ip/pcie/t234_hwpm_ip_pcie.h b/hal/t234/ip/pcie/t234_hwpm_ip_pcie.h new file mode 100644 index 0000000..005a695 --- /dev/null +++ b/hal/t234/ip/pcie/t234_hwpm_ip_pcie.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef T234_HWPM_IP_PCIE_H +#define T234_HWPM_IP_PCIE_H + +#if defined(CONFIG_SOC_HWPM_IP_PCIE) +#define T234_HWPM_ACTIVE_IP_PCIE \ + DEFINE_SOC_HWPM_ACTIVE_IP(T234_HWPM_IP_PCIE), + +/* This data should ideally be available in HW headers */ +#define T234_HWPM_IP_PCIE_NUM_INSTANCES 11U +#define T234_HWPM_IP_PCIE_NUM_PERFMON_PER_INST 1U +#define T234_HWPM_IP_PCIE_NUM_PERFMUX_PER_INST 1U + +extern struct hwpm_ip t234_hwpm_ip_pcie; + +#else +#define T234_HWPM_ACTIVE_IP_PCIE +#endif + +#endif /* T234_HWPM_IP_PCIE_H */ diff --git a/hal/t234/ip/pma/t234_hwpm_ip_pma.c b/hal/t234/ip/pma/t234_hwpm_ip_pma.c new file mode 100644 index 0000000..20671a6 --- /dev/null +++ b/hal/t234/ip/pma/t234_hwpm_ip_pma.c @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include "t234_hwpm_ip_pma.h" + +#include +#include +#include + +hwpm_ip_perfmon t234_pma_perfmon_static_array[ + T234_HWPM_IP_PMA_NUM_PERFMON_PER_INST * + T234_HWPM_IP_PMA_NUM_INSTANCES] = { + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_sys0", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_pma_base_r(), + .end_abs_pa = addr_map_rpg_pm_pma_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, +}; + +hwpm_ip_perfmux t234_pma_perfmux_static_array[ + T234_HWPM_IP_PMA_NUM_PERFMUX_PER_INST * + T234_HWPM_IP_PMA_NUM_INSTANCES] = { + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "pma", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_pma_base_r(), + .end_abs_pa = addr_map_pma_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_pma_base_r(), + .alist = t234_pma_res_pma_alist, + .alist_size = ARRAY_SIZE(t234_pma_res_pma_alist), + .fake_registers = NULL, + }, +}; + +struct hwpm_ip t234_hwpm_ip_pma = { + .num_instances = T234_HWPM_IP_PMA_NUM_INSTANCES, + .num_perfmon_per_inst = T234_HWPM_IP_PMA_NUM_PERFMON_PER_INST, + .num_perfmux_per_inst = T234_HWPM_IP_PMA_NUM_PERFMUX_PER_INST, + + .perfmon_range_start = addr_map_rpg_pm_pma_base_r(), + .perfmon_range_end = addr_map_rpg_pm_pma_limit_r(), + .inst_perfmon_stride = addr_map_rpg_pm_pma_limit_r() - + addr_map_rpg_pm_pma_base_r() + 1ULL, + .num_perfmon_slots = 0U, + .ip_perfmon = NULL, + + .perfmux_range_start = addr_map_pma_base_r(), + .perfmux_range_end = addr_map_pma_limit_r(), + .inst_perfmux_stride = addr_map_pma_limit_r() - + addr_map_pma_base_r() + 1ULL, + .num_perfmux_slots = 0U, + .ip_perfmux = NULL, + + .override_enable = false, + .fs_mask = 0U, + .perfmon_static_array = t234_pma_perfmon_static_array, + .perfmux_static_array = t234_pma_perfmux_static_array, + .reserved = false, +}; diff --git a/hal/t234/ip/pma/t234_hwpm_ip_pma.h b/hal/t234/ip/pma/t234_hwpm_ip_pma.h new file mode 100644 index 0000000..73ab1c1 --- /dev/null +++ b/hal/t234/ip/pma/t234_hwpm_ip_pma.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef T234_HWPM_IP_PMA_H +#define T234_HWPM_IP_PMA_H + +#define T234_HWPM_ACTIVE_IP_PMA \ + DEFINE_SOC_HWPM_ACTIVE_IP(T234_HWPM_IP_PMA), + +/* This data should ideally be available in HW headers */ +#define T234_HWPM_IP_PMA_NUM_INSTANCES 1U +#define T234_HWPM_IP_PMA_NUM_PERFMON_PER_INST 1U +#define T234_HWPM_IP_PMA_NUM_PERFMUX_PER_INST 1U + +extern struct hwpm_ip t234_hwpm_ip_pma; + +#endif /* T234_HWPM_IP_PMA_H */ diff --git a/hal/t234/ip/pva/t234_hwpm_ip_pva.c b/hal/t234/ip/pva/t234_hwpm_ip_pva.c new file mode 100644 index 0000000..89c0d2a --- /dev/null +++ b/hal/t234/ip/pva/t234_hwpm_ip_pva.c @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include "t234_hwpm_ip_pva.h" + +#include +#include +#include + +hwpm_ip_perfmon t234_pva_perfmon_static_array[ + T234_HWPM_IP_PVA_NUM_PERFMON_PER_INST * + T234_HWPM_IP_PVA_NUM_INSTANCES] = { + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_pvav0", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_pva0_0_base_r(), + .end_abs_pa = addr_map_rpg_pm_pva0_0_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_pvav1", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_pva0_1_base_r(), + .end_abs_pa = addr_map_rpg_pm_pva0_1_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_pvac0", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_pva0_2_base_r(), + .end_abs_pa = addr_map_rpg_pm_pva0_2_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, +}; + +hwpm_ip_perfmux t234_pva_perfmux_static_array[ + T234_HWPM_IP_PVA_NUM_PERFMUX_PER_INST * + T234_HWPM_IP_PVA_NUM_INSTANCES] = { + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_pva0_pm_base_r(), + .end_abs_pa = addr_map_pva0_pm_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_pva0_pm_alist, + .alist_size = ARRAY_SIZE(t234_pva0_pm_alist), + .fake_registers = NULL, + }, 
+}; + +struct hwpm_ip t234_hwpm_ip_pva = { + .num_instances = T234_HWPM_IP_PVA_NUM_INSTANCES, + .num_perfmon_per_inst = T234_HWPM_IP_PVA_NUM_PERFMON_PER_INST, + .num_perfmux_per_inst = T234_HWPM_IP_PVA_NUM_PERFMUX_PER_INST, + + .perfmon_range_start = addr_map_rpg_pm_pva0_0_base_r(), + .perfmon_range_end = addr_map_rpg_pm_pva0_2_limit_r(), + .inst_perfmon_stride = addr_map_rpg_pm_pva0_0_limit_r() - + addr_map_rpg_pm_pva0_0_base_r() + 1ULL, + .num_perfmon_slots = 0U, + .ip_perfmon = NULL, + + .perfmux_range_start = addr_map_pva0_pm_base_r(), + .perfmux_range_end = addr_map_pva0_pm_limit_r(), + .inst_perfmux_stride = addr_map_pva0_pm_limit_r() - + addr_map_pva0_pm_base_r() + 1ULL, + .num_perfmux_slots = 0U, + .ip_perfmux = NULL, + + .override_enable = false, + .fs_mask = 0U, + .perfmon_static_array = t234_pva_perfmon_static_array, + .perfmux_static_array = t234_pva_perfmux_static_array, + .reserved = false, +}; diff --git a/hal/t234/ip/pva/t234_hwpm_ip_pva.h b/hal/t234/ip/pva/t234_hwpm_ip_pva.h new file mode 100644 index 0000000..bdbe7ff --- /dev/null +++ b/hal/t234/ip/pva/t234_hwpm_ip_pva.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef T234_HWPM_IP_PVA_H +#define T234_HWPM_IP_PVA_H + +#if defined(CONFIG_SOC_HWPM_IP_PVA) +#define T234_HWPM_ACTIVE_IP_PVA \ + DEFINE_SOC_HWPM_ACTIVE_IP(T234_HWPM_IP_PVA), + +/* This data should ideally be available in HW headers */ +#define T234_HWPM_IP_PVA_NUM_INSTANCES 1U +#define T234_HWPM_IP_PVA_NUM_PERFMON_PER_INST 3U +#define T234_HWPM_IP_PVA_NUM_PERFMUX_PER_INST 1U + +extern struct hwpm_ip t234_hwpm_ip_pva; + +#else +#define T234_HWPM_ACTIVE_IP_PVA +#endif + +#endif /* T234_HWPM_IP_PVA_H */ diff --git a/hal/t234/ip/rtr/t234_hwpm_ip_rtr.c b/hal/t234/ip/rtr/t234_hwpm_ip_rtr.c new file mode 100644 index 0000000..5288992 --- /dev/null +++ b/hal/t234/ip/rtr/t234_hwpm_ip_rtr.c @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#include "t234_hwpm_ip_rtr.h" + +#include +#include +#include + +hwpm_ip_perfmon t234_rtr_perfmon_static_array[ + T234_HWPM_IP_RTR_NUM_PERFMON_PER_INST * + T234_HWPM_IP_RTR_NUM_INSTANCES] = { + +}; + +hwpm_ip_perfmux t234_rtr_perfmux_static_array[ + T234_HWPM_IP_RTR_NUM_PERFMUX_PER_INST * + T234_HWPM_IP_RTR_NUM_INSTANCES] = { + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "rtr", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rtr_base_r(), + .end_abs_pa = addr_map_rtr_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rtr_base_r(), + .alist = t234_rtr_alist, + .alist_size = ARRAY_SIZE(t234_rtr_alist), + .fake_registers = NULL, + }, + /* PMA from RTR perspective */ + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "pma", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_pma_base_r(), + .end_abs_pa = addr_map_pma_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_pma_base_r(), + .alist = t234_pma_res_cmd_slice_rtr_alist, + .alist_size = ARRAY_SIZE(t234_pma_res_cmd_slice_rtr_alist), + .fake_registers = NULL, + }, +}; + +struct hwpm_ip t234_hwpm_ip_rtr = { + .num_instances = T234_HWPM_IP_RTR_NUM_INSTANCES, + .num_perfmon_per_inst = T234_HWPM_IP_RTR_NUM_PERFMON_PER_INST, + .num_perfmux_per_inst = T234_HWPM_IP_RTR_NUM_PERFMUX_PER_INST, + + .perfmon_range_start = 0ULL, + .perfmon_range_end = 0ULL, + .inst_perfmon_stride = 0ULL, + .num_perfmon_slots = 0U, + .ip_perfmon = NULL, + + .perfmux_range_start = addr_map_pma_base_r(), + .perfmux_range_end = addr_map_rtr_limit_r(), + /* Use PMA stride which is larger block than RTR */ + .inst_perfmux_stride = addr_map_pma_limit_r() - + addr_map_pma_base_r() + 1ULL, + .num_perfmux_slots = 0U, + .ip_perfmux = NULL, + + .override_enable = false, + .fs_mask = 0U, + .perfmon_static_array = t234_rtr_perfmon_static_array, + .perfmux_static_array = t234_rtr_perfmux_static_array, + .reserved = false, +}; diff --git a/hal/t234/ip/rtr/t234_hwpm_ip_rtr.h b/hal/t234/ip/rtr/t234_hwpm_ip_rtr.h new file mode 100644 index 0000000..7fc4f0a --- /dev/null +++ b/hal/t234/ip/rtr/t234_hwpm_ip_rtr.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef T234_HWPM_IP_RTR_H +#define T234_HWPM_IP_RTR_H + +#define T234_HWPM_ACTIVE_IP_RTR \ + DEFINE_SOC_HWPM_ACTIVE_IP(T234_HWPM_IP_RTR), + +/* This data should ideally be available in HW headers */ +#define T234_HWPM_IP_RTR_NUM_INSTANCES 1U +#define T234_HWPM_IP_RTR_NUM_PERFMON_PER_INST 0U +#define T234_HWPM_IP_RTR_NUM_PERFMUX_PER_INST 2U + +extern struct hwpm_ip t234_hwpm_ip_rtr; + +#endif /* T234_HWPM_IP_RTR_H */ diff --git a/hal/t234/ip/scf/t234_hwpm_ip_scf.c b/hal/t234/ip/scf/t234_hwpm_ip_scf.c new file mode 100644 index 0000000..c4d6319 --- /dev/null +++ b/hal/t234/ip/scf/t234_hwpm_ip_scf.c @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include "t234_hwpm_ip_scf.h" + +#include +#include +#include + +hwpm_ip_perfmon t234_scf_perfmon_static_array[ + T234_HWPM_IP_SCF_NUM_PERFMON_PER_INST * + T234_HWPM_IP_SCF_NUM_INSTANCES] = { + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_scf", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_scf_base_r(), + .end_abs_pa = addr_map_rpg_pm_scf_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, +}; + +hwpm_ip_perfmux t234_scf_perfmux_static_array[ + T234_HWPM_IP_SCF_NUM_PERFMUX_PER_INST * + T234_HWPM_IP_SCF_NUM_INSTANCES] = { + +}; + +struct hwpm_ip t234_hwpm_ip_scf = { + .num_instances = T234_HWPM_IP_SCF_NUM_INSTANCES, + .num_perfmon_per_inst = T234_HWPM_IP_SCF_NUM_PERFMON_PER_INST, + .num_perfmux_per_inst = T234_HWPM_IP_SCF_NUM_PERFMUX_PER_INST, + + .perfmon_range_start = addr_map_rpg_pm_scf_base_r(), + .perfmon_range_end = addr_map_rpg_pm_scf_limit_r(), + .inst_perfmon_stride = addr_map_rpg_pm_scf_limit_r() - + addr_map_rpg_pm_scf_base_r() + 1ULL, + .num_perfmon_slots = 0U, + .ip_perfmon = NULL, + + .perfmux_range_start = 0ULL, + .perfmux_range_end = 0ULL, + .inst_perfmux_stride = 0U, + .num_perfmux_slots = 0U, + .ip_perfmux = NULL, + + .override_enable = false, + .fs_mask = 0U, + .perfmon_static_array = t234_scf_perfmon_static_array, + .perfmux_static_array = t234_scf_perfmux_static_array, + .reserved = false, +}; diff --git a/hal/t234/ip/scf/t234_hwpm_ip_scf.h b/hal/t234/ip/scf/t234_hwpm_ip_scf.h new file mode 100644 index 0000000..38d1a10 --- /dev/null +++ b/hal/t234/ip/scf/t234_hwpm_ip_scf.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + */ + +#ifndef T234_HWPM_IP_SCF_H +#define T234_HWPM_IP_SCF_H + +#if defined(CONFIG_SOC_HWPM_IP_SCF) +#define T234_HWPM_ACTIVE_IP_SCF \ + DEFINE_SOC_HWPM_ACTIVE_IP(T234_HWPM_IP_SCF), + +/* This data should ideally be available in HW headers */ +#define T234_HWPM_IP_SCF_NUM_INSTANCES 1U +#define T234_HWPM_IP_SCF_NUM_PERFMON_PER_INST 1U +#define T234_HWPM_IP_SCF_NUM_PERFMUX_PER_INST 0U + +extern struct hwpm_ip t234_hwpm_ip_scf; + +#else +#define T234_HWPM_ACTIVE_IP_SCF +#endif + +#endif /* T234_HWPM_IP_SCF_H */ diff --git a/hal/t234/ip/vi/t234_hwpm_ip_vi.c b/hal/t234/ip/vi/t234_hwpm_ip_vi.c new file mode 100644 index 0000000..a66fd92 --- /dev/null +++ b/hal/t234/ip/vi/t234_hwpm_ip_vi.c @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include "t234_hwpm_ip_vi.h" + +#include +#include +#include + +hwpm_ip_perfmon t234_vi_perfmon_static_array[ + T234_HWPM_IP_VI_NUM_PERFMON_PER_INST * + T234_HWPM_IP_VI_NUM_INSTANCES] = { + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_vi0", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_vi0_base_r(), + .end_abs_pa = addr_map_rpg_pm_vi0_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(1), + .dt_mmio = NULL, + .name = "perfmon_vi1", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_vi1_base_r(), + .end_abs_pa = addr_map_rpg_pm_vi1_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, +}; + +hwpm_ip_perfmux t234_vi_perfmux_static_array[ + T234_HWPM_IP_VI_NUM_PERFMUX_PER_INST * + T234_HWPM_IP_VI_NUM_INSTANCES] = { + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(1), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_vi2_thi_base_r(), + .end_abs_pa = addr_map_vi2_thi_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_vi_thi_alist, + .alist_size = ARRAY_SIZE(t234_vi_thi_alist), + .fake_registers = NULL, + }, + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_vi_thi_base_r(), + .end_abs_pa = addr_map_vi_thi_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_vi_thi_alist, + 
.alist_size = ARRAY_SIZE(t234_vi_thi_alist), + .fake_registers = NULL, + }, +}; + +struct hwpm_ip t234_hwpm_ip_vi = { + .num_instances = T234_HWPM_IP_VI_NUM_INSTANCES, + .num_perfmon_per_inst = T234_HWPM_IP_VI_NUM_PERFMON_PER_INST, + .num_perfmux_per_inst = T234_HWPM_IP_VI_NUM_PERFMUX_PER_INST, + + .perfmon_range_start = addr_map_rpg_pm_vi0_base_r(), + .perfmon_range_end = addr_map_rpg_pm_vi1_limit_r(), + .inst_perfmon_stride = addr_map_rpg_pm_vi0_limit_r() - + addr_map_rpg_pm_vi0_base_r() + 1ULL, + .num_perfmon_slots = 0U, + .ip_perfmon = NULL, + + .perfmux_range_start = addr_map_vi2_thi_base_r(), + .perfmux_range_end = addr_map_vi_thi_limit_r(), + .inst_perfmux_stride = addr_map_vi2_thi_limit_r() - + addr_map_vi2_thi_base_r() + 1ULL, + .num_perfmux_slots = 0U, + .ip_perfmux = NULL, + + .override_enable = false, + .fs_mask = 0U, + .perfmon_static_array = t234_vi_perfmon_static_array, + .perfmux_static_array = t234_vi_perfmux_static_array, + .reserved = false, +}; diff --git a/hal/t234/ip/vi/t234_hwpm_ip_vi.h b/hal/t234/ip/vi/t234_hwpm_ip_vi.h new file mode 100644 index 0000000..b64ce9b --- /dev/null +++ b/hal/t234/ip/vi/t234_hwpm_ip_vi.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef T234_HWPM_IP_VI_H +#define T234_HWPM_IP_VI_H + +#if defined(CONFIG_SOC_HWPM_IP_VI) +#define T234_HWPM_ACTIVE_IP_VI \ + DEFINE_SOC_HWPM_ACTIVE_IP(T234_HWPM_IP_VI), + +/* This data should ideally be available in HW headers */ +#define T234_HWPM_IP_VI_NUM_INSTANCES 2U +#define T234_HWPM_IP_VI_NUM_PERFMON_PER_INST 1U +#define T234_HWPM_IP_VI_NUM_PERFMUX_PER_INST 1U + +extern struct hwpm_ip t234_hwpm_ip_vi; + +#else +#define T234_HWPM_ACTIVE_IP_VI +#endif + +#endif /* T234_HWPM_IP_VI_H */ diff --git a/hal/t234/ip/vic/t234_hwpm_ip_vic.c b/hal/t234/ip/vic/t234_hwpm_ip_vic.c new file mode 100644 index 0000000..e09612a --- /dev/null +++ b/hal/t234/ip/vic/t234_hwpm_ip_vic.c @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#include "t234_hwpm_ip_vic.h" + +#include +#include +#include + +hwpm_ip_perfmon t234_vic_perfmon_static_array[ + T234_HWPM_IP_VIC_NUM_PERFMON_PER_INST * + T234_HWPM_IP_VIC_NUM_INSTANCES] = { + { + .is_hwpm_element = true, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = "perfmon_vica0", + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_rpg_pm_vic_base_r(), + .end_abs_pa = addr_map_rpg_pm_vic_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = addr_map_rpg_pm_base_r(), + .alist = t234_perfmon_alist, + .alist_size = ARRAY_SIZE(t234_perfmon_alist), + .fake_registers = NULL, + }, +}; + +hwpm_ip_perfmux t234_vic_perfmux_static_array[ + T234_HWPM_IP_VIC_NUM_PERFMUX_PER_INST * + T234_HWPM_IP_VIC_NUM_INSTANCES] = { + { + .is_hwpm_element = false, + .hw_inst_mask = BIT(0), + .dt_mmio = NULL, + .name = {'\0'}, + .ip_ops = { + .ip_base_address = 0ULL, + .ip_index = 0U, + .ip_dev = NULL, + .hwpm_ip_pm = NULL, + .hwpm_ip_reg_op = NULL, + }, + .start_abs_pa = addr_map_vic_base_r(), + .end_abs_pa = addr_map_vic_limit_r(), + .start_pa = 0ULL, + .end_pa = 0ULL, + .base_pa = 0ULL, + .alist = t234_vic_alist, + .alist_size = ARRAY_SIZE(t234_vic_alist), + .fake_registers = NULL, + }, +}; + +struct hwpm_ip t234_hwpm_ip_vic = { + .num_instances = T234_HWPM_IP_VIC_NUM_INSTANCES, + .num_perfmon_per_inst = T234_HWPM_IP_VIC_NUM_PERFMON_PER_INST, + .num_perfmux_per_inst = T234_HWPM_IP_VIC_NUM_PERFMUX_PER_INST, + + .perfmon_range_start = addr_map_rpg_pm_vic_base_r(), + .perfmon_range_end = addr_map_rpg_pm_vic_limit_r(), + .inst_perfmon_stride = addr_map_rpg_pm_vic_limit_r() - + addr_map_rpg_pm_vic_base_r() + 1ULL, + .num_perfmon_slots = 0U, + .ip_perfmon = NULL, + + .perfmux_range_start = addr_map_vic_base_r(), + .perfmux_range_end = addr_map_vic_limit_r(), + .inst_perfmux_stride = addr_map_vic_limit_r() - + addr_map_vic_base_r() + 1ULL, + .num_perfmux_slots = 0U, + .ip_perfmux = NULL, + + .override_enable = false, + .fs_mask = 0U, + .perfmon_static_array = t234_vic_perfmon_static_array, + .perfmux_static_array = t234_vic_perfmux_static_array, + .reserved = false, +}; diff --git a/hal/t234/ip/vic/t234_hwpm_ip_vic.h b/hal/t234/ip/vic/t234_hwpm_ip_vic.h new file mode 100644 index 0000000..79af48a --- /dev/null +++ b/hal/t234/ip/vic/t234_hwpm_ip_vic.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef T234_HWPM_IP_VIC_H +#define T234_HWPM_IP_VIC_H + +#if defined(CONFIG_SOC_HWPM_IP_VIC) +#define T234_HWPM_ACTIVE_IP_VIC \ + DEFINE_SOC_HWPM_ACTIVE_IP(T234_HWPM_IP_VIC), + +/* This data should ideally be available in HW headers */ +#define T234_HWPM_IP_VIC_NUM_INSTANCES 1U +#define T234_HWPM_IP_VIC_NUM_PERFMON_PER_INST 1U +#define T234_HWPM_IP_VIC_NUM_PERFMUX_PER_INST 1U + +extern struct hwpm_ip t234_hwpm_ip_vic; + +#else +#define T234_HWPM_ACTIVE_IP_VIC +#endif + +#endif /* T234_HWPM_IP_VIC_H */ diff --git a/hal/t234/t234_hwpm_alist_utils.c b/hal/t234/t234_hwpm_alist_utils.c new file mode 100644 index 0000000..2b2c3fb --- /dev/null +++ b/hal/t234/t234_hwpm_alist_utils.c @@ -0,0 +1,275 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include + +#include +#include +#include +#include +#include + +size_t t234_hwpm_get_alist_buf_size(struct tegra_soc_hwpm *hwpm) +{ + return sizeof(struct allowlist); +} + +int t234_hwpm_zero_alist_regs(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip_aperture *aperture) +{ + u32 alist_idx = 0U; + + tegra_hwpm_fn(hwpm, " "); + + for (alist_idx = 0; alist_idx < aperture->alist_size; alist_idx++) { + if (aperture->alist[alist_idx].zero_at_init) { + regops_writel(hwpm, aperture, + aperture->start_abs_pa + + aperture->alist[alist_idx].reg_offset, 0U); + } + } + return 0; +} + +int t234_hwpm_get_alist_size(struct tegra_soc_hwpm *hwpm) +{ + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + u32 ip_idx; + u32 perfmux_idx, perfmon_idx; + unsigned long inst_idx = 0UL; + unsigned long floorsweep_info = 0UL; + struct hwpm_ip *chip_ip = NULL; + hwpm_ip_perfmux *perfmux = NULL; + hwpm_ip_perfmon *perfmon = NULL; + + tegra_hwpm_fn(hwpm, " "); + + for (ip_idx = 0U; ip_idx < T234_HWPM_IP_MAX; ip_idx++) { + chip_ip = active_chip->chip_ips[ip_idx]; + + /* Skip unavailable IPs */ + if (!chip_ip->reserved) { + continue; + } + + if (chip_ip->fs_mask == 0U) { + /* No IP instance is available */ + continue; + } + + floorsweep_info = (unsigned long)chip_ip->fs_mask; + + for_each_set_bit(inst_idx, &floorsweep_info, 32U) { + /* Add perfmux alist size to full alist size */ + for (perfmux_idx = 0U; + perfmux_idx < chip_ip->num_perfmux_slots; + perfmux_idx++) { + perfmux = chip_ip->ip_perfmux[perfmux_idx]; + + if (perfmux == NULL) { + continue; + } + + if (perfmux->hw_inst_mask != BIT(inst_idx)) { + continue; + } + + if (perfmux->alist) { + hwpm->full_alist_size += + perfmux->alist_size; + } else { + tegra_hwpm_err(hwpm, "IP %d" + " perfmux %d NULL alist", + ip_idx, perfmux_idx); + } + } + + /* Add perfmon alist size to full alist size */ + for (perfmon_idx = 0U; + perfmon_idx < chip_ip->num_perfmon_slots; + perfmon_idx++) { + perfmon = chip_ip->ip_perfmon[perfmon_idx]; + + if (perfmon == NULL) { + continue; + } + + if (perfmon->hw_inst_mask != BIT(inst_idx)) { + continue; + } + + if (perfmon->alist) { + hwpm->full_alist_size += + perfmon->alist_size; + } else { + tegra_hwpm_err(hwpm, "IP %d" + " perfmon %d NULL alist", + ip_idx, perfmon_idx); + } + } + } + 
} + + return 0; +} + +static int t234_hwpm_copy_alist(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip_aperture *aperture, u64 *full_alist, + u64 *full_alist_idx) +{ + u64 f_alist_idx = *full_alist_idx; + u64 alist_idx = 0ULL; + + tegra_hwpm_fn(hwpm, " "); + + if (aperture->alist == NULL) { + tegra_hwpm_err(hwpm, "NULL allowlist in aperture"); + return -EINVAL; + } + + for (alist_idx = 0ULL; alist_idx < aperture->alist_size; alist_idx++) { + if (f_alist_idx >= hwpm->full_alist_size) { + tegra_hwpm_err(hwpm, "No space in full_alist"); + return -ENOMEM; + } + + full_alist[f_alist_idx++] = (aperture->start_abs_pa + + aperture->alist[alist_idx].reg_offset); + } + + /* Store next available index */ + *full_alist_idx = f_alist_idx; + + return 0; +} + +int t234_hwpm_combine_alist(struct tegra_soc_hwpm *hwpm, u64 *alist) +{ + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + u32 ip_idx; + u32 perfmux_idx, perfmon_idx; + unsigned long inst_idx = 0UL; + unsigned long floorsweep_info = 0UL; + struct hwpm_ip *chip_ip = NULL; + hwpm_ip_perfmux *perfmux = NULL; + hwpm_ip_perfmon *perfmon = NULL; + u64 full_alist_idx = 0; + int err = 0; + + tegra_hwpm_fn(hwpm, " "); + + for (ip_idx = 0U; ip_idx < T234_HWPM_IP_MAX; ip_idx++) { + chip_ip = active_chip->chip_ips[ip_idx]; + + /* Skip unavailable IPs */ + if (!chip_ip->reserved) { + continue; + } + + if (chip_ip->fs_mask == 0U) { + /* No IP instance is available */ + continue; + } + + floorsweep_info = (unsigned long)chip_ip->fs_mask; + + for_each_set_bit(inst_idx, &floorsweep_info, 32U) { + /* Copy perfmux alist to full alist array */ + for (perfmux_idx = 0U; + perfmux_idx < chip_ip->num_perfmux_slots; + perfmux_idx++) { + perfmux = chip_ip->ip_perfmux[perfmux_idx]; + + if (perfmux == NULL) { + continue; + } + + if (perfmux->hw_inst_mask != BIT(inst_idx)) { + continue; + } + + err = t234_hwpm_copy_alist(hwpm, perfmux, + alist, &full_alist_idx); + if (err != 0) { + tegra_hwpm_err(hwpm, "IP %d" + " perfmux %d alist copy failed", + ip_idx, perfmux_idx); + goto fail; + } + } + + /* Copy perfmon alist to full alist array */ + for (perfmon_idx = 0U; + perfmon_idx < chip_ip->num_perfmon_slots; + perfmon_idx++) { + perfmon = chip_ip->ip_perfmon[perfmon_idx]; + + if (perfmon == NULL) { + continue; + } + + if (perfmon->hw_inst_mask != BIT(inst_idx)) { + continue; + } + + err = t234_hwpm_copy_alist(hwpm, perfmon, + alist, &full_alist_idx); + if (err != 0) { + tegra_hwpm_err(hwpm, "IP %d" + " perfmon %d alist copy failed", + ip_idx, perfmon_idx); + goto fail; + } + } + } + } + + /* Check size of full alist with hwpm->full_alist_size*/ + if (full_alist_idx != hwpm->full_alist_size) { + tegra_hwpm_err(hwpm, "full_alist_size 0x%llx doesn't match " + "max full_alist_idx 0x%llx", + hwpm->full_alist_size, full_alist_idx); + err = -EINVAL; + } + +fail: + return err; +} + +bool t234_hwpm_check_alist(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip_aperture *aperture, u64 phys_addr) +{ + u32 alist_idx; + u64 reg_offset; + + tegra_hwpm_fn(hwpm, " "); + + if (!aperture) { + tegra_hwpm_err(hwpm, "Aperture is NULL"); + return false; + } + if (!aperture->alist) { + tegra_hwpm_err(hwpm, "NULL allowlist in aperture"); + return false; + } + + reg_offset = phys_addr - aperture->start_abs_pa; + + for (alist_idx = 0; alist_idx < aperture->alist_size; alist_idx++) { + if (reg_offset == aperture->alist[alist_idx].reg_offset) { + return true; + } + } + return false; +} diff --git a/hal/t234/t234_hwpm_aperture_utils.c b/hal/t234/t234_hwpm_aperture_utils.c new file mode 100644 index 
0000000..4e61858 --- /dev/null +++ b/hal/t234/t234_hwpm_aperture_utils.c @@ -0,0 +1,490 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +int t234_hwpm_reserve_pma(struct tegra_soc_hwpm *hwpm) +{ + u32 perfmux_idx = 0U, perfmon_idx; + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + struct hwpm_ip *chip_ip_pma = active_chip->chip_ips[T234_HWPM_IP_PMA]; + hwpm_ip_perfmux *pma_perfmux = NULL; + hwpm_ip_perfmon *pma_perfmon = NULL; + int ret = 0, err = 0; + + tegra_hwpm_fn(hwpm, " "); + + /* Make sure that PMA is not reserved */ + if (chip_ip_pma->reserved == true) { + tegra_hwpm_err(hwpm, "PMA already reserved, ignoring"); + return 0; + } + + /* Reserve PMA perfmux */ + for (perfmux_idx = 0U; perfmux_idx < chip_ip_pma->num_perfmux_slots; + perfmux_idx++) { + pma_perfmux = chip_ip_pma->ip_perfmux[perfmux_idx]; + + if (pma_perfmux == NULL) { + continue; + } + + /* Since PMA is hwpm component, use perfmon reserve function */ + ret = t234_hwpm_perfmon_reserve(hwpm, pma_perfmux); + if (ret != 0) { + tegra_hwpm_err(hwpm, + "PMA perfmux %d reserve failed", perfmux_idx); + return ret; + } + + chip_ip_pma->fs_mask |= pma_perfmux->hw_inst_mask; + } + + /* Reserve PMA perfmons */ + for (perfmon_idx = 0U; perfmon_idx < chip_ip_pma->num_perfmon_slots; + perfmon_idx++) { + pma_perfmon = chip_ip_pma->ip_perfmon[perfmon_idx]; + + if (pma_perfmon == NULL) { + continue; + } + + ret = t234_hwpm_perfmon_reserve(hwpm, pma_perfmon); + if (ret != 0) { + tegra_hwpm_err(hwpm, + "PMA perfmon %d reserve failed", perfmon_idx); + goto fail; + } + } + + chip_ip_pma->reserved = true; + + return 0; +fail: + for (perfmux_idx = 0U; perfmux_idx < chip_ip_pma->num_perfmux_slots; + perfmux_idx++) { + pma_perfmux = chip_ip_pma->ip_perfmux[perfmux_idx]; + + if (pma_perfmux == NULL) { + continue; + } + + /* Since PMA is hwpm component, use perfmon release function */ + err = t234_hwpm_perfmon_release(hwpm, pma_perfmux); + if (err != 0) { + tegra_hwpm_err(hwpm, + "PMA perfmux %d release failed", perfmux_idx); + } + chip_ip_pma->fs_mask &= ~(pma_perfmux->hw_inst_mask); + } + return ret; +} + +int t234_hwpm_release_pma(struct tegra_soc_hwpm *hwpm) +{ + int ret = 0; + u32 perfmux_idx, perfmon_idx; + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + struct hwpm_ip *chip_ip_pma = active_chip->chip_ips[T234_HWPM_IP_PMA]; + hwpm_ip_perfmux *pma_perfmux = NULL; + hwpm_ip_perfmon *pma_perfmon = NULL; + + tegra_hwpm_fn(hwpm, " "); + + if (!chip_ip_pma->reserved) { + tegra_hwpm_dbg(hwpm, hwpm_info, "PMA wasn't mapped, ignoring."); + return 0; + } + + /* Release PMA perfmux */ + for (perfmux_idx = 0U; perfmux_idx < chip_ip_pma->num_perfmux_slots; + perfmux_idx++) { + pma_perfmux = chip_ip_pma->ip_perfmux[perfmux_idx]; + + if (pma_perfmux == NULL) { + continue; + } + + /* Since PMA is hwpm component, use perfmon release function */ + ret = t234_hwpm_perfmon_release(hwpm, pma_perfmux); + if (ret != 0) { + 
tegra_hwpm_err(hwpm, + "PMA perfmux %d release failed", perfmux_idx); + return ret; + } + chip_ip_pma->fs_mask &= ~(pma_perfmux->hw_inst_mask); + } + + /* Release PMA perfmons */ + for (perfmon_idx = 0U; perfmon_idx < chip_ip_pma->num_perfmon_slots; + perfmon_idx++) { + pma_perfmon = chip_ip_pma->ip_perfmon[perfmon_idx]; + + if (pma_perfmon == NULL) { + continue; + } + + ret = t234_hwpm_perfmon_release(hwpm, pma_perfmon); + if (ret != 0) { + tegra_hwpm_err(hwpm, + "PMA perfmon %d release failed", perfmon_idx); + return ret; + } + } + + chip_ip_pma->reserved = false; + + return 0; +} + +int t234_hwpm_reserve_rtr(struct tegra_soc_hwpm *hwpm) +{ + int ret = 0; + u32 perfmux_idx = 0U, perfmon_idx; + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + struct hwpm_ip *chip_ip_rtr = active_chip->chip_ips[T234_HWPM_IP_RTR]; + struct hwpm_ip *chip_ip_pma = active_chip->chip_ips[T234_HWPM_IP_PMA]; + hwpm_ip_perfmux *pma_perfmux = chip_ip_pma->ip_perfmux[0U]; + hwpm_ip_perfmux *rtr_perfmux = NULL; + + tegra_hwpm_fn(hwpm, " "); + + /* Verify that PMA is reserved before RTR */ + if (chip_ip_pma->reserved == false) { + tegra_hwpm_err(hwpm, "PMA should be reserved before RTR"); + return -EINVAL; + } + + /* Make sure that RTR is not reserved */ + if (chip_ip_rtr->reserved == true) { + tegra_hwpm_err(hwpm, "RTR already reserved, ignoring"); + return 0; + } + + /* Reserve RTR perfmuxes */ + for (perfmux_idx = 0U; perfmux_idx < chip_ip_rtr->num_perfmux_slots; + perfmux_idx++) { + rtr_perfmux = chip_ip_rtr->ip_perfmux[perfmux_idx]; + + if (rtr_perfmux == NULL) { + continue; + } + + if (rtr_perfmux->start_abs_pa == pma_perfmux->start_abs_pa) { + /* This is PMA perfmux wrt RTR aperture */ + rtr_perfmux->start_pa = pma_perfmux->start_pa; + rtr_perfmux->end_pa = pma_perfmux->end_pa; + rtr_perfmux->dt_mmio = pma_perfmux->dt_mmio; + if (hwpm->fake_registers_enabled) { + rtr_perfmux->fake_registers = + pma_perfmux->fake_registers; + } + } else { + /* Since RTR is hwpm component, + * use perfmon reserve function */ + ret = t234_hwpm_perfmon_reserve(hwpm, rtr_perfmux); + if (ret != 0) { + tegra_hwpm_err(hwpm, + "RTR perfmux %d reserve failed", + perfmux_idx); + return ret; + } + } + chip_ip_rtr->fs_mask |= rtr_perfmux->hw_inst_mask; + } + + /* Reserve RTR perfmons */ + for (perfmon_idx = 0U; perfmon_idx < chip_ip_rtr->num_perfmon_slots; + perfmon_idx++) { + /* No perfmons in RTR */ + } + + chip_ip_rtr->reserved = true; + + return ret; +} + +int t234_hwpm_release_rtr(struct tegra_soc_hwpm *hwpm) +{ + int ret = 0; + u32 perfmux_idx, perfmon_idx; + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + struct hwpm_ip *chip_ip_rtr = active_chip->chip_ips[T234_HWPM_IP_RTR]; + struct hwpm_ip *chip_ip_pma = active_chip->chip_ips[T234_HWPM_IP_PMA]; + hwpm_ip_perfmux *pma_perfmux = chip_ip_pma->ip_perfmux[0U]; + hwpm_ip_perfmux *rtr_perfmux = NULL; + + tegra_hwpm_fn(hwpm, " "); + + /* Verify that PMA isn't released before RTR */ + if (chip_ip_pma->reserved == false) { + tegra_hwpm_err(hwpm, "PMA shouldn't be released before RTR"); + return -EINVAL; + } + + if (!chip_ip_rtr->reserved) { + tegra_hwpm_dbg(hwpm, hwpm_info, "RTR wasn't mapped, ignoring."); + return 0; + } + + /* Release RTR perfmux */ + for (perfmux_idx = 0U; perfmux_idx < chip_ip_rtr->num_perfmux_slots; + perfmux_idx++) { + rtr_perfmux = chip_ip_rtr->ip_perfmux[perfmux_idx]; + + if (rtr_perfmux == NULL) { + continue; + } + + if (rtr_perfmux->start_abs_pa == pma_perfmux->start_abs_pa) { + /* This is PMA perfmux wrt RTR aperture */ + 
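/*
 * Illustrative note: this perfmux aliases the PMA aperture. During reserve,
 * RTR only borrowed the existing mapping (start_pa/end_pa/dt_mmio copied
 * from pma_perfmux) instead of mapping the range a second time, so the
 * release path must clear the borrowed fields rather than unmap them, e.g.:
 *
 *	if (aliases_pma(rtr_perfmux))
 *		forget_mapping(rtr_perfmux);    // clear copied fields only
 *	else
 *		t234_hwpm_perfmon_release(hwpm, rtr_perfmux);
 *
 * aliases_pma()/forget_mapping() are hypothetical names for the two
 * branches implemented inline below.
 */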
rtr_perfmux->start_pa = 0ULL; + rtr_perfmux->end_pa = 0ULL; + rtr_perfmux->dt_mmio = NULL; + if (hwpm->fake_registers_enabled) { + rtr_perfmux->fake_registers = NULL; + } + } else { + /* RTR is hwpm component, use perfmon release func */ + ret = t234_hwpm_perfmon_release(hwpm, rtr_perfmux); + if (ret != 0) { + tegra_hwpm_err(hwpm, + "RTR perfmux %d release failed", + perfmux_idx); + return ret; + } + } + chip_ip_rtr->fs_mask &= ~(rtr_perfmux->hw_inst_mask); + } + + /* Release RTR perfmon */ + for (perfmon_idx = 0U; perfmon_idx < chip_ip_rtr->num_perfmon_slots; + perfmon_idx++) { + /* No RTR perfmons */ + } + + chip_ip_rtr->reserved = false; + return 0; +} + +int t234_hwpm_disable_triggers(struct tegra_soc_hwpm *hwpm) +{ + int ret = 0; + bool timeout = false; + u32 reg_val = 0U; + u32 field_mask = 0U; + u32 field_val = 0U; + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + /* Currently, PMA has only one perfmux */ + hwpm_ip_perfmux *pma_perfmux = + active_chip->chip_ips[T234_HWPM_IP_PMA]->ip_perfmux[0U]; + /* Currently, RTR specific perfmux is added at index 0 */ + hwpm_ip_perfmux *rtr_perfmux = &active_chip->chip_ips[ + T234_HWPM_IP_RTR]->perfmux_static_array[0U]; + + tegra_hwpm_fn(hwpm, " "); + + /* Disable PMA triggers */ + reg_val = tegra_hwpm_readl(hwpm, pma_perfmux, + pmasys_trigger_config_user_r(0)); + reg_val = set_field(reg_val, pmasys_trigger_config_user_pma_pulse_m(), + pmasys_trigger_config_user_pma_pulse_disable_f()); + tegra_hwpm_writel(hwpm, pma_perfmux, + pmasys_trigger_config_user_r(0), reg_val); + + tegra_hwpm_writel(hwpm, pma_perfmux, + pmasys_sys_trigger_start_mask_r(), 0); + tegra_hwpm_writel(hwpm, pma_perfmux, + pmasys_sys_trigger_start_maskb_r(), 0); + tegra_hwpm_writel(hwpm, pma_perfmux, + pmasys_sys_trigger_stop_mask_r(), 0); + tegra_hwpm_writel(hwpm, pma_perfmux, + pmasys_sys_trigger_stop_maskb_r(), 0); + + /* Wait for PERFMONs, ROUTER, and PMA to idle */ + timeout = HWPM_TIMEOUT(pmmsys_sys0router_perfmonstatus_merged_v( + tegra_hwpm_readl(hwpm, rtr_perfmux, + pmmsys_sys0router_perfmonstatus_r())) == 0U, + "NV_PERF_PMMSYS_SYS0ROUTER_PERFMONSTATUS_MERGED_EMPTY"); + if (timeout && ret == 0) { + ret = -EIO; + } + + timeout = HWPM_TIMEOUT(pmmsys_sys0router_enginestatus_status_v( + tegra_hwpm_readl(hwpm, rtr_perfmux, + pmmsys_sys0router_enginestatus_r())) == + pmmsys_sys0router_enginestatus_status_empty_v(), + "NV_PERF_PMMSYS_SYS0ROUTER_ENGINESTATUS_STATUS_EMPTY"); + if (timeout && ret == 0) { + ret = -EIO; + } + + field_mask = pmasys_enginestatus_status_m() | + pmasys_enginestatus_rbufempty_m(); + field_val = pmasys_enginestatus_status_empty_f() | + pmasys_enginestatus_rbufempty_empty_f(); + timeout = HWPM_TIMEOUT((tegra_hwpm_readl(hwpm, pma_perfmux, + pmasys_enginestatus_r()) & field_mask) == field_val, + "NV_PERF_PMASYS_ENGINESTATUS"); + if (timeout && ret == 0) { + ret = -EIO; + } + + return ret; +} + +int t234_hwpm_init_prod_values(struct tegra_soc_hwpm *hwpm) +{ + u32 reg_val = 0U; + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + /* Currently, PMA has only one perfmux */ + hwpm_ip_perfmux *pma_perfmux = + active_chip->chip_ips[T234_HWPM_IP_PMA]->ip_perfmux[0U]; + + tegra_hwpm_fn(hwpm, " "); + + reg_val = tegra_hwpm_readl(hwpm, pma_perfmux, pmasys_controlb_r()); + reg_val = set_field(reg_val, + pmasys_controlb_coalesce_timeout_cycles_m(), + pmasys_controlb_coalesce_timeout_cycles__prod_f()); + tegra_hwpm_writel(hwpm, pma_perfmux, pmasys_controlb_r(), reg_val); + + reg_val = tegra_hwpm_readl(hwpm, pma_perfmux, + 
pmasys_channel_config_user_r(0)); + reg_val = set_field(reg_val, + pmasys_channel_config_user_coalesce_timeout_cycles_m(), + pmasys_channel_config_user_coalesce_timeout_cycles__prod_f()); + tegra_hwpm_writel(hwpm, pma_perfmux, + pmasys_channel_config_user_r(0), reg_val); + + return 0; +} + +int t234_hwpm_disable_slcg(struct tegra_soc_hwpm *hwpm) +{ + u32 field_mask = 0U; + u32 field_val = 0U; + u32 reg_val = 0U; + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + struct hwpm_ip *pma_ip = NULL, *rtr_ip = NULL; + hwpm_ip_perfmux *pma_perfmux = NULL; + hwpm_ip_perfmux *rtr_perfmux = NULL; + + tegra_hwpm_fn(hwpm, " "); + + if (active_chip == NULL) { + return -ENODEV; + } + + pma_ip = active_chip->chip_ips[T234_HWPM_IP_PMA]; + rtr_ip = active_chip->chip_ips[T234_HWPM_IP_RTR]; + + if ((pma_ip == NULL) || !(pma_ip->reserved)) { + tegra_hwpm_err(hwpm, "PMA uninitialized"); + return -ENODEV; + } + + if ((rtr_ip == NULL) || !(rtr_ip->reserved)) { + tegra_hwpm_err(hwpm, "RTR uninitialized"); + return -ENODEV; + } + + /* Currently, PMA has only one perfmux */ + pma_perfmux = pma_ip->ip_perfmux[0U]; + /* Currently, RTR specific perfmux is added at index 0 */ + rtr_perfmux = &rtr_ip->perfmux_static_array[0U]; + + reg_val = tegra_hwpm_readl(hwpm, pma_perfmux, pmasys_cg2_r()); + reg_val = set_field(reg_val, pmasys_cg2_slcg_m(), + pmasys_cg2_slcg_disabled_f()); + tegra_hwpm_writel(hwpm, pma_perfmux, pmasys_cg2_r(), reg_val); + + field_mask = pmmsys_sys0router_cg2_slcg_perfmon_m() | + pmmsys_sys0router_cg2_slcg_router_m() | + pmmsys_sys0router_cg2_slcg_m(); + field_val = pmmsys_sys0router_cg2_slcg_perfmon_disabled_f() | + pmmsys_sys0router_cg2_slcg_router_disabled_f() | + pmmsys_sys0router_cg2_slcg_disabled_f(); + reg_val = tegra_hwpm_readl(hwpm, rtr_perfmux, + pmmsys_sys0router_cg2_r()); + reg_val = set_field(reg_val, field_mask, field_val); + tegra_hwpm_writel(hwpm, rtr_perfmux, + pmmsys_sys0router_cg2_r(), reg_val); + + return 0; +} + +int t234_hwpm_enable_slcg(struct tegra_soc_hwpm *hwpm) +{ + u32 reg_val = 0U; + u32 field_mask = 0U; + u32 field_val = 0U; + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + struct hwpm_ip *pma_ip = NULL, *rtr_ip = NULL; + hwpm_ip_perfmux *pma_perfmux = NULL; + hwpm_ip_perfmux *rtr_perfmux = NULL; + + tegra_hwpm_fn(hwpm, " "); + + if (active_chip == NULL) { + return -ENODEV; + } + + pma_ip = active_chip->chip_ips[T234_HWPM_IP_PMA]; + rtr_ip = active_chip->chip_ips[T234_HWPM_IP_RTR]; + + if ((pma_ip == NULL) || !(pma_ip->reserved)) { + tegra_hwpm_err(hwpm, "PMA uninitialized"); + return -ENODEV; + } + + if ((rtr_ip == NULL) || !(rtr_ip->reserved)) { + tegra_hwpm_err(hwpm, "RTR uninitialized"); + return -ENODEV; + } + + /* Currently, PMA has only one perfmux */ + pma_perfmux = pma_ip->ip_perfmux[0U]; + /* Currently, RTR specific perfmux is added at index 0 */ + rtr_perfmux = &rtr_ip->perfmux_static_array[0U]; + + reg_val = tegra_hwpm_readl(hwpm, pma_perfmux, pmasys_cg2_r()); + reg_val = set_field(reg_val, pmasys_cg2_slcg_m(), + pmasys_cg2_slcg_enabled_f()); + tegra_hwpm_writel(hwpm, pma_perfmux, pmasys_cg2_r(), reg_val); + + field_mask = pmmsys_sys0router_cg2_slcg_perfmon_m() | + pmmsys_sys0router_cg2_slcg_router_m() | + pmmsys_sys0router_cg2_slcg_m(); + field_val = pmmsys_sys0router_cg2_slcg_perfmon__prod_f() | + pmmsys_sys0router_cg2_slcg_router__prod_f() | + pmmsys_sys0router_cg2_slcg__prod_f(); + reg_val = tegra_hwpm_readl(hwpm, rtr_perfmux, + pmmsys_sys0router_cg2_r()); + reg_val = set_field(reg_val, field_mask, field_val); + 
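/*
 * Illustrative note: the register updates in this file follow a
 * read-modify-write idiom -- read the register, replace only the masked
 * field(s), write it back. Assuming set_field() has the usual semantics,
 * the update is equivalent to:
 *
 *	new_val = (old_val & ~field_mask) | (field_val & field_mask);
 *
 * which leaves all other bits of the clock-gating register untouched.
 */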
tegra_hwpm_writel(hwpm, rtr_perfmux, + pmmsys_sys0router_cg2_r(), reg_val); + + return 0; +} diff --git a/hal/t234/t234_hwpm_init.h b/hal/t234/t234_hwpm_init.h new file mode 100644 index 0000000..9f2fe54 --- /dev/null +++ b/hal/t234/t234_hwpm_init.h @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef T234_SOC_HWPM_INIT_H +#define T234_SOC_HWPM_INIT_H + +struct tegra_soc_hwpm; + +int t234_hwpm_init_chip_info(struct tegra_soc_hwpm *hwpm); + +#endif /* T234_SOC_HWPM_INIT_H */ diff --git a/hal/t234/t234_hwpm_interface_utils.c b/hal/t234/t234_hwpm_interface_utils.c new file mode 100644 index 0000000..09c2579 --- /dev/null +++ b/hal/t234/t234_hwpm_interface_utils.c @@ -0,0 +1,492 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include + +#include +#include +#include +#include + +struct tegra_soc_hwpm_chip t234_chip_info = { + .chip_ips = NULL, + + /* HALs */ + .is_ip_active = t234_hwpm_is_ip_active, + .is_resource_active = t234_hwpm_is_resource_active, + + .extract_ip_ops = t234_hwpm_extract_ip_ops, + .init_fs_info = t234_hwpm_init_fs_info, + .get_fs_info = t234_hwpm_get_fs_info, + + .init_prod_values = t234_hwpm_init_prod_values, + .disable_slcg = t234_hwpm_disable_slcg, + .enable_slcg = t234_hwpm_enable_slcg, + + .reserve_pma = t234_hwpm_reserve_pma, + .reserve_rtr = t234_hwpm_reserve_rtr, + .release_pma = t234_hwpm_release_pma, + .release_rtr = t234_hwpm_release_rtr, + + .reserve_given_resource = t234_hwpm_reserve_given_resource, + .bind_reserved_resources = t234_hwpm_bind_reserved_resources, + .release_all_resources = t234_hwpm_release_all_resources, + .disable_triggers = t234_hwpm_disable_triggers, + + .disable_mem_mgmt = t234_hwpm_disable_mem_mgmt, + .enable_mem_mgmt = t234_hwpm_enable_mem_mgmt, + .invalidate_mem_config = t234_hwpm_invalidate_mem_config, + .stream_mem_bytes = t234_hwpm_stream_mem_bytes, + .disable_pma_streaming = t234_hwpm_disable_pma_streaming, + .update_mem_bytes_get_ptr = t234_hwpm_update_mem_bytes_get_ptr, + .get_mem_bytes_put_ptr = t234_hwpm_get_mem_bytes_put_ptr, + .membuf_overflow_status = t234_hwpm_membuf_overflow_status, + + .get_alist_buf_size = t234_hwpm_get_alist_buf_size, + .zero_alist_regs = t234_hwpm_zero_alist_regs, + .get_alist_size = t234_hwpm_get_alist_size, + .combine_alist = t234_hwpm_combine_alist, + .check_alist = t234_hwpm_check_alist, + + .exec_reg_ops = t234_hwpm_exec_reg_ops, + + .release_sw_setup = t234_hwpm_release_sw_setup, +}; + +bool t234_hwpm_is_ip_active(struct tegra_soc_hwpm *hwpm, + enum tegra_soc_hwpm_ip 
ip_index, u32 *config_ip_index) +{ + u32 config_ip = TEGRA_SOC_HWPM_IP_INACTIVE; + + switch (ip_index) { + case TEGRA_SOC_HWPM_IP_VI: +#if defined(CONFIG_SOC_HWPM_IP_VI) + config_ip = T234_HWPM_IP_VI; +#endif + break; + case TEGRA_SOC_HWPM_IP_ISP: +#if defined(CONFIG_SOC_HWPM_IP_ISP) + config_ip = T234_HWPM_IP_ISP; +#endif + break; + case TEGRA_SOC_HWPM_IP_VIC: +#if defined(CONFIG_SOC_HWPM_IP_VIC) + config_ip = T234_HWPM_IP_VIC; +#endif + break; + case TEGRA_SOC_HWPM_IP_OFA: +#if defined(CONFIG_SOC_HWPM_IP_OFA) + config_ip = T234_HWPM_IP_OFA; +#endif + break; + case TEGRA_SOC_HWPM_IP_PVA: +#if defined(CONFIG_SOC_HWPM_IP_PVA) + config_ip = T234_HWPM_IP_PVA; +#endif + break; + case TEGRA_SOC_HWPM_IP_NVDLA: +#if defined(CONFIG_SOC_HWPM_IP_NVDLA) + config_ip = T234_HWPM_IP_NVDLA; +#endif + break; + case TEGRA_SOC_HWPM_IP_MGBE: +#if defined(CONFIG_SOC_HWPM_IP_MGBE) + config_ip = T234_HWPM_IP_MGBE; +#endif + break; + case TEGRA_SOC_HWPM_IP_SCF: +#if defined(CONFIG_SOC_HWPM_IP_SCF) + config_ip = T234_HWPM_IP_SCF; +#endif + break; + case TEGRA_SOC_HWPM_IP_NVDEC: +#if defined(CONFIG_SOC_HWPM_IP_NVDEC) + config_ip = T234_HWPM_IP_NVDEC; +#endif + break; + case TEGRA_SOC_HWPM_IP_NVENC: +#if defined(CONFIG_SOC_HWPM_IP_NVENC) + config_ip = T234_HWPM_IP_NVENC; +#endif + break; + case TEGRA_SOC_HWPM_IP_PCIE: +#if defined(CONFIG_SOC_HWPM_IP_PCIE) + config_ip = T234_HWPM_IP_PCIE; +#endif + break; + case TEGRA_SOC_HWPM_IP_DISPLAY: +#if defined(CONFIG_SOC_HWPM_IP_DISPLAY) + config_ip = T234_HWPM_IP_DISPLAY; +#endif + break; + case TEGRA_SOC_HWPM_IP_MSS_CHANNEL: +#if defined(CONFIG_SOC_HWPM_IP_MSS_CHANNEL) + config_ip = T234_HWPM_IP_MSS_CHANNEL; +#endif + break; + case TEGRA_SOC_HWPM_IP_MSS_GPU_HUB: +#if defined(CONFIG_SOC_HWPM_IP_MSS_GPU_HUB) + config_ip = T234_HWPM_IP_MSS_GPU_HUB; +#endif + break; + case TEGRA_SOC_HWPM_IP_MSS_ISO_NISO_HUBS: +#if defined(CONFIG_SOC_HWPM_IP_MSS_ISO_NISO_HUBS) + config_ip = T234_HWPM_IP_MSS_ISO_NISO_HUBS; +#endif + break; + case TEGRA_SOC_HWPM_IP_MSS_MCF: +#if defined(CONFIG_SOC_HWPM_IP_MSS_MCF) + config_ip = T234_HWPM_IP_MSS_MCF; +#endif + break; + default: + tegra_hwpm_err(hwpm, "Queried enum tegra_soc_hwpm_ip %d invalid", + ip_index); + break; + } + + *config_ip_index = config_ip; + return (config_ip != TEGRA_SOC_HWPM_IP_INACTIVE); +} + +bool t234_hwpm_is_resource_active(struct tegra_soc_hwpm *hwpm, + enum tegra_soc_hwpm_resource res_index, u32 *config_ip_index) +{ + u32 config_ip = TEGRA_SOC_HWPM_IP_INACTIVE; + + switch (res_index) { + case TEGRA_SOC_HWPM_RESOURCE_VI: +#if defined(CONFIG_SOC_HWPM_IP_VI) + config_ip = T234_HWPM_IP_VI; +#endif + break; + case TEGRA_SOC_HWPM_RESOURCE_ISP: +#if defined(CONFIG_SOC_HWPM_IP_ISP) + config_ip = T234_HWPM_IP_ISP; +#endif + break; + case TEGRA_SOC_HWPM_RESOURCE_VIC: +#if defined(CONFIG_SOC_HWPM_IP_VIC) + config_ip = T234_HWPM_IP_VIC; +#endif + break; + case TEGRA_SOC_HWPM_RESOURCE_OFA: +#if defined(CONFIG_SOC_HWPM_IP_OFA) + config_ip = T234_HWPM_IP_OFA; +#endif + break; + case TEGRA_SOC_HWPM_RESOURCE_PVA: +#if defined(CONFIG_SOC_HWPM_IP_PVA) + config_ip = T234_HWPM_IP_PVA; +#endif + break; + case TEGRA_SOC_HWPM_RESOURCE_NVDLA: +#if defined(CONFIG_SOC_HWPM_IP_NVDLA) + config_ip = T234_HWPM_IP_NVDLA; +#endif + break; + case TEGRA_SOC_HWPM_RESOURCE_MGBE: +#if defined(CONFIG_SOC_HWPM_IP_MGBE) + config_ip = T234_HWPM_IP_MGBE; +#endif + break; + case TEGRA_SOC_HWPM_RESOURCE_SCF: +#if defined(CONFIG_SOC_HWPM_IP_SCF) + config_ip = T234_HWPM_IP_SCF; +#endif + break; + case TEGRA_SOC_HWPM_RESOURCE_NVDEC: +#if defined(CONFIG_SOC_HWPM_IP_NVDEC) + 
config_ip = T234_HWPM_IP_NVDEC; +#endif + break; + case TEGRA_SOC_HWPM_RESOURCE_NVENC: +#if defined(CONFIG_SOC_HWPM_IP_NVENC) + config_ip = T234_HWPM_IP_NVENC; +#endif + break; + case TEGRA_SOC_HWPM_RESOURCE_PCIE: +#if defined(CONFIG_SOC_HWPM_IP_PCIE) + config_ip = T234_HWPM_IP_PCIE; +#endif + break; + case TEGRA_SOC_HWPM_RESOURCE_DISPLAY: +#if defined(CONFIG_SOC_HWPM_IP_DISPLAY) + config_ip = T234_HWPM_IP_DISPLAY; +#endif + break; + case TEGRA_SOC_HWPM_RESOURCE_MSS_CHANNEL: +#if defined(CONFIG_SOC_HWPM_IP_MSS_CHANNEL) + config_ip = T234_HWPM_IP_MSS_CHANNEL; +#endif + break; + case TEGRA_SOC_HWPM_RESOURCE_MSS_GPU_HUB: +#if defined(CONFIG_SOC_HWPM_IP_MSS_GPU_HUB) + config_ip = T234_HWPM_IP_MSS_GPU_HUB; +#endif + break; + case TEGRA_SOC_HWPM_RESOURCE_MSS_ISO_NISO_HUBS: +#if defined(CONFIG_SOC_HWPM_IP_MSS_ISO_NISO_HUBS) + config_ip = T234_HWPM_IP_MSS_ISO_NISO_HUBS; +#endif + break; + case TEGRA_SOC_HWPM_RESOURCE_MSS_MCF: +#if defined(CONFIG_SOC_HWPM_IP_MSS_MCF) + config_ip = T234_HWPM_IP_MSS_MCF; +#endif + break; + case TEGRA_SOC_HWPM_RESOURCE_PMA: + config_ip = T234_HWPM_IP_PMA; + break; + case TEGRA_SOC_HWPM_RESOURCE_CMD_SLICE_RTR: + config_ip = T234_HWPM_IP_RTR; + break; + default: + tegra_hwpm_err(hwpm, "Queried resource %d invalid", + res_index); + break; + } + + *config_ip_index = config_ip; + return (config_ip != TEGRA_SOC_HWPM_IP_INACTIVE); +} + +static int t234_hwpm_init_ip_perfmux_apertures(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip *chip_ip) +{ + u32 idx = 0U, perfmux_idx = 0U, max_perfmux = 0U; + u64 perfmux_address_range = 0ULL, perfmux_offset = 0ULL; + hwpm_ip_perfmux *perfmux = NULL; + + /* Initialize perfmux array */ + if (chip_ip->num_perfmux_per_inst == 0U) { + /* no perfmux in this IP */ + return 0; + } + + perfmux_address_range = chip_ip->perfmux_range_end - + chip_ip->perfmux_range_start + 1ULL; + chip_ip->num_perfmux_slots = + (u32) (perfmux_address_range / chip_ip->inst_perfmux_stride); + + chip_ip->ip_perfmux = kzalloc( + sizeof(hwpm_ip_perfmux *) * chip_ip->num_perfmux_slots, + GFP_KERNEL); + if (chip_ip->ip_perfmux == NULL) { + tegra_hwpm_err(hwpm, "Perfmux pointer array allocation failed"); + return -ENOMEM; + } + + /* Set all perfmux slot pointers to NULL */ + for (idx = 0U; idx < chip_ip->num_perfmux_slots; idx++) { + chip_ip->ip_perfmux[idx] = NULL; + } + + /* Assign valid perfmuxes to corresponding slot pointers */ + max_perfmux = chip_ip->num_instances * chip_ip->num_perfmux_per_inst; + for (perfmux_idx = 0U; perfmux_idx < max_perfmux; perfmux_idx++) { + perfmux = &chip_ip->perfmux_static_array[perfmux_idx]; + + /* Compute perfmux offset from perfmux range start */ + perfmux_offset = + perfmux->start_abs_pa - chip_ip->perfmux_range_start; + + /* Compute perfmux slot index */ + idx = (u32)(perfmux_offset / chip_ip->inst_perfmux_stride); + + /* Set perfmux slot pointer */ + chip_ip->ip_perfmux[idx] = perfmux; + } + + return 0; +} + +static int t234_hwpm_init_ip_perfmon_apertures(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip *chip_ip) +{ + u32 idx = 0U, perfmon_idx = 0U, max_perfmon = 0U; + u64 perfmon_address_range = 0ULL, perfmon_offset = 0ULL; + hwpm_ip_perfmon *perfmon = NULL; + + /* Initialize perfmon array */ + if (chip_ip->num_perfmon_per_inst == 0U) { + /* no perfmons in this IP */ + return 0; + } + + perfmon_address_range = chip_ip->perfmon_range_end - + chip_ip->perfmon_range_start + 1ULL; + chip_ip->num_perfmon_slots = + (u32) (perfmon_address_range / chip_ip->inst_perfmon_stride); + + chip_ip->ip_perfmon = kzalloc( + sizeof(hwpm_ip_perfmon *) * 
chip_ip->num_perfmon_slots, + GFP_KERNEL); + if (chip_ip->ip_perfmon == NULL) { + tegra_hwpm_err(hwpm, "Perfmon pointer array allocation failed"); + return -ENOMEM; + } + + /* Set all perfmon slot pointers to NULL */ + for (idx = 0U; idx < chip_ip->num_perfmon_slots; idx++) { + chip_ip->ip_perfmon[idx] = NULL; + } + + /* Assign valid perfmuxes to corresponding slot pointers */ + max_perfmon = chip_ip->num_instances * chip_ip->num_perfmon_per_inst; + for (perfmon_idx = 0U; perfmon_idx < max_perfmon; perfmon_idx++) { + perfmon = &chip_ip->perfmon_static_array[perfmon_idx]; + + /* Compute perfmon offset from perfmon range start */ + perfmon_offset = + perfmon->start_abs_pa - chip_ip->perfmon_range_start; + + /* Compute perfmon slot index */ + idx = (u32)(perfmon_offset / chip_ip->inst_perfmon_stride); + + /* Set perfmon slot pointer */ + chip_ip->ip_perfmon[idx] = perfmon; + } + + return 0; +} + +static int t234_hwpm_init_chip_ip_structures(struct tegra_soc_hwpm *hwpm) +{ + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + struct hwpm_ip *chip_ip = NULL; + u32 ip_idx; + int ret = 0; + + for (ip_idx = 0U; ip_idx < T234_HWPM_IP_MAX; ip_idx++) { + chip_ip = active_chip->chip_ips[ip_idx]; + + ret = t234_hwpm_init_ip_perfmon_apertures(hwpm, chip_ip); + if (ret != 0) { + tegra_hwpm_err(hwpm, "IP %d perfmon alloc failed", + ip_idx); + return ret; + } + + ret = t234_hwpm_init_ip_perfmux_apertures(hwpm, chip_ip); + if (ret != 0) { + tegra_hwpm_err(hwpm, "IP %d perfmux alloc failed", + ip_idx); + return ret; + } + } + + return 0; +} + +int t234_hwpm_init_chip_info(struct tegra_soc_hwpm *hwpm) +{ + struct hwpm_ip **t234_active_ip_info; + int ret = 0; + + /* Allocate array of pointers to hold active IP structures */ + t234_chip_info.chip_ips = + kzalloc(sizeof(struct hwpm_ip *) * T234_HWPM_IP_MAX, + GFP_KERNEL); + + /* Add active chip structure link to hwpm super-structure */ + hwpm->active_chip = &t234_chip_info; + + /* Temporary pointer to make below assignments legible */ + t234_active_ip_info = t234_chip_info.chip_ips; + + t234_active_ip_info[T234_HWPM_IP_PMA] = &t234_hwpm_ip_pma; + t234_active_ip_info[T234_HWPM_IP_RTR] = &t234_hwpm_ip_rtr; + +#if defined(CONFIG_SOC_HWPM_IP_DISPLAY) + t234_active_ip_info[T234_HWPM_IP_DISPLAY] = &t234_hwpm_ip_display; +#endif +#if defined(CONFIG_SOC_HWPM_IP_ISP) + t234_active_ip_info[T234_HWPM_IP_ISP] = &t234_hwpm_ip_isp; +#endif +#if defined(CONFIG_SOC_HWPM_IP_MGBE) + t234_active_ip_info[T234_HWPM_IP_MGBE] = &t234_hwpm_ip_mgbe; +#endif +#if defined(CONFIG_SOC_HWPM_IP_MSS_CHANNEL) + t234_active_ip_info[T234_HWPM_IP_MSS_CHANNEL] = + &t234_hwpm_ip_mss_channel; +#endif +#if defined(CONFIG_SOC_HWPM_IP_MSS_GPU_HUB) + t234_active_ip_info[T234_HWPM_IP_MSS_GPU_HUB] = + &t234_hwpm_ip_mss_gpu_hub; +#endif +#if defined(CONFIG_SOC_HWPM_IP_MSS_ISO_NISO_HUBS) + t234_active_ip_info[T234_HWPM_IP_MSS_ISO_NISO_HUBS] = + &t234_hwpm_ip_mss_iso_niso_hubs; +#endif +#if defined(CONFIG_SOC_HWPM_IP_MSS_MCF) + t234_active_ip_info[T234_HWPM_IP_MSS_MCF] = &t234_hwpm_ip_mss_mcf; +#endif +#if defined(CONFIG_SOC_HWPM_IP_NVDEC) + t234_active_ip_info[T234_HWPM_IP_NVDEC] = &t234_hwpm_ip_nvdec; +#endif +#if defined(CONFIG_SOC_HWPM_IP_NVDLA) + t234_active_ip_info[T234_HWPM_IP_NVDLA] = &t234_hwpm_ip_nvdla; +#endif +#if defined(CONFIG_SOC_HWPM_IP_NVENC) + t234_active_ip_info[T234_HWPM_IP_NVENC] = &t234_hwpm_ip_nvenc; +#endif +#if defined(CONFIG_SOC_HWPM_IP_OFA) + t234_active_ip_info[T234_HWPM_IP_OFA] = &t234_hwpm_ip_ofa; +#endif +#if defined(CONFIG_SOC_HWPM_IP_PCIE) + 
t234_active_ip_info[T234_HWPM_IP_PCIE] = &t234_hwpm_ip_pcie; +#endif +#if defined(CONFIG_SOC_HWPM_IP_PVA) + t234_active_ip_info[T234_HWPM_IP_PVA] = &t234_hwpm_ip_pva; +#endif +#if defined(CONFIG_SOC_HWPM_IP_SCF) + t234_active_ip_info[T234_HWPM_IP_SCF] = &t234_hwpm_ip_scf; +#endif +#if defined(CONFIG_SOC_HWPM_IP_VI) + t234_active_ip_info[T234_HWPM_IP_VI] = &t234_hwpm_ip_vi; +#endif +#if defined(CONFIG_SOC_HWPM_IP_VIC) + t234_active_ip_info[T234_HWPM_IP_VIC] = &t234_hwpm_ip_vic; +#endif + ret = t234_hwpm_init_chip_ip_structures(hwpm); + if (ret != 0) { + tegra_hwpm_err(hwpm, "IP structure init failed"); + return ret; + } + + return 0; +} + +void t234_hwpm_release_sw_setup(struct tegra_soc_hwpm *hwpm) +{ + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + struct hwpm_ip *chip_ip = NULL; + u32 ip_idx; + + for (ip_idx = 0U; ip_idx < T234_HWPM_IP_MAX; ip_idx++) { + chip_ip = active_chip->chip_ips[ip_idx]; + + /* Release perfmux array */ + if (chip_ip->num_perfmux_per_inst != 0U) { + kfree(chip_ip->ip_perfmux); + } + + /* Release perfmon array */ + if (chip_ip->num_perfmon_per_inst != 0U) { + kfree(chip_ip->ip_perfmon); + } + } + return; +} diff --git a/hal/t234/t234_hwpm_internal.h b/hal/t234/t234_hwpm_internal.h new file mode 100644 index 0000000..be6c2b5 --- /dev/null +++ b/hal/t234/t234_hwpm_internal.h @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef T234_SOC_HWPM_INTERNAL_H +#define T234_SOC_HWPM_INTERNAL_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define T234_HWPM_ACTIVE_IP_MAX \ + DEFINE_SOC_HWPM_ACTIVE_IP(T234_HWPM_IP_MAX), + +#define DEFINE_SOC_HWPM_ACTIVE_IP(name) name +enum t234_hwpm_active_ips { + T234_HWPM_ACTIVE_IP_VI + T234_HWPM_ACTIVE_IP_ISP + T234_HWPM_ACTIVE_IP_VIC + T234_HWPM_ACTIVE_IP_OFA + T234_HWPM_ACTIVE_IP_PVA + T234_HWPM_ACTIVE_IP_NVDLA + T234_HWPM_ACTIVE_IP_MGBE + T234_HWPM_ACTIVE_IP_SCF + T234_HWPM_ACTIVE_IP_NVDEC + T234_HWPM_ACTIVE_IP_NVENC + T234_HWPM_ACTIVE_IP_PCIE + T234_HWPM_ACTIVE_IP_DISPLAY + T234_HWPM_ACTIVE_IP_MSS_CHANNEL + T234_HWPM_ACTIVE_IP_MSS_GPU_HUB + T234_HWPM_ACTIVE_IP_MSS_ISO_NISO_HUBS + T234_HWPM_ACTIVE_IP_MSS_MCF + T234_HWPM_ACTIVE_IP_PMA + T234_HWPM_ACTIVE_IP_RTR + T234_HWPM_ACTIVE_IP_MAX +}; +#undef DEFINE_SOC_HWPM_ACTIVE_IP + +enum tegra_soc_hwpm_ip; +enum tegra_soc_hwpm_resource; +struct tegra_soc_hwpm; +struct hwpm_ip_aperture; + +bool t234_hwpm_is_ip_active(struct tegra_soc_hwpm *hwpm, + enum tegra_soc_hwpm_ip ip_index, u32 *config_ip_index); +bool t234_hwpm_is_resource_active(struct tegra_soc_hwpm *hwpm, + enum tegra_soc_hwpm_resource res_index, u32 *config_ip_index); + +int t234_hwpm_extract_ip_ops(struct tegra_soc_hwpm *hwpm, + struct tegra_soc_hwpm_ip_ops *hwpm_ip_ops, bool available); +int t234_hwpm_init_fs_info(struct tegra_soc_hwpm *hwpm); +int t234_hwpm_get_fs_info(struct tegra_soc_hwpm *hwpm, + u32 ip_index, u64 *fs_mask, u8 *ip_status); + +int t234_hwpm_init_prod_values(struct tegra_soc_hwpm *hwpm); +int t234_hwpm_disable_slcg(struct tegra_soc_hwpm *hwpm); +int t234_hwpm_enable_slcg(struct tegra_soc_hwpm *hwpm); + +int t234_hwpm_reserve_pma(struct tegra_soc_hwpm *hwpm); +int t234_hwpm_reserve_rtr(struct tegra_soc_hwpm *hwpm); +int t234_hwpm_release_pma(struct tegra_soc_hwpm *hwpm); +int t234_hwpm_release_rtr(struct tegra_soc_hwpm *hwpm); + +int t234_hwpm_perfmon_reserve(struct tegra_soc_hwpm *hwpm, + hwpm_ip_perfmon *perfmon); +int t234_hwpm_perfmon_release(struct tegra_soc_hwpm *hwpm, + hwpm_ip_perfmon *perfmon); +int t234_hwpm_reserve_given_resource(struct tegra_soc_hwpm *hwpm, u32 ip_idx); +int t234_hwpm_bind_reserved_resources(struct tegra_soc_hwpm *hwpm); +int t234_hwpm_disable_triggers(struct tegra_soc_hwpm *hwpm); +int t234_hwpm_release_all_resources(struct tegra_soc_hwpm *hwpm); + +int t234_hwpm_disable_mem_mgmt(struct tegra_soc_hwpm *hwpm); +int t234_hwpm_enable_mem_mgmt(struct tegra_soc_hwpm *hwpm, + struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream); +int t234_hwpm_invalidate_mem_config(struct tegra_soc_hwpm *hwpm); +int t234_hwpm_stream_mem_bytes(struct tegra_soc_hwpm *hwpm); +int t234_hwpm_disable_pma_streaming(struct tegra_soc_hwpm *hwpm); +int t234_hwpm_update_mem_bytes_get_ptr(struct tegra_soc_hwpm *hwpm, + u64 mem_bump); +u64 t234_hwpm_get_mem_bytes_put_ptr(struct tegra_soc_hwpm *hwpm); +bool t234_hwpm_membuf_overflow_status(struct tegra_soc_hwpm *hwpm); + +size_t t234_hwpm_get_alist_buf_size(struct tegra_soc_hwpm *hwpm); +int t234_hwpm_zero_alist_regs(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip_aperture *aperture); +int t234_hwpm_get_alist_size(struct tegra_soc_hwpm *hwpm); +int t234_hwpm_combine_alist(struct tegra_soc_hwpm *hwpm, u64 *alist); +bool t234_hwpm_check_alist(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip_aperture *aperture, u64 phys_addr); + +int t234_hwpm_exec_reg_ops(struct 
tegra_soc_hwpm *hwpm, + struct tegra_soc_hwpm_reg_op *reg_op); + +void t234_hwpm_release_sw_setup(struct tegra_soc_hwpm *hwpm); + + +#endif /* T234_SOC_HWPM_INTERNAL_H */ diff --git a/hal/t234/t234_hwpm_ip_utils.c b/hal/t234/t234_hwpm_ip_utils.c new file mode 100644 index 0000000..632d61a --- /dev/null +++ b/hal/t234/t234_hwpm_ip_utils.c @@ -0,0 +1,649 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include + +#include +#include +#include +#include + +/* + * Currently, all IPs do not self register to the hwpm driver + * This function is used to force set floorsweep mask for IPs which + * contain perfmon only (eg. SCF) + */ +static int t234_hwpm_update_floorsweep_mask_using_perfmon( + struct tegra_soc_hwpm *hwpm, + u32 ip_idx, u32 ip_perfmon_idx, bool available) +{ + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + struct hwpm_ip *chip_ip = active_chip->chip_ips[ip_idx]; + hwpm_ip_perfmon *perfmon = NULL; + + tegra_hwpm_fn(hwpm, " "); + + if (chip_ip->override_enable) { + /* This IP shouldn't be configured, ignore this request */ + return 0; + } + + perfmon = chip_ip->ip_perfmon[ip_perfmon_idx]; + if (perfmon == NULL) { + tegra_hwpm_err(hwpm, + "IP %d perfmon_idx %d not populated as expected", + ip_idx, ip_perfmon_idx); + return -EINVAL; + } + + /* Update floorsweep info */ + if (available) { + chip_ip->fs_mask |= perfmon->hw_inst_mask; + } else { + chip_ip->fs_mask &= ~(perfmon->hw_inst_mask); + } + + return 0; +} + +static int t234_hwpm_update_floorsweep_mask(struct tegra_soc_hwpm *hwpm, + u32 ip_idx, u32 ip_perfmux_idx, bool available) +{ + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + struct hwpm_ip *chip_ip = active_chip->chip_ips[ip_idx]; + hwpm_ip_perfmux *perfmux = NULL; + + tegra_hwpm_fn(hwpm, " "); + + if (chip_ip->override_enable) { + /* This IP shouldn't be configured, ignore this request */ + return 0; + } + + perfmux = chip_ip->ip_perfmux[ip_perfmux_idx]; + if (perfmux == NULL) { + tegra_hwpm_err(hwpm, + "IP %d perfmux_idx %d not populated as expected", + ip_idx, ip_perfmux_idx); + return -EINVAL; + } + + /* Update floorsweep info */ + if (available) { + chip_ip->fs_mask |= perfmux->hw_inst_mask; + } else { + chip_ip->fs_mask &= ~(perfmux->hw_inst_mask); + } + + return 0; +} + +static int t234_hwpm_update_ip_ops_info(struct tegra_soc_hwpm *hwpm, + struct tegra_soc_hwpm_ip_ops *hwpm_ip_ops, + u32 ip_idx, u32 ip_perfmux_idx, bool available) +{ + u32 perfmux_idx, max_num_perfmux = 0U; + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + struct hwpm_ip *chip_ip = active_chip->chip_ips[ip_idx]; + struct tegra_soc_hwpm_ip_ops *ip_ops; + hwpm_ip_perfmux *given_perfmux = chip_ip->ip_perfmux[ip_perfmux_idx]; + hwpm_ip_perfmux *perfmux = NULL; + + tegra_hwpm_fn(hwpm, " "); + + if (chip_ip->override_enable) { + /* This IP shouldn't be configured, ignore this request */ + return 0; + } + + if (given_perfmux == NULL) { + tegra_hwpm_err(hwpm, + "IP %d given_perfmux idx %d not populated as expected", + ip_idx, ip_perfmux_idx); + return 
-EINVAL; + } + + /* Update IP ops info for all perfmux in the instance */ + max_num_perfmux = + chip_ip->num_instances * chip_ip->num_perfmux_per_inst; + for (perfmux_idx = 0U; perfmux_idx < max_num_perfmux; perfmux_idx++) { + perfmux = &chip_ip->perfmux_static_array[perfmux_idx]; + + if (perfmux->hw_inst_mask != given_perfmux->hw_inst_mask) { + continue; + } + + ip_ops = &perfmux->ip_ops; + + if (available) { + ip_ops->ip_base_address = hwpm_ip_ops->ip_base_address; + ip_ops->ip_index = hwpm_ip_ops->ip_index; + ip_ops->ip_dev = hwpm_ip_ops->ip_dev; + ip_ops->hwpm_ip_pm = hwpm_ip_ops->hwpm_ip_pm; + ip_ops->hwpm_ip_reg_op = hwpm_ip_ops->hwpm_ip_reg_op; + } else { + /* Do I need a check to see if the ip_ops are set ? */ + ip_ops->ip_base_address = 0ULL; + ip_ops->ip_index = TEGRA_SOC_HWPM_IP_INACTIVE; + ip_ops->ip_dev = NULL; + ip_ops->hwpm_ip_pm = NULL; + ip_ops->hwpm_ip_reg_op = NULL; + } + } + + return 0; +} + +static int t234_hwpm_fs_and_ip_ops(struct tegra_soc_hwpm *hwpm, + struct tegra_soc_hwpm_ip_ops *hwpm_ip_ops, + u32 ip_idx, u32 perfmux_idx, bool available) +{ + int ret = -EINVAL; + + tegra_hwpm_fn(hwpm, " "); + + ret = t234_hwpm_update_floorsweep_mask( + hwpm, ip_idx, perfmux_idx, available); + if (ret != 0) { + tegra_hwpm_err(hwpm, + "IP %d perfmux %d: Failed to update FS mask", + ip_idx, perfmux_idx); + goto fail; + } + ret = t234_hwpm_update_ip_ops_info(hwpm, hwpm_ip_ops, + ip_idx, perfmux_idx, available); + if (ret != 0) { + tegra_hwpm_err(hwpm, + "IP %d perfmux %d: Failed to update ip_ops", + ip_idx, perfmux_idx); + goto fail; + } +fail: + return ret; +} + +/* + * This function finds the IP perfmux index corresponding to given base address. + * Perfmux aperture belongs to IP domain and contains IP instance info + * wrt base address. + * Return instance index + */ +static int t234_hwpm_find_ip_perfmux_index(struct tegra_soc_hwpm *hwpm, + u64 base_addr, u32 ip_index, u32 *ip_perfmux_idx) +{ + struct tegra_soc_hwpm_chip *active_chip = NULL; + struct hwpm_ip *chip_ip = NULL; + u32 perfmux_idx; + u64 addr_offset = 0ULL; + hwpm_ip_perfmux *perfmux = NULL; + + tegra_hwpm_fn(hwpm, " "); + + if (ip_perfmux_idx == NULL) { + tegra_hwpm_err(hwpm, "pointer for ip_perfmux_idx is NULL"); + return -EINVAL; + } + + if (hwpm->active_chip == NULL) { + tegra_hwpm_err(hwpm, "chip struct not populated"); + return -ENODEV; + } + + active_chip = hwpm->active_chip; + + if (ip_index == TEGRA_SOC_HWPM_IP_INACTIVE) { + tegra_hwpm_err(hwpm, "invalid ip_index %d", ip_index); + return -EINVAL; + } + + chip_ip = active_chip->chip_ips[ip_index]; + + if (chip_ip == NULL) { + tegra_hwpm_err(hwpm, "IP %d not populated", ip_index); + return -ENODEV; + } + + if (chip_ip->override_enable) { + /* This IP should not be configured for HWPM */ + tegra_hwpm_dbg(hwpm, hwpm_info, + "IP %d enable override", ip_index); + return 0; /* Should this be notified to caller or ignored */ + } + + /* Validate phys_addr falls in IP address range */ + if ((base_addr < chip_ip->perfmux_range_start) || + (base_addr > chip_ip->perfmux_range_end)) { + tegra_hwpm_dbg(hwpm, hwpm_info, + "phys address 0x%llx not in IP %d", + base_addr, ip_index); + return -ENODEV; + } + + /* Find IP instance for given phys_address */ + /* + * Since all IP instances are configured to be in consecutive memory, + * instance index can be found using instance physical address stride. 
+ */ + addr_offset = base_addr - chip_ip->perfmux_range_start; + perfmux_idx = (u32)(addr_offset / chip_ip->inst_perfmux_stride); + + /* Make sure instance index is valid */ + if (perfmux_idx >= chip_ip->num_perfmux_slots) { + tegra_hwpm_err(hwpm, + "IP:%d -> base addr 0x%llx is out of bounds", + ip_index, base_addr); + return -EINVAL; + } + + /* Validate IP instance perfmux start address = given phys addr */ + perfmux = chip_ip->ip_perfmux[perfmux_idx]; + + if (perfmux == NULL) { + tegra_hwpm_err(hwpm, + "IP %d perfmux_idx %d not populated as expected", + ip_index, perfmux_idx); + return -EINVAL; + } + + if (base_addr != perfmux->start_abs_pa) { + tegra_hwpm_dbg(hwpm, hwpm_info, + "base addr 0x%llx != perfmux abs addr", base_addr); + return -EINVAL; + } + + *ip_perfmux_idx = perfmux_idx; + + return 0; +} + +int t234_hwpm_extract_ip_ops(struct tegra_soc_hwpm *hwpm, + struct tegra_soc_hwpm_ip_ops *hwpm_ip_ops, bool available) +{ + int ret = 0; + u32 perfmux_idx = 0U; + u32 ip_idx = 0U; + + tegra_hwpm_fn(hwpm, " "); + + /* Convert tegra_soc_hwpm_ip to internal enum */ + if (!(t234_hwpm_is_ip_active(hwpm, + hwpm_ip_ops->ip_index, &ip_idx))) { + tegra_hwpm_err(hwpm, + "SOC hwpm IP %d (base 0x%llx) is unconfigured", + hwpm_ip_ops->ip_index, hwpm_ip_ops->ip_base_address); + goto fail; + } + + switch (ip_idx) { + case T234_HWPM_IP_VI: + case T234_HWPM_IP_ISP: + case T234_HWPM_IP_VIC: + case T234_HWPM_IP_OFA: + case T234_HWPM_IP_PVA: + case T234_HWPM_IP_NVDLA: + case T234_HWPM_IP_MGBE: + case T234_HWPM_IP_SCF: + case T234_HWPM_IP_NVDEC: + case T234_HWPM_IP_NVENC: + case T234_HWPM_IP_PCIE: + case T234_HWPM_IP_DISPLAY: + case T234_HWPM_IP_MSS_GPU_HUB: + /* Get IP info */ + ret = t234_hwpm_find_ip_perfmux_index(hwpm, + hwpm_ip_ops->ip_base_address, ip_idx, &perfmux_idx); + if (ret != 0) { + tegra_hwpm_err(hwpm, + "IP %d base 0x%llx no perfmux match", + ip_idx, hwpm_ip_ops->ip_base_address); + goto fail; + } + + ret = t234_hwpm_fs_and_ip_ops(hwpm, hwpm_ip_ops, + ip_idx, perfmux_idx, available); + if (ret != 0) { + tegra_hwpm_err(hwpm, + "Failed to %s fs/ops for IP %d perfmux %d", + available ? "set" : "reset", + ip_idx, perfmux_idx); + goto fail; + } + break; + case T234_HWPM_IP_MSS_CHANNEL: + case T234_HWPM_IP_MSS_ISO_NISO_HUBS: + case T234_HWPM_IP_MSS_MCF: + /* Check base address in T234_HWPM_IP_MSS_CHANNEL */ + ip_idx = T234_HWPM_IP_MSS_CHANNEL; + ret = t234_hwpm_find_ip_perfmux_index(hwpm, + hwpm_ip_ops->ip_base_address, ip_idx, &perfmux_idx); + if (ret != 0) { + tegra_hwpm_err(hwpm, + "IP %d base 0x%llx no perfmux match", + ip_idx, hwpm_ip_ops->ip_base_address); + } else { + ret = t234_hwpm_fs_and_ip_ops(hwpm, hwpm_ip_ops, + ip_idx, perfmux_idx, available); + if (ret != 0) { + tegra_hwpm_err(hwpm, + "IP %d perfmux %d: fs/ops %s failed", + available ? "set" : "reset", + ip_idx, perfmux_idx); + goto fail; + } + } + + /* Check base address in T234_HWPM_IP_MSS_ISO_NISO_HUBS */ + ip_idx = T234_HWPM_IP_MSS_ISO_NISO_HUBS; + ret = t234_hwpm_find_ip_perfmux_index(hwpm, + hwpm_ip_ops->ip_base_address, ip_idx, &perfmux_idx); + if (ret != 0) { + /* + * Return value of ENODEV will indicate that the base + * address doesn't belong to this IP. + * This case is valid, as not all base addresses are + * shared between MSS IPs. + */ + if (ret != -ENODEV) { + goto fail; + } + } else { + ret = t234_hwpm_fs_and_ip_ops(hwpm, hwpm_ip_ops, + ip_idx, perfmux_idx, available); + if (ret != 0) { + tegra_hwpm_err(hwpm, + "IP %d perfmux %d: fs/ops %s failed", + ip_idx, perfmux_idx, + available ? 
"set" : "reset"); + goto fail; + } + } + + /* Check base address in T234_HWPM_IP_MSS_CHANNEL */ + ip_idx = T234_HWPM_IP_MSS_CHANNEL; + ret = t234_hwpm_find_ip_perfmux_index(hwpm, + hwpm_ip_ops->ip_base_address, ip_idx, &perfmux_idx); + if (ret != 0) { + /* + * Return value of ENODEV will indicate that the base + * address doesn't belong to this IP. + * This case is valid, as not all base addresses are + * shared between MSS IPs. + */ + if (ret != -ENODEV) { + goto fail; + } + } else { + ret = t234_hwpm_fs_and_ip_ops(hwpm, hwpm_ip_ops, + ip_idx, perfmux_idx, available); + if (ret != 0) { + tegra_hwpm_err(hwpm, + "IP %d perfmux %d: fs/ops %s failed", + ip_idx, perfmux_idx, + available ? "set" : "reset"); + goto fail; + } + } + break; + case T234_HWPM_IP_PMA: + case T234_HWPM_IP_RTR: + default: + tegra_hwpm_err(hwpm, "Invalid IP %d for ip_ops", ip_idx); + break; + } + +fail: + return ret; +} + +/* + * Find IP perfmux index and set corresponding floorsweep info. + */ +int t234_hwpm_set_fs_info(struct tegra_soc_hwpm *hwpm, u64 base_address, + u32 ip_idx, bool available) +{ + int ret = 0; + u32 perfmux_idx = 0U; + + tegra_hwpm_fn(hwpm, " "); + + ret = t234_hwpm_find_ip_perfmux_index(hwpm, + base_address, ip_idx, &perfmux_idx); + if (ret != 0) { + tegra_hwpm_err(hwpm, "IP %d base 0x%llx no perfmux match", + ip_idx, base_address); + goto fail; + } + + ret = t234_hwpm_update_floorsweep_mask( + hwpm, ip_idx, perfmux_idx, available); + if (ret != 0) { + tegra_hwpm_err(hwpm, + "IP %d perfmux %d base 0x%llx: FS mask update failed", + ip_idx, perfmux_idx, base_address); + goto fail; + } +fail: + return ret; +} + +/* + * Some IPs don't register with HWPM driver at the moment. Force set available + * instances of such IPs. + */ +int t234_hwpm_init_fs_info(struct tegra_soc_hwpm *hwpm) +{ + u32 i; + int ret = 0; + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + struct hwpm_ip *chip_ip = NULL; + + tegra_hwpm_fn(hwpm, " "); + + if (tegra_platform_is_vsp()) { + /* Static IP instances as per VSP netlist */ + /* MSS CHANNEL: vsp has single instance available */ + ret = t234_hwpm_set_fs_info(hwpm, addr_map_mc0_base_r(), + T234_HWPM_IP_MSS_CHANNEL, true); + if (ret != 0) { + goto fail; + } + + /* MSS GPU HUB */ + ret = t234_hwpm_set_fs_info(hwpm, + addr_map_mss_nvlink_1_base_r(), + T234_HWPM_IP_MSS_GPU_HUB, true); + if (ret != 0) { + goto fail; + } + } + if (tegra_platform_is_silicon()) { + /* Static IP instances corresponding to silicon */ + /* VI */ + /*ret = t234_hwpm_set_fs_info(hwpm, addr_map_vi_thi_base_r(), + T234_HWPM_IP_VI, true); + if (ret != 0) { + goto fail; + } + ret = t234_hwpm_set_fs_info(hwpm, addr_map_vi2_thi_base_r(), + T234_HWPM_IP_VI, true); + if (ret != 0) { + goto fail; + }*/ + + /* ISP */ + ret = t234_hwpm_set_fs_info(hwpm, addr_map_isp_thi_base_r(), + T234_HWPM_IP_ISP, true); + if (ret != 0) { + goto fail; + } + + /* PVA */ + ret = t234_hwpm_set_fs_info(hwpm, addr_map_pva0_pm_base_r(), + T234_HWPM_IP_PVA, true); + if (ret != 0) { + goto fail; + } + + /* NVDLA */ + ret = t234_hwpm_set_fs_info(hwpm, + addr_map_nvdla0_base_r(), + T234_HWPM_IP_NVDLA, true); + if (ret != 0) { + goto fail; + } + ret = t234_hwpm_set_fs_info(hwpm, + addr_map_nvdla1_base_r(), + T234_HWPM_IP_NVDLA, true); + if (ret != 0) { + goto fail; + } + + /* MGBE */ + /*ret = t234_hwpm_set_fs_info(hwpm, + addr_map_mgbe0_mac_rm_base_r(), + T234_HWPM_IP_MGBE, true); + if (ret != 0) { + goto fail; + }*/ + + /* SCF */ + ret = t234_hwpm_update_floorsweep_mask_using_perfmon(hwpm, + T234_HWPM_IP_SCF, 0U, true); + if (ret 
!= 0) { + tegra_hwpm_err(hwpm, + "T234_HWPM_IP_SCF: FS mask update failed"); + goto fail; + } + + /* NVDEC */ + ret = t234_hwpm_set_fs_info(hwpm, addr_map_nvdec_base_r(), + T234_HWPM_IP_NVDEC, true); + if (ret != 0) { + goto fail; + } + + /* PCIE */ + /*ret = t234_hwpm_set_fs_info(hwpm, + addr_map_pcie_c1_ctl_base_r(), + T234_HWPM_IP_PCIE, true); + if (ret != 0) { + goto fail; + } + ret = t234_hwpm_set_fs_info(hwpm, + addr_map_pcie_c4_ctl_base_r(), + T234_HWPM_IP_PCIE, true); + if (ret != 0) { + goto fail; + } + ret = t234_hwpm_set_fs_info(hwpm, + addr_map_pcie_c5_ctl_base_r(), + T234_HWPM_IP_PCIE, true); + if (ret != 0) { + goto fail; + }*/ + + /* DISPLAY */ + /*ret = t234_hwpm_set_fs_info(hwpm, addr_map_disp_base_r(), + T234_HWPM_IP_DISPLAY, true); + if (ret != 0) { + goto fail; + }*/ + + /* MSS CHANNEL */ + ret = t234_hwpm_set_fs_info(hwpm, addr_map_mc0_base_r(), + T234_HWPM_IP_MSS_CHANNEL, true); + if (ret != 0) { + goto fail; + } + ret = t234_hwpm_set_fs_info(hwpm, addr_map_mc4_base_r(), + T234_HWPM_IP_MSS_CHANNEL, true); + if (ret != 0) { + goto fail; + } + ret = t234_hwpm_set_fs_info(hwpm, addr_map_mc8_base_r(), + T234_HWPM_IP_MSS_CHANNEL, true); + if (ret != 0) { + goto fail; + } + ret = t234_hwpm_set_fs_info(hwpm, addr_map_mc12_base_r(), + T234_HWPM_IP_MSS_CHANNEL, true); + if (ret != 0) { + goto fail; + } + + /* MSS ISO NISO HUBS */ + ret = t234_hwpm_set_fs_info(hwpm, addr_map_mc0_base_r(), + TEGRA_SOC_HWPM_IP_MSS_ISO_NISO_HUBS, true); + if (ret != 0) { + goto fail; + } + + /* MSS MCF */ + ret = t234_hwpm_set_fs_info(hwpm, addr_map_mc0_base_r(), + TEGRA_SOC_HWPM_IP_MSS_MCF, true); + if (ret != 0) { + goto fail; + } + + /* MSS GPU HUB */ + ret = t234_hwpm_set_fs_info(hwpm, + addr_map_mss_nvlink_1_base_r(), + T234_HWPM_IP_MSS_GPU_HUB, true); + if (ret != 0) { + goto fail; + } + } + + tegra_hwpm_dbg(hwpm, hwpm_verbose, "IP floorsweep info:"); + for (i = 0U; i < T234_HWPM_IP_MAX; i++) { + chip_ip = active_chip->chip_ips[i]; + tegra_hwpm_dbg(hwpm, hwpm_verbose, "IP:%d fs_mask:0x%x", + i, chip_ip->fs_mask); + } + +fail: + return ret; +} + +int t234_hwpm_get_fs_info(struct tegra_soc_hwpm *hwpm, + u32 ip_index, u64 *fs_mask, u8 *ip_status) +{ + u32 ip_idx = 0U; + struct tegra_soc_hwpm_chip *active_chip = NULL; + struct hwpm_ip *chip_ip = NULL; + + tegra_hwpm_fn(hwpm, " "); + + /* Convert tegra_soc_hwpm_ip to internal enum */ + if (!(t234_hwpm_is_ip_active(hwpm, ip_index, &ip_idx))) { + tegra_hwpm_dbg(hwpm, hwpm_info, + "SOC hwpm IP %d is not configured", ip_index); + + *ip_status = TEGRA_SOC_HWPM_IP_STATUS_INVALID; + *fs_mask = 0ULL; + /* Remove after uapi update */ + if (ip_index == TEGRA_SOC_HWPM_IP_MSS_NVLINK) { + tegra_hwpm_dbg(hwpm, hwpm_verbose, + "For hwpm IP %d setting status as valid", + ip_index); + *ip_status = TEGRA_SOC_HWPM_IP_STATUS_VALID; + } + } else { + active_chip = hwpm->active_chip; + chip_ip = active_chip->chip_ips[ip_idx]; + *fs_mask = chip_ip->fs_mask; + *ip_status = TEGRA_SOC_HWPM_IP_STATUS_VALID; + } + + return 0; +} diff --git a/hal/t234/t234_hwpm_mem_buf_utils.c b/hal/t234/t234_hwpm_mem_buf_utils.c new file mode 100644 index 0000000..04f1a45 --- /dev/null +++ b/hal/t234/t234_hwpm_mem_buf_utils.c @@ -0,0 +1,212 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +int t234_hwpm_disable_mem_mgmt(struct tegra_soc_hwpm *hwpm) +{ + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + /* Currently, PMA has only one perfmux */ + hwpm_ip_perfmux *pma_perfmux = + active_chip->chip_ips[T234_HWPM_IP_PMA]->ip_perfmux[0U]; + + tegra_hwpm_fn(hwpm, " "); + + tegra_hwpm_writel(hwpm, pma_perfmux, pmasys_channel_outbase_r(0), 0); + tegra_hwpm_writel(hwpm, pma_perfmux, + pmasys_channel_outbaseupper_r(0), 0); + tegra_hwpm_writel(hwpm, pma_perfmux, pmasys_channel_outsize_r(0), 0); + tegra_hwpm_writel(hwpm, pma_perfmux, + pmasys_channel_mem_bytes_addr_r(0), 0); + + return 0; +} + +int t234_hwpm_enable_mem_mgmt(struct tegra_soc_hwpm *hwpm, + struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream) +{ + u32 outbase_lo = 0; + u32 outbase_hi = 0; + u32 outsize = 0; + u32 mem_bytes_addr = 0; + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + /* Currently, PMA has only one perfmux */ + hwpm_ip_perfmux *pma_perfmux = + active_chip->chip_ips[T234_HWPM_IP_PMA]->ip_perfmux[0U]; + + tegra_hwpm_fn(hwpm, " "); + + outbase_lo = alloc_pma_stream->stream_buf_pma_va & + pmasys_channel_outbase_ptr_m(); + tegra_hwpm_writel(hwpm, pma_perfmux, + pmasys_channel_outbase_r(0), outbase_lo); + tegra_hwpm_dbg(hwpm, hwpm_verbose, "OUTBASE = 0x%x", outbase_lo); + + outbase_hi = (alloc_pma_stream->stream_buf_pma_va >> 32) & + pmasys_channel_outbaseupper_ptr_m(); + tegra_hwpm_writel(hwpm, pma_perfmux, + pmasys_channel_outbaseupper_r(0), outbase_hi); + tegra_hwpm_dbg(hwpm, hwpm_verbose, "OUTBASEUPPER = 0x%x", outbase_hi); + + outsize = alloc_pma_stream->stream_buf_size & + pmasys_channel_outsize_numbytes_m(); + tegra_hwpm_writel(hwpm, pma_perfmux, + pmasys_channel_outsize_r(0), outsize); + tegra_hwpm_dbg(hwpm, hwpm_verbose, "OUTSIZE = 0x%x", outsize); + + mem_bytes_addr = sg_dma_address(hwpm->mem_bytes_sgt->sgl) & + pmasys_channel_mem_bytes_addr_ptr_m(); + tegra_hwpm_writel(hwpm, pma_perfmux, + pmasys_channel_mem_bytes_addr_r(0), mem_bytes_addr); + tegra_hwpm_dbg(hwpm, hwpm_verbose, + "MEM_BYTES_ADDR = 0x%x", mem_bytes_addr); + + tegra_hwpm_writel(hwpm, pma_perfmux, pmasys_channel_mem_block_r(0), + pmasys_channel_mem_block_valid_f( + pmasys_channel_mem_block_valid_true_v())); + + return 0; +} + +int t234_hwpm_invalidate_mem_config(struct tegra_soc_hwpm *hwpm) +{ + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + /* Currently, PMA has only one perfmux */ + hwpm_ip_perfmux *pma_perfmux = + active_chip->chip_ips[T234_HWPM_IP_PMA]->ip_perfmux[0U]; + + tegra_hwpm_fn(hwpm, " "); + + tegra_hwpm_writel(hwpm, pma_perfmux, pmasys_channel_mem_block_r(0), + pmasys_channel_mem_block_valid_f( + pmasys_channel_mem_block_valid_false_v())); + + return 0; +} + +int t234_hwpm_stream_mem_bytes(struct tegra_soc_hwpm *hwpm) +{ + u32 reg_val = 0U; + u32 *mem_bytes_kernel_u32 = (u32 *)(hwpm->mem_bytes_kernel); + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + /* Currently, PMA has only one perfmux */ + hwpm_ip_perfmux *pma_perfmux = + active_chip->chip_ips[T234_HWPM_IP_PMA]->ip_perfmux[0U]; + + tegra_hwpm_fn(hwpm, " "); + + *mem_bytes_kernel_u32 = TEGRA_SOC_HWPM_MEM_BYTES_INVALID; + + reg_val = tegra_hwpm_readl(hwpm, 
pma_perfmux, + pmasys_channel_control_user_r(0)); + reg_val = set_field(reg_val, + pmasys_channel_control_user_update_bytes_m(), + pmasys_channel_control_user_update_bytes_doit_f()); + tegra_hwpm_writel(hwpm, pma_perfmux, + pmasys_channel_control_user_r(0), reg_val); + + return 0; +} + +int t234_hwpm_disable_pma_streaming(struct tegra_soc_hwpm *hwpm) +{ + u32 reg_val = 0U; + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + /* Currently, PMA has only one perfmux */ + hwpm_ip_perfmux *pma_perfmux = + active_chip->chip_ips[T234_HWPM_IP_PMA]->ip_perfmux[0U]; + + tegra_hwpm_fn(hwpm, " "); + + /* Disable PMA streaming */ + reg_val = tegra_hwpm_readl(hwpm, pma_perfmux, + pmasys_trigger_config_user_r(0)); + reg_val = set_field(reg_val, + pmasys_trigger_config_user_record_stream_m(), + pmasys_trigger_config_user_record_stream_disable_f()); + tegra_hwpm_writel(hwpm, pma_perfmux, + pmasys_trigger_config_user_r(0), reg_val); + + reg_val = tegra_hwpm_readl(hwpm, pma_perfmux, + pmasys_channel_control_user_r(0)); + reg_val = set_field(reg_val, + pmasys_channel_control_user_stream_m(), + pmasys_channel_control_user_stream_disable_f()); + tegra_hwpm_writel(hwpm, pma_perfmux, + pmasys_channel_control_user_r(0), reg_val); + + return 0; +} + +int t234_hwpm_update_mem_bytes_get_ptr(struct tegra_soc_hwpm *hwpm, + u64 mem_bump) +{ + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + /* Currently, PMA has only one perfmux */ + hwpm_ip_perfmux *pma_perfmux = + active_chip->chip_ips[T234_HWPM_IP_PMA]->ip_perfmux[0U]; + + tegra_hwpm_fn(hwpm, " "); + + if (mem_bump > (u64)U32_MAX) { + tegra_hwpm_err(hwpm, "mem_bump is out of bounds"); + return -EINVAL; + } + + tegra_hwpm_writel(hwpm, pma_perfmux, + pmasys_channel_mem_bump_r(0), mem_bump); + + return 0; +} + +u64 t234_hwpm_get_mem_bytes_put_ptr(struct tegra_soc_hwpm *hwpm) +{ + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + /* Currently, PMA has only one perfmux */ + hwpm_ip_perfmux *pma_perfmux = + active_chip->chip_ips[T234_HWPM_IP_PMA]->ip_perfmux[0U]; + + tegra_hwpm_fn(hwpm, " "); + + return (u64)tegra_hwpm_readl(hwpm, pma_perfmux, + pmasys_channel_mem_head_r(0)); +} + +bool t234_hwpm_membuf_overflow_status(struct tegra_soc_hwpm *hwpm) +{ + u32 reg_val, field_val; + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + /* Currently, PMA has only one perfmux */ + hwpm_ip_perfmux *pma_perfmux = + active_chip->chip_ips[T234_HWPM_IP_PMA]->ip_perfmux[0U]; + + tegra_hwpm_fn(hwpm, " "); + + reg_val = tegra_hwpm_readl(hwpm, pma_perfmux, + pmasys_channel_status_secure_r(0)); + field_val = pmasys_channel_status_secure_membuf_status_v( + reg_val); + + return (field_val == + pmasys_channel_status_secure_membuf_status_overflowed_v()); +} diff --git a/hal/t234/t234_soc_hwpm_regops_allowlist.h b/hal/t234/t234_hwpm_regops_allowlist.c similarity index 72% rename from hal/t234/t234_soc_hwpm_regops_allowlist.h rename to hal/t234/t234_hwpm_regops_allowlist.c index 46376b9..b489cee 100644 --- a/hal/t234/t234_soc_hwpm_regops_allowlist.h +++ b/hal/t234/t234_hwpm_regops_allowlist.c @@ -1,36 +1,24 @@ /* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
* - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . * * This file is autogenerated. Do not edit. */ -#ifndef T234_SOC_HWPM_REGOPS_ALLOWLIST_H -#define T234_SOC_HWPM_REGOPS_ALLOWLIST_H +#include "t234_hwpm_regops_allowlist.h" -struct allowlist { - u64 reg_offset; - bool zero_at_init; -}; - -struct allowlist t234_perfmon_alist[] = { +struct allowlist t234_perfmon_alist[67] = { {0x00000000, true}, {0x00000004, true}, {0x00000008, true}, @@ -100,7 +88,7 @@ struct allowlist t234_perfmon_alist[] = { {0x00000130, true}, }; -struct allowlist t234_pma_res_cmd_slice_rtr_alist[] = { +struct allowlist t234_pma_res_cmd_slice_rtr_alist[86] = { {0x00000000, false}, {0x00000008, false}, {0x0000000c, false}, @@ -189,11 +177,11 @@ struct allowlist t234_pma_res_cmd_slice_rtr_alist[] = { {0x0000075c, false}, }; -struct allowlist t234_pma_res_pma_alist[] = { +struct allowlist t234_pma_res_pma_alist[1] = { {0x00000628, true}, }; -struct allowlist t234_rtr_alist[] = { +struct allowlist t234_rtr_alist[8] = { {0x00000000, false}, {0x00000008, false}, {0x0000000c, false}, @@ -204,7 +192,7 @@ struct allowlist t234_rtr_alist[] = { {0x00000154, false}, }; -struct allowlist t234_vi_thi_alist[] = { +struct allowlist t234_vi_thi_alist[7] = { {0x0000e800, false}, {0x0000e804, false}, {0x0000e808, true}, @@ -214,7 +202,7 @@ struct allowlist t234_vi_thi_alist[] = { {0x0000e818, true}, }; -struct allowlist t234_isp_thi_alist[] = { +struct allowlist t234_isp_thi_alist[7] = { {0x000091c0, false}, {0x000091c4, false}, {0x000091c8, true}, @@ -224,7 +212,7 @@ struct allowlist t234_isp_thi_alist[] = { {0x000091d8, true}, }; -struct allowlist t234_vic_alist[] = { +struct allowlist t234_vic_alist[9] = { {0x00001088, false}, {0x000010a8, false}, {0x00001c00, true}, @@ -236,7 +224,7 @@ struct allowlist t234_vic_alist[] = { {0x00001c18, false}, }; -struct allowlist t234_ofa_alist[] = { +struct allowlist t234_ofa_alist[8] = { {0x00001088, false}, {0x000010a8, false}, 
{0x00003308, true}, @@ -247,7 +235,7 @@ struct allowlist t234_ofa_alist[] = { {0x0000331c, false}, }; -struct allowlist t234_pva0_pm_alist[] = { +struct allowlist t234_pva0_pm_alist[9] = { {0x00008000, false}, {0x00008004, false}, {0x00008008, false}, @@ -259,7 +247,7 @@ struct allowlist t234_pva0_pm_alist[] = { {0x00008020, true}, }; -struct allowlist t234_nvdla_alist[] = { +struct allowlist t234_nvdla_alist[34] = { {0x00001088, false}, {0x000010a8, false}, {0x0001a000, false}, @@ -296,12 +284,12 @@ struct allowlist t234_nvdla_alist[] = { {0x0001a07c, true}, }; -struct allowlist t234_mgbe_alist[] = { +struct allowlist t234_mgbe_alist[2] = { {0x00008020, true}, {0x00008024, false}, }; -struct allowlist t234_nvdec_alist[] = { +struct allowlist t234_nvdec_alist[8] = { {0x00001088, false}, {0x000010a8, false}, {0x00001b48, false}, @@ -312,7 +300,7 @@ struct allowlist t234_nvdec_alist[] = { {0x00001b5c, true}, }; -struct allowlist t234_nvenc_alist[] = { +struct allowlist t234_nvenc_alist[9] = { {0x00001088, false}, {0x000010a8, false}, {0x00002134, true}, @@ -324,50 +312,48 @@ struct allowlist t234_nvenc_alist[] = { {0x00002130, false}, }; -struct allowlist t234_pcie_ctl_alist[] = { +struct allowlist t234_pcie_ctl_alist[2] = { {0x00000174, true}, {0x00000178, false}, }; -struct allowlist t234_disp_alist[] = { +struct allowlist t234_disp_alist[3] = { {0x0001e118, true}, {0x0001e120, true}, {0x0001e124, false}, }; -struct allowlist t234_mss_channel_alist[] = { +struct allowlist t234_mss_channel_alist[2] = { {0x00000814, true}, {0x0000082c, true}, }; -struct allowlist t234_mss_nvlink_alist[] = { +struct allowlist t234_mss_nvlink_alist[1] = { {0x00000a30, true}, }; -struct allowlist t234_mc0to7_res_mss_iso_niso_hub_alist[] = { +struct allowlist t234_mc0to7_res_mss_iso_niso_hub_alist[2] = { {0x00000818, true}, {0x0000081c, true}, }; -struct allowlist t234_mc8_res_mss_iso_niso_hub_alist[] = { +struct allowlist t234_mc8_res_mss_iso_niso_hub_alist[1] = { {0x00000828, true}, }; -struct allowlist t234_mcb_mss_mcf_alist[] = { +struct allowlist t234_mcb_mss_mcf_alist[4] = { {0x00000800, true}, {0x00000820, true}, {0x0000080c, true}, {0x00000824, true}, }; -struct allowlist t234_mc0to1_mss_mcf_alist[] = { +struct allowlist t234_mc0to1_mss_mcf_alist[3] = { {0x00000808, true}, {0x00000804, true}, {0x00000810, true}, }; -struct allowlist t234_mc2to7_mss_mcf_alist[] = { +struct allowlist t234_mc2to7_mss_mcf_alist[1] = { {0x00000810, true}, }; - -#endif /* T234_SOC_HWPM_REGOPS_ALLOWLIST_H */ diff --git a/hal/t234/t234_hwpm_regops_allowlist.h b/hal/t234/t234_hwpm_regops_allowlist.h new file mode 100644 index 0000000..c2b0723 --- /dev/null +++ b/hal/t234/t234_hwpm_regops_allowlist.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file is autogenerated. Do not edit. 
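Giving these table definitions explicit bounds that match the extern declarations in the header below lets the compiler catch a size mismatch when the definition file includes the header, and lets callers use ARRAY_SIZE() directly on the declarations. A small consumer-side sketch (the aperture base address is hypothetical and pr_debug() is used only for illustration):

#include <linux/kernel.h>
#include <linux/printk.h>
#include "t234_hwpm_regops_allowlist.h"

static void example_dump_vic_alist(u64 aperture_base_pa)
{
        u32 idx;

        for (idx = 0U; idx < ARRAY_SIZE(t234_vic_alist); idx++) {
                /* reg_offset entries are relative to the owning aperture. */
                pr_debug("allowed 0x%llx zero_at_init=%d\n",
                        aperture_base_pa + t234_vic_alist[idx].reg_offset,
                        t234_vic_alist[idx].zero_at_init);
        }
}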
+ */ + +#ifndef T234_HWPM_REGOPS_ALLOWLIST_H +#define T234_HWPM_REGOPS_ALLOWLIST_H + +#include + +struct allowlist { + u64 reg_offset; + bool zero_at_init; +}; + +extern struct allowlist t234_perfmon_alist[67]; +extern struct allowlist t234_pma_res_cmd_slice_rtr_alist[86]; +extern struct allowlist t234_pma_res_pma_alist[1]; +extern struct allowlist t234_rtr_alist[8]; +extern struct allowlist t234_vi_thi_alist[7]; +extern struct allowlist t234_isp_thi_alist[7]; +extern struct allowlist t234_vic_alist[9]; +extern struct allowlist t234_ofa_alist[8]; +extern struct allowlist t234_pva0_pm_alist[9]; +extern struct allowlist t234_nvdla_alist[34]; +extern struct allowlist t234_mgbe_alist[2]; +extern struct allowlist t234_nvdec_alist[8]; +extern struct allowlist t234_nvenc_alist[9]; +extern struct allowlist t234_pcie_ctl_alist[2]; +extern struct allowlist t234_disp_alist[3]; +extern struct allowlist t234_mss_channel_alist[2]; +extern struct allowlist t234_mss_nvlink_alist[1]; +extern struct allowlist t234_mc0to7_res_mss_iso_niso_hub_alist[2]; +extern struct allowlist t234_mc8_res_mss_iso_niso_hub_alist[1]; +extern struct allowlist t234_mcb_mss_mcf_alist[4]; +extern struct allowlist t234_mc0to1_mss_mcf_alist[3]; +extern struct allowlist t234_mc2to7_mss_mcf_alist[1]; +#endif /* T234_HWPM_REGOPS_ALLOWLIST_H */ diff --git a/hal/t234/t234_hwpm_regops_utils.c b/hal/t234/t234_hwpm_regops_utils.c new file mode 100644 index 0000000..7d1762d --- /dev/null +++ b/hal/t234/t234_hwpm_regops_utils.c @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include + +#include +#include +#include +#include + +static bool t234_hwpm_is_addr_in_ip_perfmon(struct tegra_soc_hwpm *hwpm, + u64 phys_addr, u32 ip_idx, struct hwpm_ip_aperture **aperture) +{ + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + struct hwpm_ip *chip_ip = active_chip->chip_ips[ip_idx]; + hwpm_ip_perfmon *perfmon = NULL; + u64 address_offset = 0ULL; + u32 perfmon_idx = 0U; + + tegra_hwpm_fn(hwpm, " "); + + /* Check if phys addr doesn't belong to IP perfmon range */ + if ((phys_addr < chip_ip->perfmon_range_start) || + (phys_addr > chip_ip->perfmon_range_end)) { + return false; + } + + /* Find perfmon idx corresponding phys addr */ + address_offset = phys_addr - chip_ip->perfmon_range_start; + perfmon_idx = (u32)(address_offset / chip_ip->inst_perfmon_stride); + + perfmon = chip_ip->ip_perfmon[perfmon_idx]; + + /* Check if perfmon is populated */ + if (perfmon == NULL) { + /* + * NOTE: MSS channel and ISO NISO hub IPs have same perfmon + * range but differ in populated perfmons. In this case, + * NULL perfmon may not be a failure indication. + * Log this result for debug and return false. 
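For clarity, the perfmon index derivation above with hypothetical values (not real T234 addresses):

        u64 perfmon_range_start = 0x0f100000ULL;        /* hypothetical */
        u64 inst_perfmon_stride = 0x2000ULL;            /* hypothetical */
        u64 phys_addr = 0x0f104010ULL;
        u64 address_offset = phys_addr - perfmon_range_start;          /* 0x4010 */
        u32 perfmon_idx = (u32)(address_offset / inst_perfmon_stride); /* 2 */

so this access would land in the third perfmon slot of that IP.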
+ */ + tegra_hwpm_dbg(hwpm, hwpm_info, + "Accessing IP %d unpopulated perfmon_idx %d", + ip_idx, perfmon_idx); + return false; + } + + /* Make sure that perfmon belongs to available IP instances */ + if ((perfmon->hw_inst_mask & chip_ip->fs_mask) == 0U) { + /* + * NOTE: User accessing this address indicates that + * case 1: perfmon (corresponding IP HW instance) is available + * case 2: computed allowlist is incorrect + * For case 1, + * Perfmon information is added statically. + * It is possible that perfmon (or IP) HW instance is not + * available in a configuration. + * This is a valid case, return false to indicate. + */ + tegra_hwpm_err(hwpm, + "accessed IP %d perfmon %d marked unavailable", + ip_idx, perfmon_idx); + return false; + } + + /* Make sure phys addr belongs to the perfmon */ + if ((phys_addr >= perfmon->start_abs_pa) && + (phys_addr <= perfmon->end_abs_pa)) { + if (t234_hwpm_check_alist(hwpm, perfmon, phys_addr)) { + *aperture = perfmon; + return true; + } + tegra_hwpm_dbg(hwpm, hwpm_verbose, + "phys_addr 0x%llx not in IP %d perfmon_idx %d alist", + phys_addr, ip_idx, perfmon_idx); + return false; + } + + tegra_hwpm_err(hwpm, "Execution shouldn't reach here"); + return false; +} + +static bool t234_hwpm_is_addr_in_ip_perfmux(struct tegra_soc_hwpm *hwpm, + u64 phys_addr, u32 ip_idx, struct hwpm_ip_aperture **aperture) +{ + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + struct hwpm_ip *chip_ip = active_chip->chip_ips[ip_idx]; + hwpm_ip_perfmux *perfmux = NULL; + u64 address_offset = 0ULL; + u32 perfmux_idx = 0U; + + tegra_hwpm_fn(hwpm, " "); + + /* Check if phys addr doesn't belong to IP perfmux range */ + if ((phys_addr < chip_ip->perfmux_range_start) || + (phys_addr > chip_ip->perfmux_range_end)) { + return false; + } + + /* Find perfmux idx corresponding phys addr */ + address_offset = phys_addr - chip_ip->perfmux_range_start; + perfmux_idx = (u32)(address_offset / chip_ip->inst_perfmux_stride); + + perfmux = chip_ip->ip_perfmux[perfmux_idx]; + + /* Check if perfmux is populated */ + if (perfmux == NULL) { + /* + * NOTE: MSS channel and ISO NISO hub IPs have same perfmux + * range but differ in populated perfmuxes. In this case, + * NULL perfmux may not be a failure indication. + * Log this result for debug and return false. + */ + tegra_hwpm_dbg(hwpm, hwpm_info, + "Accessing IP %d unpopulated perfmux_idx %d", + ip_idx, perfmux_idx); + return false; + } + + /* Make sure that perfmux belongs to available IP instances */ + if ((perfmux->hw_inst_mask & chip_ip->fs_mask) == 0U) { + /* + * NOTE: User accessing this address indicates that + * case 1: perfmux (corresponding IP HW instance) is available + * case 2: computed allowlist is incorrect + * For case 1, + * Perfmux information is added statically. + * It is possible that perfmux (or IP) HW instance is not + * available in a configuration. + * This is a valid case, return false to indicate. 
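t234_hwpm_check_alist() is called here but defined elsewhere. Assuming the aperture carries alist/alist_size fields analogous to the removed hwpm_resource_aperture (an assumption, not confirmed by this hunk), a minimal linear-scan sketch consistent with the allowlist tables above would be:

static bool example_check_alist(struct hwpm_ip_aperture *aperture,
        u64 phys_addr)
{
        u64 idx;

        for (idx = 0ULL; idx < aperture->alist_size; idx++) {
                if (phys_addr == (aperture->start_abs_pa +
                                aperture->alist[idx].reg_offset)) {
                        return true;
                }
        }
        return false;
}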
+ */ + tegra_hwpm_err(hwpm, + "accessed IP %d perfmux %d marked unavailable", + ip_idx, perfmux_idx); + return false; + } + + /* Make sure phys addr belongs to the perfmux */ + if ((phys_addr >= perfmux->start_abs_pa) && + (phys_addr <= perfmux->end_abs_pa)) { + if (t234_hwpm_check_alist(hwpm, perfmux, phys_addr)) { + *aperture = perfmux; + return true; + } + tegra_hwpm_dbg(hwpm, hwpm_verbose, + "phys_addr 0x%llx not in IP %d perfmux_idx %d alist", + phys_addr, ip_idx, perfmux_idx); + return false; + } + + tegra_hwpm_err(hwpm, "Execution shouldn't reach here"); + return false; +} + +/* + * Find aperture corresponding to phys addr + */ +static int t234_hwpm_find_aperture(struct tegra_soc_hwpm *hwpm, + u64 phys_addr, struct hwpm_ip_aperture **aperture) +{ + struct tegra_soc_hwpm_chip *active_chip = NULL; + struct hwpm_ip *chip_ip = NULL; + u32 ip_idx; + + tegra_hwpm_fn(hwpm, " "); + + if (hwpm->active_chip == NULL) { + tegra_hwpm_err(hwpm, "chip struct not populated"); + return -ENODEV; + } + + active_chip = hwpm->active_chip; + + /* Find IP index */ + for (ip_idx = 0U; ip_idx < T234_HWPM_IP_MAX; ip_idx++) { + chip_ip = active_chip->chip_ips[ip_idx]; + if (chip_ip == NULL) { + tegra_hwpm_err(hwpm, "IP %d not populated as expected", + ip_idx); + return -ENODEV; + } + + if (!chip_ip->reserved) { + continue; + } + + if (t234_hwpm_is_addr_in_ip_perfmux( + hwpm, phys_addr, ip_idx, aperture)) { + return 0; + } + + if (t234_hwpm_is_addr_in_ip_perfmon( + hwpm, phys_addr, ip_idx, aperture)) { + return 0; + } + } + tegra_hwpm_err(hwpm, "addr 0x%llx not found in any IP", phys_addr); + return -EINVAL; +} + +int t234_hwpm_exec_reg_ops(struct tegra_soc_hwpm *hwpm, + struct tegra_soc_hwpm_reg_op *reg_op) +{ + int ret = 0; + u32 reg_val = 0U; + u32 ip_idx = TEGRA_SOC_HWPM_IP_INACTIVE; /* ip_idx is unknown */ + struct hwpm_ip_aperture *aperture = NULL; + + tegra_hwpm_fn(hwpm, " "); + + /* Find IP aperture containing phys_addr in allowlist */ + ret = t234_hwpm_find_aperture(hwpm, reg_op->phys_addr, &aperture); + if (ret < 0) { + if (ret == -ENODEV) { + tegra_hwpm_err(hwpm, "HWPM structures not populated"); + reg_op->status = + TEGRA_SOC_HWPM_REG_OP_STATUS_INSUFFICIENT_PERMISSIONS; + } else { + /* Phys addr not available in IP allowlist */ + tegra_hwpm_err(hwpm, + "Phys addr 0x%llx not available in IP %d", + reg_op->phys_addr, ip_idx); + reg_op->status = + TEGRA_SOC_HWPM_REG_OP_STATUS_INVALID_ADDR; + } + goto fail; + } + + tegra_hwpm_dbg(hwpm, hwpm_verbose, + "Found phys addr (0x%llx): aperture (0x%llx-0x%llx)", + aperture->start_abs_pa, aperture->end_abs_pa); + + switch (reg_op->cmd) { + case TEGRA_SOC_HWPM_REG_OP_CMD_RD32: + reg_op->reg_val_lo = regops_readl(hwpm, + aperture, + reg_op->phys_addr); + reg_op->status = TEGRA_SOC_HWPM_REG_OP_STATUS_SUCCESS; + break; + + case TEGRA_SOC_HWPM_REG_OP_CMD_RD64: + reg_op->reg_val_lo = regops_readl(hwpm, + aperture, + reg_op->phys_addr); + reg_op->reg_val_hi = regops_readl(hwpm, + aperture, + reg_op->phys_addr + 4ULL); + reg_op->status = TEGRA_SOC_HWPM_REG_OP_STATUS_SUCCESS; + break; + + /* Read Modify Write operation */ + case TEGRA_SOC_HWPM_REG_OP_CMD_WR32: + reg_val = regops_readl(hwpm, aperture, reg_op->phys_addr); + reg_val = set_field(reg_val, reg_op->mask_lo, + reg_op->reg_val_lo); + regops_writel(hwpm, aperture, reg_op->phys_addr, reg_val); + reg_op->status = TEGRA_SOC_HWPM_REG_OP_STATUS_SUCCESS; + break; + + /* Read Modify Write operation */ + case TEGRA_SOC_HWPM_REG_OP_CMD_WR64: + /* Lower 32 bits */ + reg_val = regops_readl(hwpm, aperture, 
reg_op->phys_addr); + reg_val = set_field(reg_val, reg_op->mask_lo, + reg_op->reg_val_lo); + regops_writel(hwpm, aperture, reg_op->phys_addr, reg_val); + + /* Upper 32 bits */ + reg_val = regops_readl(hwpm, aperture, + reg_op->phys_addr + 4ULL); + reg_val = set_field(reg_val, reg_op->mask_hi, + reg_op->reg_val_hi); + regops_writel(hwpm, aperture, reg_op->phys_addr, reg_val); + reg_op->status = TEGRA_SOC_HWPM_REG_OP_STATUS_SUCCESS; + break; + + default: + tegra_hwpm_err(hwpm, "Invalid reg op command(%u)", reg_op->cmd); + reg_op->status = TEGRA_SOC_HWPM_REG_OP_STATUS_INVALID_CMD; + ret = -EINVAL; + break; + } +fail: + return ret; +} diff --git a/hal/t234/t234_hwpm_resource_utils.c b/hal/t234/t234_hwpm_resource_utils.c new file mode 100644 index 0000000..6c62d50 --- /dev/null +++ b/hal/t234/t234_hwpm_resource_utils.c @@ -0,0 +1,548 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include + +static int t234_hwpm_perfmon_enable(struct tegra_soc_hwpm *hwpm, + hwpm_ip_perfmon *perfmon) +{ + u32 reg_val; + + tegra_hwpm_fn(hwpm, " "); + + /* Enable */ + tegra_hwpm_dbg(hwpm, hwpm_verbose, "Enabling PERFMON(0x%llx - 0x%llx)", + perfmon->start_abs_pa, perfmon->end_abs_pa); + + reg_val = tegra_hwpm_readl(hwpm, perfmon, + pmmsys_sys0_enginestatus_r(0)); + reg_val = set_field(reg_val, pmmsys_sys0_enginestatus_enable_m(), + pmmsys_sys0_enginestatus_enable_out_f()); + tegra_hwpm_writel(hwpm, perfmon, + pmmsys_sys0_enginestatus_r(0), reg_val); + + return 0; +} + +static int t234_hwpm_perfmux_reserve(struct tegra_soc_hwpm *hwpm, + hwpm_ip_perfmux *perfmux) +{ + int err = 0; + int ret = 0; + + tegra_hwpm_fn(hwpm, " "); + + /* + * Indicate that HWPM driver is initializing monitoring. + * Since perfmux is controlled by IP, indicate monitoring enabled + * by disabling IP power management. 
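Note that the WR32/WR64 commands in t234_hwpm_exec_reg_ops() above are read-modify-write: only the bits set in mask_lo/mask_hi change, everything else is preserved. A hypothetical caller-side sketch (the address and field values are made up, and hwpm is assumed to be a valid handle):

        struct tegra_soc_hwpm_reg_op op = {
                .phys_addr = 0x0f10a004ULL,     /* hypothetical allowlisted address */
                .cmd = TEGRA_SOC_HWPM_REG_OP_CMD_WR32,
                .mask_lo = 0x00000001U,         /* touch bit 0 only */
                .reg_val_lo = 0x00000001U,
        };
        int err = t234_hwpm_exec_reg_ops(hwpm, &op);

        if ((err == 0) && (op.status == TEGRA_SOC_HWPM_REG_OP_STATUS_SUCCESS)) {
                /* Bit 0 is now set; the remaining bits kept their old values. */
        }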
+ */ + /* Make sure that ip_ops are initialized */ + if ((perfmux->ip_ops.ip_dev != NULL) && + (perfmux->ip_ops.hwpm_ip_pm != NULL)) { + err = (*perfmux->ip_ops.hwpm_ip_pm)( + perfmux->ip_ops.ip_dev, true); + if (err != 0) { + tegra_hwpm_err(hwpm, "Runtime PM disable failed"); + } + } else { + tegra_hwpm_dbg(hwpm, hwpm_verbose, "Runtime PM not configured"); + } + + perfmux->start_pa = perfmux->start_abs_pa; + perfmux->end_pa = perfmux->end_abs_pa; + + /* Allocate fake registers */ + if (hwpm->fake_registers_enabled) { + u64 num_regs = 0; + u32 **fake_regs = &perfmux->fake_registers; + + num_regs = (perfmux->end_pa + 1 - perfmux->start_pa) / + sizeof(u32); + *fake_regs = (u32 *)kzalloc(sizeof(u32) * num_regs, GFP_KERNEL); + if (!(*fake_regs)) { + tegra_hwpm_err(hwpm, "Aperture(0x%llx - 0x%llx):" + " Couldn't allocate memory for fake registers", + perfmux->start_pa, perfmux->end_pa); + ret = -ENOMEM; + goto fail; + } + } + +fail: + return ret; +} + +static int t234_hwpm_perfmux_disable(struct tegra_soc_hwpm *hwpm, + hwpm_ip_perfmux *perfmux) +{ + int err = 0; + + tegra_hwpm_fn(hwpm, " "); + + /* + * Indicate that HWPM monitoring is disabled/closed. + * Since perfmux is controlled by IP, indicate monitoring disabled + * by enabling IP power management. + */ + /* Make sure that ip_ops are initialized */ + if ((perfmux->ip_ops.ip_dev != NULL) && + (perfmux->ip_ops.hwpm_ip_pm != NULL)) { + err = (*perfmux->ip_ops.hwpm_ip_pm)( + perfmux->ip_ops.ip_dev, false); + if (err != 0) { + tegra_hwpm_err(hwpm, "Runtime PM enable failed"); + } + } else { + tegra_hwpm_dbg(hwpm, hwpm_verbose, "Runtime PM not configured"); + } + + return 0; +} + +static int t234_hwpm_perfmux_release(struct tegra_soc_hwpm *hwpm, + hwpm_ip_perfmux *perfmux) +{ + tegra_hwpm_fn(hwpm, " "); + + /* + * Release + * This is only required for for fake registers + */ + if (perfmux->fake_registers) { + kfree(perfmux->fake_registers); + perfmux->fake_registers = NULL; + } + + return 0; +} + +int t234_hwpm_perfmon_reserve(struct tegra_soc_hwpm *hwpm, + hwpm_ip_perfmon *perfmon) +{ + struct resource *res = NULL; + + tegra_hwpm_fn(hwpm, " "); + + /* Reserve */ + res = platform_get_resource_byname(hwpm->pdev, + IORESOURCE_MEM, perfmon->name); + if ((!res) || (res->start == 0) || (res->end == 0)) { + tegra_hwpm_err(hwpm, "Failed to get perfmon %s", perfmon->name); + return -ENOMEM; + } + + perfmon->dt_mmio = devm_ioremap(hwpm->dev, res->start, + resource_size(res)); + if (IS_ERR(perfmon->dt_mmio)) { + tegra_hwpm_err(hwpm, "Couldn't map perfmon %s", perfmon->name); + return PTR_ERR(perfmon->dt_mmio); + } + + perfmon->start_pa = res->start; + perfmon->end_pa = res->end; + + if (hwpm->fake_registers_enabled) { + u64 num_regs = (res->end + 1 - res->start) / sizeof(u32); + perfmon->fake_registers = (u32 *)kzalloc(sizeof(u32) * num_regs, + GFP_KERNEL); + if (perfmon->fake_registers == NULL) { + tegra_hwpm_err(hwpm, "Perfmon (0x%llx - 0x%llx) " + "Couldn't allocate memory for fake regs", + perfmon->start_abs_pa, perfmon->end_abs_pa); + return -ENOMEM; + } + } + return 0; +} + +static int t234_hwpm_perfmon_disable(struct tegra_soc_hwpm *hwpm, + hwpm_ip_perfmon *perfmon) +{ + u32 reg_val; + + tegra_hwpm_fn(hwpm, " "); + + /* Disable */ + tegra_hwpm_dbg(hwpm, hwpm_verbose, "Disabling PERFMON(0x%llx - 0x%llx)", + perfmon->start_abs_pa, perfmon->end_abs_pa); + + reg_val = tegra_hwpm_readl(hwpm, perfmon, pmmsys_control_r(0)); + reg_val = set_field(reg_val, pmmsys_control_mode_m(), + pmmsys_control_mode_disable_f()); + tegra_hwpm_writel(hwpm, perfmon, 
pmmsys_control_r(0), reg_val); + + return 0; +} + +int t234_hwpm_perfmon_release(struct tegra_soc_hwpm *hwpm, + hwpm_ip_perfmon *perfmon) +{ + tegra_hwpm_fn(hwpm, " "); + + if (perfmon->dt_mmio == NULL) { + tegra_hwpm_err(hwpm, "Perfmon was not mapped"); + return -EINVAL; + } + devm_iounmap(hwpm->dev, perfmon->dt_mmio); + perfmon->dt_mmio = NULL; + perfmon->start_pa = 0ULL; + perfmon->end_pa = 0ULL; + + if (perfmon->fake_registers) { + kfree(perfmon->fake_registers); + perfmon->fake_registers = NULL; + } + return 0; +} + +int t234_hwpm_release_all_resources(struct tegra_soc_hwpm *hwpm) +{ + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + struct hwpm_ip *chip_ip = NULL; + hwpm_ip_perfmon *perfmon = NULL; + hwpm_ip_perfmux *perfmux = NULL; + u32 ip_idx; + u32 perfmux_idx, perfmon_idx; + unsigned long floorsweep_info = 0UL; + unsigned long inst_idx = 0UL; + int err = 0; + + tegra_hwpm_fn(hwpm, " "); + + for (ip_idx = 0U; ip_idx < T234_HWPM_IP_MAX; ip_idx++) { + chip_ip = active_chip->chip_ips[ip_idx]; + + /* PMA and RTR will be released later */ + if ((ip_idx == T234_HWPM_IP_PMA) || + (ip_idx == T234_HWPM_IP_RTR)) { + continue; + } + + /* Disable only available IPs */ + if (chip_ip->override_enable) { + /* IP not available */ + continue; + } + + /* Disable and release only reserved IPs */ + if (!chip_ip->reserved) { + continue; + } + + if (chip_ip->fs_mask == 0U) { + /* No IP instance is available */ + continue; + } + + floorsweep_info = (unsigned long)chip_ip->fs_mask; + + for_each_set_bit(inst_idx, &floorsweep_info, 32U) { + /* Release all perfmon associated with inst_idx */ + for (perfmon_idx = 0U; + perfmon_idx < chip_ip->num_perfmon_slots; + perfmon_idx++) { + perfmon = chip_ip->ip_perfmon[perfmon_idx]; + + if (perfmon == NULL) { + continue; + } + + if (perfmon->hw_inst_mask != BIT(inst_idx)) { + continue; + } + + err = t234_hwpm_perfmon_disable(hwpm, perfmon); + if (err != 0) { + tegra_hwpm_err(hwpm, "IP %d" + " perfmon %d disable failed", + ip_idx, perfmon_idx); + } + + err = t234_hwpm_perfmon_release(hwpm, perfmon); + if (err != 0) { + tegra_hwpm_err(hwpm, "IP %d" + " perfmon %d release failed", + ip_idx, perfmon_idx); + } + } + + /* Release all perfmux associated with inst_idx */ + for (perfmux_idx = 0U; + perfmux_idx < chip_ip->num_perfmux_slots; + perfmux_idx++) { + perfmux = chip_ip->ip_perfmux[perfmux_idx]; + + if (perfmux == NULL) { + continue; + } + + if (perfmux->hw_inst_mask != BIT(inst_idx)) { + continue; + } + + err = t234_hwpm_perfmux_disable(hwpm, perfmux); + if (err != 0) { + tegra_hwpm_err(hwpm, "IP %d" + " perfmux %d disable failed", + ip_idx, perfmux_idx); + } + + err = t234_hwpm_perfmux_release(hwpm, perfmux); + if (err != 0) { + tegra_hwpm_err(hwpm, "IP %d" + " perfmux %d release failed", + ip_idx, perfmux_idx); + } + } + } + chip_ip->reserved = false; + } + return 0; +} + +/* ip_idx is wrt enum t234_hwpm_active_ips */ +int t234_hwpm_reserve_given_resource(struct tegra_soc_hwpm *hwpm, u32 ip_idx) +{ + int err = 0, ret = 0; + u32 perfmux_idx, perfmon_idx; + unsigned long inst_idx = 0UL; + unsigned long floorsweep_info = 0UL, reserved_insts = 0UL; + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + struct hwpm_ip *chip_ip = active_chip->chip_ips[ip_idx]; + hwpm_ip_perfmon *perfmon = NULL; + hwpm_ip_perfmux *perfmux = NULL; + + floorsweep_info = (unsigned long)chip_ip->fs_mask; + + tegra_hwpm_fn(hwpm, " "); + + tegra_hwpm_dbg(hwpm, hwpm_info, "Reserve IP %d, fs_mask 0x%x", + ip_idx, chip_ip->fs_mask); + + /* PMA and RTR are already reserved 
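The instance loops in t234_hwpm_release_all_resources() key off fs_mask: each set bit selects one hardware instance, and a perfmon/perfmux participates only when its hw_inst_mask equals that bit. A self-contained sketch with a hypothetical mask of 0x5 (instances 0 and 2 present, instance 1 floorswept):

#include <linux/bitops.h>
#include <linux/printk.h>

static void example_walk_fs_mask(void)
{
        unsigned long fs_mask = 0x5UL;  /* hypothetical */
        unsigned long inst_idx;

        for_each_set_bit(inst_idx, &fs_mask, 32U) {
                /* Visits inst_idx 0, then 2; instance 1 is skipped. */
                pr_debug("instance %lu, hw_inst_mask 0x%lx\n",
                        inst_idx, BIT(inst_idx));
        }
}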
*/ + if ((ip_idx == T234_HWPM_IP_PMA) || (ip_idx == T234_HWPM_IP_RTR)) { + return 0; + } + + for_each_set_bit(inst_idx, &floorsweep_info, 32U) { + /* Reserve all perfmon belonging to this instance */ + for (perfmon_idx = 0U; perfmon_idx < chip_ip->num_perfmon_slots; + perfmon_idx++) { + perfmon = chip_ip->ip_perfmon[perfmon_idx]; + + if (perfmon == NULL) { + continue; + } + + if (perfmon->hw_inst_mask != BIT(inst_idx)) { + continue; + } + + err = t234_hwpm_perfmon_reserve(hwpm, perfmon); + if (err != 0) { + tegra_hwpm_err(hwpm, + "IP %d perfmon %d reserve failed", + ip_idx, perfmon_idx); + goto fail; + } + } + + /* Reserve all perfmux belonging to this instance */ + for (perfmux_idx = 0U; perfmux_idx < chip_ip->num_perfmux_slots; + perfmux_idx++) { + perfmux = chip_ip->ip_perfmux[perfmux_idx]; + + if (perfmux == NULL) { + continue; + } + + if (perfmux->hw_inst_mask != BIT(inst_idx)) { + continue; + } + + err = t234_hwpm_perfmux_reserve(hwpm, perfmux); + if (err != 0) { + tegra_hwpm_err(hwpm, + "IP %d perfmux %d reserve failed", + ip_idx, perfmux_idx); + goto fail; + } + } + + reserved_insts |= BIT(inst_idx); + } + chip_ip->reserved = true; + + return 0; +fail: + /* release reserved instances */ + for_each_set_bit(inst_idx, &reserved_insts, 32U) { + /* Release all perfmon belonging to this instance */ + for (perfmon_idx = 0U; perfmon_idx < chip_ip->num_perfmon_slots; + perfmon_idx++) { + perfmon = chip_ip->ip_perfmon[perfmon_idx]; + + if (perfmon == NULL) { + continue; + } + + if (perfmon->hw_inst_mask != BIT(inst_idx)) { + continue; + } + + ret = t234_hwpm_perfmon_disable(hwpm, perfmon); + if (ret != 0) { + tegra_hwpm_err(hwpm, + "IP %d perfmon %d disable failed", + ip_idx, perfmon_idx); + } + + ret = t234_hwpm_perfmon_release(hwpm, perfmon); + if (ret != 0) { + tegra_hwpm_err(hwpm, + "IP %d perfmon %d release failed", + ip_idx, perfmon_idx); + } + } + + /* Release all perfmux belonging to this instance */ + for (perfmux_idx = 0U; perfmux_idx < chip_ip->num_perfmux_slots; + perfmux_idx++) { + perfmux = chip_ip->ip_perfmux[perfmux_idx]; + + if (perfmux == NULL) { + continue; + } + + if (perfmux->hw_inst_mask != BIT(inst_idx)) { + continue; + } + + ret = t234_hwpm_perfmux_disable(hwpm, perfmux); + if (ret != 0) { + tegra_hwpm_err(hwpm, + "IP %d perfmux %d disable failed", + ip_idx, perfmux_idx); + } + + ret = t234_hwpm_perfmux_release(hwpm, perfmux); + if (ret != 0) { + tegra_hwpm_err(hwpm, + "IP %d perfmux %d release failed", + ip_idx, perfmux_idx); + } + } + } + return err; +} + +int t234_hwpm_bind_reserved_resources(struct tegra_soc_hwpm *hwpm) +{ + struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip; + struct hwpm_ip *chip_ip = NULL; + u32 ip_idx; + u32 perfmux_idx, perfmon_idx; + unsigned long inst_idx = 0UL; + unsigned long floorsweep_info = 0UL; + int err = 0; + hwpm_ip_perfmon *perfmon = NULL; + hwpm_ip_perfmux *perfmux = NULL; + + tegra_hwpm_fn(hwpm, " "); + + for (ip_idx = 0U; ip_idx < T234_HWPM_IP_MAX; ip_idx++) { + chip_ip = active_chip->chip_ips[ip_idx]; + + /* Skip unavailable IPs */ + if (!chip_ip->reserved) { + continue; + } + + if (chip_ip->fs_mask == 0U) { + /* No IP instance is available */ + continue; + } + + floorsweep_info = (unsigned long)chip_ip->fs_mask; + + for_each_set_bit(inst_idx, &floorsweep_info, 32U) { + /* Zero out necessary perfmux registers */ + for (perfmux_idx = 0U; + perfmux_idx < chip_ip->num_perfmux_slots; + perfmux_idx++) { + perfmux = chip_ip->ip_perfmux[perfmux_idx]; + + if (perfmux == NULL) { + continue; + } + + if (perfmux->hw_inst_mask != 
BIT(inst_idx)) { + continue; + } + + err = active_chip->zero_alist_regs( + hwpm, perfmux); + if (err != 0) { + tegra_hwpm_err(hwpm, "IP %d" + " perfmux %d zero regs failed", + ip_idx, perfmux_idx); + } + } + + /* Zero out necessary perfmon registers */ + /* And enable reporting of PERFMON status */ + for (perfmon_idx = 0U; + perfmon_idx < chip_ip->num_perfmon_slots; + perfmon_idx++) { + perfmon = chip_ip->ip_perfmon[perfmon_idx]; + + if (perfmon == NULL) { + continue; + } + + if (perfmon->hw_inst_mask != BIT(inst_idx)) { + continue; + } + + err = active_chip->zero_alist_regs( + hwpm, perfmon); + if (err != 0) { + tegra_hwpm_err(hwpm, "IP %d" + " perfmon %d zero regs failed", + ip_idx, perfmon_idx); + } + + err = t234_hwpm_perfmon_enable(hwpm, perfmon); + if (err != 0) { + tegra_hwpm_err(hwpm, "IP %d" + " perfmon %d enable failed", + ip_idx, perfmon_idx); + } + } + } + } + return err; +} diff --git a/hal/t234/t234_soc_hwpm_init.c b/hal/t234/t234_soc_hwpm_init.c deleted file mode 100644 index 2110363..0000000 --- a/hal/t234/t234_soc_hwpm_init.c +++ /dev/null @@ -1,1008 +0,0 @@ -/* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -void __iomem *t234_dt_apertures[T234_SOC_HWPM_NUM_DT_APERTURES]; -struct tegra_soc_hwpm_ip_ops t234_ip_info[T234_SOC_HWPM_NUM_DT_APERTURES]; - -/* - * Normally there is a 1-to-1 mapping between an MMIO aperture and a - * hwpm_resource_aperture struct. But the PMA MMIO aperture is used in - * multiple hwpm_resource_aperture structs. Therefore, we have to share the fake - * register array between these hwpm_resource_aperture structs. This is why we - * have to define the fake register array globally. For all other 1-to-1 - * mapping apertures the fake register arrays are directly embedded inside the - * hwpm_resource_aperture structs. 
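zero_alist_regs() is reached through the chip ops above and is not defined in this hunk. Assuming it clears every allowlist entry flagged zero_at_init, much like the removed t234_soc_hwpm_zero_alist_regs() further below, a sketch using the regops_writel() helper seen earlier would be:

static void example_zero_alist_regs(struct tegra_soc_hwpm *hwpm,
        struct hwpm_ip_aperture *aperture)
{
        u64 idx;

        for (idx = 0ULL; idx < aperture->alist_size; idx++) {
                if (aperture->alist[idx].zero_at_init) {
                        regops_writel(hwpm, aperture,
                                aperture->start_abs_pa +
                                aperture->alist[idx].reg_offset, 0U);
                }
        }
}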
- */ -u32 *t234_pma_fake_regs; - -struct hwpm_resource t234_hwpm_resources[TERGA_SOC_HWPM_NUM_RESOURCES] = { - [TEGRA_SOC_HWPM_RESOURCE_VI] = { - .reserved = false, - .map_size = ARRAY_SIZE(t234_vi_map), - .map = t234_vi_map, - }, - [TEGRA_SOC_HWPM_RESOURCE_ISP] = { - .reserved = false, - .map_size = ARRAY_SIZE(t234_isp_map), - .map = t234_isp_map, - }, - [TEGRA_SOC_HWPM_RESOURCE_VIC] = { - .reserved = false, - .map_size = ARRAY_SIZE(t234_vic_map), - .map = t234_vic_map, - }, - [TEGRA_SOC_HWPM_RESOURCE_OFA] = { - .reserved = false, - .map_size = ARRAY_SIZE(t234_ofa_map), - .map = t234_ofa_map, - }, - [TEGRA_SOC_HWPM_RESOURCE_PVA] = { - .reserved = false, - .map_size = ARRAY_SIZE(t234_pva_map), - .map = t234_pva_map, - }, - [TEGRA_SOC_HWPM_RESOURCE_NVDLA] = { - .reserved = false, - .map_size = ARRAY_SIZE(t234_nvdla_map), - .map = t234_nvdla_map, - }, - [TEGRA_SOC_HWPM_RESOURCE_MGBE] = { - .reserved = false, - .map_size = ARRAY_SIZE(t234_mgbe_map), - .map = t234_mgbe_map, - }, - [TEGRA_SOC_HWPM_RESOURCE_SCF] = { - .reserved = false, - .map_size = ARRAY_SIZE(t234_scf_map), - .map = t234_scf_map, - }, - [TEGRA_SOC_HWPM_RESOURCE_NVDEC] = { - .reserved = false, - .map_size = ARRAY_SIZE(t234_nvdec_map), - .map = t234_nvdec_map, - }, - [TEGRA_SOC_HWPM_RESOURCE_NVENC] = { - .reserved = false, - .map_size = ARRAY_SIZE(t234_nvenc_map), - .map = t234_nvenc_map, - }, - [TEGRA_SOC_HWPM_RESOURCE_PCIE] = { - .reserved = false, - .map_size = ARRAY_SIZE(t234_pcie_map), - .map = t234_pcie_map, - }, - [TEGRA_SOC_HWPM_RESOURCE_DISPLAY] = { - .reserved = false, - .map_size = ARRAY_SIZE(t234_display_map), - .map = t234_display_map, - }, - [TEGRA_SOC_HWPM_RESOURCE_MSS_CHANNEL] = { - .reserved = false, - .map_size = ARRAY_SIZE(t234_mss_channel_map), - .map = t234_mss_channel_map, - }, - [TEGRA_SOC_HWPM_RESOURCE_MSS_GPU_HUB] = { - .reserved = false, - .map_size = ARRAY_SIZE(t234_mss_gpu_hub_map), - .map = t234_mss_gpu_hub_map, - }, - [TEGRA_SOC_HWPM_RESOURCE_MSS_ISO_NISO_HUBS] = { - .reserved = false, - .map_size = ARRAY_SIZE(t234_mss_iso_niso_hub_map), - .map = t234_mss_iso_niso_hub_map, - }, - [TEGRA_SOC_HWPM_RESOURCE_MSS_MCF] = { - .reserved = false, - .map_size = ARRAY_SIZE(t234_mss_mcf_map), - .map = t234_mss_mcf_map, - }, - [TEGRA_SOC_HWPM_RESOURCE_PMA] = { - .reserved = false, - .map_size = ARRAY_SIZE(t234_pma_map), - .map = t234_pma_map, - }, - [TEGRA_SOC_HWPM_RESOURCE_CMD_SLICE_RTR] = { - .reserved = false, - .map_size = ARRAY_SIZE(t234_cmd_slice_rtr_map), - .map = t234_cmd_slice_rtr_map, - }, -}; - -void __iomem **t234_soc_hwpm_init_dt_apertures(void) -{ - return t234_dt_apertures; -} - -struct tegra_soc_hwpm_ip_ops *t234_soc_hwpm_init_ip_ops_info(void) -{ - return t234_ip_info; -} - -bool t234_soc_hwpm_is_perfmon(u32 dt_aperture) -{ - return IS_PERFMON(dt_aperture); -} - -u64 t234_soc_hwpm_get_perfmon_base(u32 dt_aperture) -{ - if (t234_soc_hwpm_is_perfmon(dt_aperture)) { - return PERFMON_BASE(dt_aperture); - } else if (dt_aperture == T234_SOC_HWPM_PMA_DT) { - return addr_map_pma_base_r(); - } else if (dt_aperture == T234_SOC_HWPM_RTR_DT) { - return addr_map_rtr_base_r(); - } else { - return 0ULL; - } -} - -bool t234_soc_hwpm_is_dt_aperture(u32 dt_aperture) -{ - return (dt_aperture < T234_SOC_HWPM_NUM_DT_APERTURES); -} - -u32 t234_soc_hwpm_get_ip_aperture(struct tegra_soc_hwpm *hwpm, - u64 phys_address, u64 *ip_base_addr) -{ - enum t234_soc_hwpm_dt_aperture aperture = - TEGRA_SOC_HWPM_DT_APERTURE_INVALID; - - if ((phys_address >= addr_map_vi_thi_base_r()) && - (phys_address <= addr_map_vi_thi_limit_r())) 
{ - aperture = T234_SOC_HWPM_VI0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_vi_thi_base_r(); - } - } else if ((phys_address >= addr_map_vi2_thi_base_r()) && - (phys_address <= addr_map_vi2_thi_limit_r())) { - aperture = T234_SOC_HWPM_VI1_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_vi2_thi_base_r(); - } - } else if ((phys_address >= addr_map_isp_thi_base_r()) && - (phys_address <= addr_map_isp_thi_limit_r())) { - aperture = T234_SOC_HWPM_ISP0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_isp_thi_base_r(); - } - } else if ((phys_address >= addr_map_vic_base_r()) && - (phys_address <= addr_map_vic_limit_r())) { - aperture = T234_SOC_HWPM_VICA0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_vic_base_r(); - } - } else if ((phys_address >= addr_map_ofa_base_r()) && - (phys_address <= addr_map_ofa_limit_r())) { - aperture = T234_SOC_HWPM_OFAA0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_ofa_base_r(); - } - } else if ((phys_address >= addr_map_pva0_pm_base_r()) && - (phys_address <= addr_map_pva0_pm_limit_r())) { - aperture = T234_SOC_HWPM_PVAV0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_pva0_pm_base_r(); - } - } else if ((phys_address >= addr_map_nvdla0_base_r()) && - (phys_address <= addr_map_nvdla0_limit_r())) { - aperture = T234_SOC_HWPM_NVDLAB0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_nvdla0_base_r(); - } - } else if ((phys_address >= addr_map_nvdla1_base_r()) && - (phys_address <= addr_map_nvdla1_limit_r())) { - aperture = T234_SOC_HWPM_NVDLAB1_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_nvdla1_base_r(); - } - } else if ((phys_address >= addr_map_disp_base_r()) && - (phys_address <= addr_map_disp_limit_r())) { - aperture = T234_SOC_HWPM_NVDISPLAY0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_disp_base_r(); - } - } else if ((phys_address >= addr_map_mgbe0_base_r()) && - (phys_address <= addr_map_mgbe0_limit_r())) { - aperture = T234_SOC_HWPM_MGBE0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mgbe0_base_r(); - } - } else if ((phys_address >= addr_map_mgbe1_base_r()) && - (phys_address <= addr_map_mgbe1_limit_r())) { - aperture = T234_SOC_HWPM_MGBE1_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mgbe1_base_r(); - } - } else if ((phys_address >= addr_map_mgbe2_base_r()) && - (phys_address <= addr_map_mgbe2_limit_r())) { - aperture = T234_SOC_HWPM_MGBE2_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mgbe2_base_r(); - } - } else if ((phys_address >= addr_map_mgbe3_base_r()) && - (phys_address <= addr_map_mgbe3_limit_r())) { - aperture = T234_SOC_HWPM_MGBE3_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mgbe3_base_r(); - } - } else if ((phys_address >= addr_map_nvdec_base_r()) && - (phys_address <= addr_map_nvdec_limit_r())) { - aperture = T234_SOC_HWPM_NVDECA0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_nvdec_base_r(); - } - } else if ((phys_address >= addr_map_nvenc_base_r()) && - (phys_address <= addr_map_nvenc_limit_r())) { - aperture = T234_SOC_HWPM_NVENCA0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_nvenc_base_r(); - } - } else if ((phys_address >= addr_map_mss_nvlink_1_base_r()) && - (phys_address <= addr_map_mss_nvlink_1_limit_r())) { - aperture = T234_SOC_HWPM_MSSNVLHSH0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mss_nvlink_1_base_r(); - } - } else if ((phys_address >= addr_map_mss_nvlink_2_base_r()) && - (phys_address <= 
addr_map_mss_nvlink_2_limit_r())) { - aperture = T234_SOC_HWPM_MSSNVLHSH0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mss_nvlink_2_base_r(); - } - } else if ((phys_address >= addr_map_mss_nvlink_3_base_r()) && - (phys_address <= addr_map_mss_nvlink_3_limit_r())) { - aperture = T234_SOC_HWPM_MSSNVLHSH0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mss_nvlink_3_base_r(); - } - } else if ((phys_address >= addr_map_mss_nvlink_4_base_r()) && - (phys_address <= addr_map_mss_nvlink_4_limit_r())) { - aperture = T234_SOC_HWPM_MSSNVLHSH0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mss_nvlink_4_base_r(); - } - } else if ((phys_address >= addr_map_mss_nvlink_5_base_r()) && - (phys_address <= addr_map_mss_nvlink_5_limit_r())) { - aperture = T234_SOC_HWPM_MSSNVLHSH0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mss_nvlink_5_base_r(); - } - } else if ((phys_address >= addr_map_mss_nvlink_6_base_r()) && - (phys_address <= addr_map_mss_nvlink_6_limit_r())) { - aperture = T234_SOC_HWPM_MSSNVLHSH0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mss_nvlink_6_base_r(); - } - } else if ((phys_address >= addr_map_mss_nvlink_7_base_r()) && - (phys_address <= addr_map_mss_nvlink_7_limit_r())) { - aperture = T234_SOC_HWPM_MSSNVLHSH0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mss_nvlink_7_base_r(); - } - } else if ((phys_address >= addr_map_mss_nvlink_8_base_r()) && - (phys_address <= addr_map_mss_nvlink_8_limit_r())) { - aperture = T234_SOC_HWPM_MSSNVLHSH0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mss_nvlink_8_base_r(); - } - } else if ((phys_address >= addr_map_pcie_c0_ctl_base_r()) && - (phys_address <= addr_map_pcie_c0_ctl_limit_r())) { - aperture = T234_SOC_HWPM_PCIE0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_pcie_c0_ctl_base_r(); - } - } else if ((phys_address >= addr_map_pcie_c1_ctl_base_r()) && - (phys_address <= addr_map_pcie_c1_ctl_limit_r())) { - aperture = T234_SOC_HWPM_PCIE1_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_pcie_c1_ctl_base_r(); - } - } else if ((phys_address >= addr_map_pcie_c2_ctl_base_r()) && - (phys_address <= addr_map_pcie_c2_ctl_limit_r())) { - aperture = T234_SOC_HWPM_PCIE2_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_pcie_c2_ctl_base_r(); - } - } else if ((phys_address >= addr_map_pcie_c3_ctl_base_r()) && - (phys_address <= addr_map_pcie_c3_ctl_limit_r())) { - aperture = T234_SOC_HWPM_PCIE3_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_pcie_c3_ctl_base_r(); - } - } else if ((phys_address >= addr_map_pcie_c4_ctl_base_r()) && - (phys_address <= addr_map_pcie_c4_ctl_limit_r())) { - aperture = T234_SOC_HWPM_PCIE4_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_pcie_c4_ctl_base_r(); - } - } else if ((phys_address >= addr_map_pcie_c5_ctl_base_r()) && - (phys_address <= addr_map_pcie_c5_ctl_limit_r())) { - aperture = T234_SOC_HWPM_PCIE5_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_pcie_c5_ctl_base_r(); - } - } else if ((phys_address >= addr_map_pcie_c6_ctl_base_r()) && - (phys_address <= addr_map_pcie_c6_ctl_limit_r())) { - aperture = T234_SOC_HWPM_PCIE6_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_pcie_c6_ctl_base_r(); - } - } else if ((phys_address >= addr_map_pcie_c7_ctl_base_r()) && - (phys_address <= addr_map_pcie_c7_ctl_limit_r())) { - aperture = T234_SOC_HWPM_PCIE7_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_pcie_c7_ctl_base_r(); - } - } else if 
((phys_address >= addr_map_pcie_c8_ctl_base_r()) && - (phys_address <= addr_map_pcie_c8_ctl_limit_r())) { - aperture = T234_SOC_HWPM_PCIE8_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_pcie_c8_ctl_base_r(); - } - } else if ((phys_address >= addr_map_pcie_c9_ctl_base_r()) && - (phys_address <= addr_map_pcie_c9_ctl_limit_r())) { - aperture = T234_SOC_HWPM_PCIE9_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_pcie_c9_ctl_base_r(); - } - } else if ((phys_address >= addr_map_pcie_c10_ctl_base_r()) && - (phys_address <= addr_map_pcie_c10_ctl_limit_r())) { - aperture = T234_SOC_HWPM_PCIE10_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_pcie_c10_ctl_base_r(); - } - } else if ((phys_address >= addr_map_mc0_base_r()) && - (phys_address <= addr_map_mc0_limit_r())) { - aperture = T234_SOC_HWPM_MSSCHANNELPARTA0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mc0_base_r(); - } - } else if ((phys_address >= addr_map_mc1_base_r()) && - (phys_address <= addr_map_mc1_limit_r())) { - aperture = T234_SOC_HWPM_MSSCHANNELPARTA1_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mc1_base_r(); - } - } else if ((phys_address >= addr_map_mc2_base_r()) && - (phys_address <= addr_map_mc2_limit_r())) { - aperture = T234_SOC_HWPM_MSSCHANNELPARTA2_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mc2_base_r(); - } - } else if ((phys_address >= addr_map_mc3_base_r()) && - (phys_address <= addr_map_mc3_limit_r())) { - aperture = T234_SOC_HWPM_MSSCHANNELPARTA3_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mc3_base_r(); - } - } else if ((phys_address >= addr_map_mc4_base_r()) && - (phys_address <= addr_map_mc4_limit_r())) { - aperture = T234_SOC_HWPM_MSSCHANNELPARTB0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mc4_base_r(); - } - } else if ((phys_address >= addr_map_mc5_base_r()) && - (phys_address <= addr_map_mc5_limit_r())) { - aperture = T234_SOC_HWPM_MSSCHANNELPARTB1_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mc5_base_r(); - } - } else if ((phys_address >= addr_map_mc6_base_r()) && - (phys_address <= addr_map_mc6_limit_r())) { - aperture = T234_SOC_HWPM_MSSCHANNELPARTB2_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mc6_base_r(); - } - } else if ((phys_address >= addr_map_mc7_base_r()) && - (phys_address <= addr_map_mc7_limit_r())) { - aperture = T234_SOC_HWPM_MSSCHANNELPARTB3_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mc7_base_r(); - } - } else if ((phys_address >= addr_map_mc8_base_r()) && - (phys_address <= addr_map_mc8_limit_r())) { - aperture = T234_SOC_HWPM_MSSCHANNELPARTC0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mc8_base_r(); - } - } else if ((phys_address >= addr_map_mc9_base_r()) && - (phys_address <= addr_map_mc9_limit_r())) { - aperture = T234_SOC_HWPM_MSSCHANNELPARTC1_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mc9_base_r(); - } - } else if ((phys_address >= addr_map_mc10_base_r()) && - (phys_address <= addr_map_mc10_limit_r())) { - aperture = T234_SOC_HWPM_MSSCHANNELPARTC2_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mc10_base_r(); - } - } else if ((phys_address >= addr_map_mc11_base_r()) && - (phys_address <= addr_map_mc11_limit_r())) { - aperture = T234_SOC_HWPM_MSSCHANNELPARTC3_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mc11_base_r(); - } - } else if ((phys_address >= addr_map_mc4_base_r()) && - (phys_address <= addr_map_mc12_limit_r())) { - aperture = 
T234_SOC_HWPM_MSSCHANNELPARTD0_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mc12_base_r(); - } - } else if ((phys_address >= addr_map_mc13_base_r()) && - (phys_address <= addr_map_mc13_limit_r())) { - aperture = T234_SOC_HWPM_MSSCHANNELPARTD1_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mc13_base_r(); - } - } else if ((phys_address >= addr_map_mc14_base_r()) && - (phys_address <= addr_map_mc14_limit_r())) { - aperture = T234_SOC_HWPM_MSSCHANNELPARTD2_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mc14_base_r(); - } - } else if ((phys_address >= addr_map_mc15_base_r()) && - (phys_address <= addr_map_mc15_limit_r())) { - aperture = T234_SOC_HWPM_MSSCHANNELPARTD3_PERFMON_DT; - if (ip_base_addr) { - *ip_base_addr = addr_map_mc15_base_r(); - } - } - - return (u32)aperture; -} - -int t234_soc_hwpm_fs_info_init(struct tegra_soc_hwpm *hwpm) -{ - hwpm->hwpm_resources = t234_hwpm_resources; - - if (tegra_platform_is_vsp()) { - /* Static IP instances as per VSP netlist */ - hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_VIC] = 0x1; - hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_MSS_CHANNEL] = 0xF; - hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_MSS_GPU_HUB] = 0x1; - hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_MSS_ISO_NISO_HUBS] = 0x1; - hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_MSS_MCF] = 0x1; - hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_MSS_NVLINK] = 0x1; - } - if (tegra_platform_is_silicon()) { - /* Static IP instances corresponding to silicon */ - // hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_VI] = 0x3; - hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_ISP] = 0x1; - hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_VIC] = 0x1; - hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_OFA] = 0x1; - hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_PVA] = 0x1; - hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_NVDLA] = 0x3; - // hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_MGBE] = 0xF; - hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_SCF] = 0x1; - hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_NVDEC] = 0x1; - hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_NVENC] = 0x1; - // hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_PCIE] = 0x32; - // hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_DISPLAY] = 0x1; - hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_MSS_CHANNEL] = 0xFFFF; - hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_MSS_GPU_HUB] = 0x1; - hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_MSS_ISO_NISO_HUBS] = 0x1; - hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_MSS_MCF] = 0x1; - hwpm->ip_fs_info[TEGRA_SOC_HWPM_IP_MSS_NVLINK] = 0x1; - } - return 0; -} - -int t234_soc_hwpm_pma_rtr_map(struct tegra_soc_hwpm *hwpm) -{ - struct resource *res = NULL; - u64 num_regs = 0ULL; - - hwpm->dt_apertures[T234_SOC_HWPM_PMA_DT] = - of_iomap(hwpm->np, T234_SOC_HWPM_PMA_DT); - if (!hwpm->dt_apertures[T234_SOC_HWPM_PMA_DT]) { - tegra_soc_hwpm_err("Couldn't map the PMA aperture"); - return -ENOMEM; - } - res = platform_get_resource(hwpm->pdev, - IORESOURCE_MEM, - T234_SOC_HWPM_PMA_DT); - if ((!res) || (res->start == 0) || (res->end == 0)) { - tegra_soc_hwpm_err("Invalid resource for PMA"); - return -ENOMEM; - } - t234_pma_map[1].start_pa = res->start; - t234_pma_map[1].end_pa = res->end; - t234_cmd_slice_rtr_map[0].start_pa = res->start; - t234_cmd_slice_rtr_map[0].end_pa = res->end; - if (hwpm->fake_registers_enabled) { - num_regs = (res->end + 1 - res->start) / sizeof(*t234_pma_fake_regs); - t234_pma_fake_regs = (u32 *)kzalloc(sizeof(*t234_pma_fake_regs) * num_regs, - GFP_KERNEL); - if (!t234_pma_fake_regs) { - tegra_soc_hwpm_err("Couldn't allocate memory for PMA" - " fake registers"); - return -ENOMEM; - } - t234_pma_map[1].fake_registers = t234_pma_fake_regs; - t234_cmd_slice_rtr_map[0].fake_registers = t234_pma_fake_regs; - } 
- - hwpm->hwpm_resources[TEGRA_SOC_HWPM_RESOURCE_PMA].reserved = true; - - hwpm->dt_apertures[T234_SOC_HWPM_RTR_DT] = - of_iomap(hwpm->np, T234_SOC_HWPM_RTR_DT); - if (!hwpm->dt_apertures[T234_SOC_HWPM_RTR_DT]) { - tegra_soc_hwpm_err("Couldn't map the RTR aperture"); - return -ENOMEM; - } - res = platform_get_resource(hwpm->pdev, - IORESOURCE_MEM, - T234_SOC_HWPM_RTR_DT); - if ((!res) || (res->start == 0) || (res->end == 0)) { - tegra_soc_hwpm_err("Invalid resource for RTR"); - return -ENOMEM; - } - t234_cmd_slice_rtr_map[1].start_pa = res->start; - t234_cmd_slice_rtr_map[1].end_pa = res->end; - if (hwpm->fake_registers_enabled) { - num_regs = (res->end + 1 - res->start) / - sizeof(*t234_cmd_slice_rtr_map[1].fake_registers); - t234_cmd_slice_rtr_map[1].fake_registers = - (u32 *)kzalloc( - sizeof(*t234_cmd_slice_rtr_map[1].fake_registers) * - num_regs, - GFP_KERNEL); - if (!t234_cmd_slice_rtr_map[1].fake_registers) { - tegra_soc_hwpm_err("Couldn't allocate memory for RTR" - " fake registers"); - return -ENOMEM; - } - } - hwpm->hwpm_resources[TEGRA_SOC_HWPM_RESOURCE_CMD_SLICE_RTR].reserved = true; - return 0; -} - -int t234_soc_hwpm_pma_rtr_unmap(struct tegra_soc_hwpm *hwpm) -{ - if (hwpm->dt_apertures[T234_SOC_HWPM_PMA_DT]) { - iounmap(hwpm->dt_apertures[T234_SOC_HWPM_PMA_DT]); - hwpm->dt_apertures[T234_SOC_HWPM_PMA_DT] = NULL; - } - t234_pma_map[1].start_pa = 0; - t234_pma_map[1].end_pa = 0; - t234_cmd_slice_rtr_map[0].start_pa = 0; - t234_cmd_slice_rtr_map[0].end_pa = 0; - if (t234_pma_fake_regs) { - kfree(t234_pma_fake_regs); - t234_pma_fake_regs = NULL; - t234_pma_map[1].fake_registers = NULL; - t234_cmd_slice_rtr_map[0].fake_registers = NULL; - } - hwpm->hwpm_resources[TEGRA_SOC_HWPM_RESOURCE_PMA].reserved = false; - - if (hwpm->dt_apertures[T234_SOC_HWPM_RTR_DT]) { - iounmap(hwpm->dt_apertures[T234_SOC_HWPM_RTR_DT]); - hwpm->dt_apertures[T234_SOC_HWPM_RTR_DT] = NULL; - } - t234_cmd_slice_rtr_map[1].start_pa = 0; - t234_cmd_slice_rtr_map[1].end_pa = 0; - if (t234_cmd_slice_rtr_map[1].fake_registers) { - kfree(t234_cmd_slice_rtr_map[1].fake_registers); - t234_cmd_slice_rtr_map[1].fake_registers = NULL; - } - hwpm->hwpm_resources[TEGRA_SOC_HWPM_RESOURCE_CMD_SLICE_RTR].reserved = false; - - return 0; -} - - -int t234_soc_hwpm_disable_pma_triggers(struct tegra_soc_hwpm *hwpm) -{ - int err = 0; - int ret = 0; - bool timeout = false; - u32 field_mask = 0; - u32 field_val = 0; - - /* Disable PMA triggers */ - err = reg_rmw(hwpm, NULL, T234_SOC_HWPM_PMA_DT, - pmasys_trigger_config_user_r(0) - addr_map_pma_base_r(), - pmasys_trigger_config_user_pma_pulse_m(), - pmasys_trigger_config_user_pma_pulse_disable_f(), - false, false); - RELEASE_FAIL("Unable to disable PMA triggers"); - - hwpm_writel(hwpm, T234_SOC_HWPM_PMA_DT, - pmasys_sys_trigger_start_mask_r() - addr_map_pma_base_r(), 0); - hwpm_writel(hwpm, T234_SOC_HWPM_PMA_DT, - pmasys_sys_trigger_start_maskb_r() - addr_map_pma_base_r(), 0); - hwpm_writel(hwpm, T234_SOC_HWPM_PMA_DT, - pmasys_sys_trigger_stop_mask_r() - addr_map_pma_base_r(), 0); - hwpm_writel(hwpm, T234_SOC_HWPM_PMA_DT, - pmasys_sys_trigger_stop_maskb_r() - addr_map_pma_base_r(), 0); - - /* Wait for PERFMONs, ROUTER, and PMA to idle */ - timeout = HWPM_TIMEOUT(pmmsys_sys0router_perfmonstatus_merged_v( - hwpm_readl(hwpm, T234_SOC_HWPM_RTR_DT, - pmmsys_sys0router_perfmonstatus_r() - - addr_map_rtr_base_r())) == 0U, - "NV_PERF_PMMSYS_SYS0ROUTER_PERFMONSTATUS_MERGED_EMPTY"); - if (timeout && ret == 0) { - ret = -EIO; - } - - timeout = 
HWPM_TIMEOUT(pmmsys_sys0router_enginestatus_status_v( - hwpm_readl(hwpm, T234_SOC_HWPM_RTR_DT, - pmmsys_sys0router_enginestatus_r() - - addr_map_rtr_base_r())) == - pmmsys_sys0router_enginestatus_status_empty_v(), - "NV_PERF_PMMSYS_SYS0ROUTER_ENGINESTATUS_STATUS_EMPTY"); - if (timeout && ret == 0) { - ret = -EIO; - } - - field_mask = pmasys_enginestatus_status_m() | - pmasys_enginestatus_rbufempty_m(); - field_val = pmasys_enginestatus_status_empty_f() | - pmasys_enginestatus_rbufempty_empty_f(); - timeout = HWPM_TIMEOUT((hwpm_readl(hwpm, T234_SOC_HWPM_PMA_DT, - pmasys_enginestatus_r() - - addr_map_pma_base_r()) & field_mask) == field_val, - "NV_PERF_PMASYS_ENGINESTATUS"); - if (timeout && ret == 0) { - ret = -EIO; - } - - hwpm->hwpm_resources[TEGRA_SOC_HWPM_RESOURCE_PMA].reserved = false; - hwpm->hwpm_resources[TEGRA_SOC_HWPM_RESOURCE_CMD_SLICE_RTR].reserved = false; - - return ret; -} - -int t234_soc_hwpm_disable_slcg(struct tegra_soc_hwpm *hwpm) -{ - int ret; - u32 field_mask = 0U; - u32 field_val = 0U; - - ret = reg_rmw(hwpm, NULL, T234_SOC_HWPM_PMA_DT, - pmasys_cg2_r() - addr_map_pma_base_r(), - pmasys_cg2_slcg_m(), pmasys_cg2_slcg_disabled_f(), - false, false); - if (ret < 0) { - tegra_soc_hwpm_err("Unable to disable PMA SLCG"); - ret = -EIO; - goto fail; - } - - field_mask = pmmsys_sys0router_cg2_slcg_perfmon_m() | - pmmsys_sys0router_cg2_slcg_router_m() | - pmmsys_sys0router_cg2_slcg_m(); - field_val = pmmsys_sys0router_cg2_slcg_perfmon_disabled_f() | - pmmsys_sys0router_cg2_slcg_router_disabled_f() | - pmmsys_sys0router_cg2_slcg_disabled_f(); - ret = reg_rmw(hwpm, NULL, T234_SOC_HWPM_RTR_DT, - pmmsys_sys0router_cg2_r() - addr_map_rtr_base_r(), - field_mask, field_val, false, false); - if (ret < 0) { - tegra_soc_hwpm_err("Unable to disable ROUTER SLCG"); - ret = -EIO; - goto fail; - } - - /* Program PROD values */ - ret = reg_rmw(hwpm, NULL, T234_SOC_HWPM_PMA_DT, - pmasys_controlb_r() - addr_map_pma_base_r(), - pmasys_controlb_coalesce_timeout_cycles_m(), - pmasys_controlb_coalesce_timeout_cycles__prod_f(), - false, false); - if (ret < 0) { - tegra_soc_hwpm_err("Unable to program PROD value"); - ret = -EIO; - goto fail; - } - - ret = reg_rmw(hwpm, NULL, T234_SOC_HWPM_PMA_DT, - pmasys_channel_config_user_r(0) - addr_map_pma_base_r(), - pmasys_channel_config_user_coalesce_timeout_cycles_m(), - pmasys_channel_config_user_coalesce_timeout_cycles__prod_f(), - false, false); - if (ret < 0) { - tegra_soc_hwpm_err("Unable to program PROD value"); - ret = -EIO; - goto fail; - } - - goto success; - -fail: - t234_soc_hwpm_pma_rtr_unmap(hwpm); -success: - return ret; -} - -int t234_soc_hwpm_enable_slcg(struct tegra_soc_hwpm *hwpm) -{ - int err, ret = 0; - u32 field_mask = 0U; - u32 field_val = 0U; - - err = reg_rmw(hwpm, NULL, T234_SOC_HWPM_PMA_DT, - pmasys_cg2_r() - addr_map_pma_base_r(), - pmasys_cg2_slcg_m(), - pmasys_cg2_slcg_enabled_f(), false, false); - RELEASE_FAIL("Unable to enable PMA SLCG"); - - field_mask = pmmsys_sys0router_cg2_slcg_perfmon_m() | - pmmsys_sys0router_cg2_slcg_router_m() | - pmmsys_sys0router_cg2_slcg_m(); - field_val = pmmsys_sys0router_cg2_slcg_perfmon__prod_f() | - pmmsys_sys0router_cg2_slcg_router__prod_f() | - pmmsys_sys0router_cg2_slcg__prod_f(); - err = reg_rmw(hwpm, NULL, T234_SOC_HWPM_RTR_DT, - pmmsys_sys0router_cg2_r() - addr_map_rtr_base_r(), - field_mask, field_val, false, false); - RELEASE_FAIL("Unable to enable ROUTER SLCG"); - - return err; -} - -static bool t234_soc_hwpm_ip_reg_check(struct hwpm_resource_aperture *aperture, - u64 phys_addr, bool 
use_absolute_base, - u64 *updated_pa) -{ - u64 start_pa = 0ULL; - u64 end_pa = 0ULL; - - if (!aperture) { - tegra_soc_hwpm_err("Aperture is NULL"); - return false; - } - - if (use_absolute_base) { - start_pa = aperture->start_abs_pa; - end_pa = aperture->end_abs_pa; - } else { - start_pa = aperture->start_pa; - end_pa = aperture->end_pa; - } - - if ((phys_addr >= start_pa) && (phys_addr <= end_pa)) { - tegra_soc_hwpm_dbg("Found aperture:" - " phys_addr(0x%llx), aperture(0x%llx - 0x%llx)", - phys_addr, start_pa, end_pa); - *updated_pa = phys_addr - start_pa + aperture->start_pa; - return true; - } - return false; -} - -/* - * Find an aperture in which phys_addr lies. If check_reservation is true, then - * we also have to do a allowlist check. - */ -struct hwpm_resource_aperture *t234_soc_hwpm_find_aperture( - struct tegra_soc_hwpm *hwpm, u64 phys_addr, - bool use_absolute_base, bool check_reservation, - u64 *updated_pa) -{ - struct hwpm_resource_aperture *aperture = NULL; - int res_idx = 0; - int aprt_idx = 0; - - for (res_idx = 0; res_idx < TERGA_SOC_HWPM_NUM_RESOURCES; res_idx++) { - if (check_reservation && !hwpm->hwpm_resources[res_idx].reserved) - continue; - - for (aprt_idx = 0; - aprt_idx < hwpm->hwpm_resources[res_idx].map_size; - aprt_idx++) { - aperture = &(hwpm->hwpm_resources[res_idx].map[aprt_idx]); - if (check_reservation) { - if (t234_soc_hwpm_allowlist_check(aperture, phys_addr, - use_absolute_base, updated_pa)) { - return aperture; - } - } else { - if (t234_soc_hwpm_ip_reg_check(aperture, phys_addr, - use_absolute_base, updated_pa)) { - return aperture; - } - } - } - } - - tegra_soc_hwpm_err("Unable to find aperture: phys(0x%llx)", phys_addr); - return NULL; -} - -void t234_soc_hwpm_zero_alist_regs(struct tegra_soc_hwpm *hwpm, - struct hwpm_resource_aperture *aperture) -{ - u32 alist_idx = 0U; - - for (alist_idx = 0; alist_idx < aperture->alist_size; alist_idx++) { - if (aperture->alist[alist_idx].zero_at_init) { - ioctl_writel(hwpm, aperture, - aperture->start_pa + - aperture->alist[alist_idx].reg_offset, 0); - } - } -} - -int t234_soc_hwpm_update_allowlist(struct tegra_soc_hwpm *hwpm, - void *ioctl_struct) -{ - int err = 0; - int res_idx = 0; - int aprt_idx = 0; - u32 full_alist_idx = 0; - u32 aprt_alist_idx = 0; - long pinned_pages = 0; - long page_idx = 0; - u64 alist_buf_size = 0; - u64 num_pages = 0; - u64 *full_alist_u64 = NULL; - void *full_alist = NULL; - struct page **pages = NULL; - struct hwpm_resource_aperture *aperture = NULL; - struct tegra_soc_hwpm_query_allowlist *query_allowlist = - (struct tegra_soc_hwpm_query_allowlist *)ioctl_struct; - unsigned long user_va = (unsigned long)(query_allowlist->allowlist); - unsigned long offset = user_va & ~PAGE_MASK; - - if (hwpm->full_alist_size < 0) { - tegra_soc_hwpm_err("Invalid allowlist size"); - return -EINVAL; - } - alist_buf_size = hwpm->full_alist_size * sizeof(struct allowlist); - - /* Memory map user buffer into kernel address space */ - num_pages = DIV_ROUND_UP(offset + alist_buf_size, PAGE_SIZE); - pages = (struct page **)kzalloc(sizeof(*pages) * num_pages, GFP_KERNEL); - if (!pages) { - tegra_soc_hwpm_err("Couldn't allocate memory for pages array"); - err = -ENOMEM; - goto alist_unmap; - } - pinned_pages = get_user_pages(user_va & PAGE_MASK, num_pages, 0, - pages, NULL); - if (pinned_pages != num_pages) { - tegra_soc_hwpm_err("Requested %llu pages / Got %ld pages", - num_pages, pinned_pages); - err = -ENOMEM; - goto alist_unmap; - } - full_alist = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL); - if 
(!full_alist) { - tegra_soc_hwpm_err("Couldn't map allowlist buffer into" - " kernel address space"); - err = -ENOMEM; - goto alist_unmap; - } - full_alist_u64 = (u64 *)(full_alist + offset); - - /* Fill in allowlist buffer */ - for (res_idx = 0, full_alist_idx = 0; - res_idx < TERGA_SOC_HWPM_NUM_RESOURCES; - res_idx++) { - if (!(hwpm->hwpm_resources[res_idx].reserved)) - continue; - tegra_soc_hwpm_dbg("Found reserved IP(%d)", res_idx); - - for (aprt_idx = 0; - aprt_idx < hwpm->hwpm_resources[res_idx].map_size; - aprt_idx++) { - aperture = &(hwpm->hwpm_resources[res_idx].map[aprt_idx]); - if (aperture->alist) { - for (aprt_alist_idx = 0; - aprt_alist_idx < aperture->alist_size; - aprt_alist_idx++, full_alist_idx++) { - full_alist_u64[full_alist_idx] = - aperture->start_pa + - aperture->alist[aprt_alist_idx].reg_offset; - } - } else { - tegra_soc_hwpm_err("NULL allowlist in aperture(0x%llx - 0x%llx)", - aperture->start_pa, - aperture->end_pa); - } - } - } - -alist_unmap: - if (full_alist) - vunmap(full_alist); - if (pinned_pages > 0) { - for (page_idx = 0; page_idx < pinned_pages; page_idx++) { - set_page_dirty(pages[page_idx]); - put_page(pages[page_idx]); - } - } - if (pages) { - kfree(pages); - } - - return err; -} - -bool t234_soc_hwpm_allowlist_check(struct hwpm_resource_aperture *aperture, - u64 phys_addr, bool use_absolute_base, - u64 *updated_pa) -{ - u32 idx = 0U; - u64 start_pa = 0ULL; - - if (!aperture) { - tegra_soc_hwpm_err("Aperture is NULL"); - return false; - } - if (!aperture->alist) { - tegra_soc_hwpm_err("NULL allowlist in dt_aperture(%d)", - aperture->dt_aperture); - return false; - } - - start_pa = use_absolute_base ? aperture->start_abs_pa : - aperture->start_pa; - - for (idx = 0; idx < aperture->alist_size; idx++) { - if (phys_addr == start_pa + aperture->alist[idx].reg_offset) { - *updated_pa = aperture->start_pa + - aperture->alist[idx].reg_offset; - return true; - } - } - - return false; -} - -void t234_soc_hwpm_get_full_allowlist(struct tegra_soc_hwpm *hwpm) -{ - int res_idx = 0; - int aprt_idx = 0; - struct hwpm_resource_aperture *aperture = NULL; - - for (res_idx = 0; res_idx < TERGA_SOC_HWPM_NUM_RESOURCES; res_idx++) { - if (!(hwpm->hwpm_resources[res_idx].reserved)) - continue; - tegra_soc_hwpm_dbg("Found reserved IP(%d)", res_idx); - - for (aprt_idx = 0; - aprt_idx < hwpm->hwpm_resources[res_idx].map_size; - aprt_idx++) { - aperture = &(hwpm->hwpm_resources[res_idx].map[aprt_idx]); - if (aperture->alist) { - hwpm->full_alist_size += aperture->alist_size; - } else { - tegra_soc_hwpm_err( - "NULL allowlist in aperture(0x%llx - 0x%llx)", - aperture->start_pa, aperture->end_pa); - } - } - } -} diff --git a/hal/t234/t234_soc_hwpm_init.h b/hal/t234/t234_soc_hwpm_init.h deleted file mode 100644 index c7581ea..0000000 --- a/hal/t234/t234_soc_hwpm_init.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#ifndef T234_SOC_HWPM_INIT_H -#define T234_SOC_HWPM_INIT_H - -#include - -void __iomem **t234_soc_hwpm_init_dt_apertures(void); -struct tegra_soc_hwpm_ip_ops *t234_soc_hwpm_init_ip_ops_info(void); -bool t234_soc_hwpm_is_perfmon(u32 dt_aperture); -u64 t234_soc_hwpm_get_perfmon_base(u32 dt_aperture); -bool t234_soc_hwpm_is_dt_aperture(u32 dt_aperture); -u32 t234_soc_hwpm_get_ip_aperture(struct tegra_soc_hwpm *hwpm, - u64 phys_address, u64 *ip_base_addr); -int t234_soc_hwpm_fs_info_init(struct tegra_soc_hwpm *hwpm); -int t234_soc_hwpm_disable_pma_triggers(struct tegra_soc_hwpm *hwpm); -u32 **t234_soc_hwpm_get_mc_fake_regs(struct tegra_soc_hwpm *hwpm, - struct hwpm_resource_aperture *aperture); -void t234_soc_hwpm_set_mc_fake_regs(struct tegra_soc_hwpm *hwpm, - struct hwpm_resource_aperture *aperture, - bool set_null); -int t234_soc_hwpm_pma_rtr_map(struct tegra_soc_hwpm *hwpm); -int t234_soc_hwpm_pma_rtr_unmap(struct tegra_soc_hwpm *hwpm); -int t234_soc_hwpm_disable_slcg(struct tegra_soc_hwpm *hwpm); -int t234_soc_hwpm_enable_slcg(struct tegra_soc_hwpm *hwpm); -struct hwpm_resource_aperture *t234_soc_hwpm_find_aperture( - struct tegra_soc_hwpm *hwpm, u64 phys_addr, - bool use_absolute_base, bool check_reservation, - u64 *updated_pa); - -void t234_soc_hwpm_zero_alist_regs(struct tegra_soc_hwpm *hwpm, - struct hwpm_resource_aperture *aperture); -int t234_soc_hwpm_update_allowlist(struct tegra_soc_hwpm *hwpm, - void *ioctl_struct); -bool t234_soc_hwpm_allowlist_check(struct hwpm_resource_aperture *aperture, - u64 phys_addr, bool use_absolute_base, - u64 *updated_pa); -void t234_soc_hwpm_get_full_allowlist(struct tegra_soc_hwpm *hwpm); - -int t234_soc_hwpm_update_mem_bytes(struct tegra_soc_hwpm *hwpm, - struct tegra_soc_hwpm_update_get_put *update_get_put); -int t234_soc_hwpm_clear_pipeline(struct tegra_soc_hwpm *hwpm); -int t234_soc_hwpm_stream_buf_map(struct tegra_soc_hwpm *hwpm, - struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream); - -bool t234_soc_hwpm_is_dt_aperture_reserved(struct tegra_soc_hwpm *hwpm, - struct hwpm_resource_aperture *aperture, u32 rsrc_id); -int t234_soc_hwpm_reserve_given_resource( - struct tegra_soc_hwpm *hwpm, u32 resource); -void t234_soc_hwpm_reset_resources(struct tegra_soc_hwpm *hwpm); -void t234_soc_hwpm_disable_perfmons(struct tegra_soc_hwpm *hwpm); -int t234_soc_hwpm_bind_resources(struct tegra_soc_hwpm *hwpm); - -#endif /* T234_SOC_HWPM_INIT_H */ diff --git a/hal/t234/t234_soc_hwpm_ip_map.h b/hal/t234/t234_soc_hwpm_ip_map.h deleted file mode 100644 index 9726dc1..0000000 --- a/hal/t234/t234_soc_hwpm_ip_map.h +++ /dev/null @@ -1,1562 +0,0 @@ -/* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - */ - -#ifndef T234_SOC_HWPM_IP_MAP_H -#define T234_SOC_HWPM_IP_MAP_H - -#include -#include -#include - -/* - * Aperture Ranges (start_pa/end_pa): - * - start_pa and end_pa is 0 for PERFMON, PMA, and RTR apertures. These - * ranges will be extracted from the device tree. 
- * - IP apertures are not listed in the device tree because we don't map them. - * Therefore, start_pa and end_pa for IP apertures are hardcoded here. IP - * apertures are listed here because we need to track their allowlists. - */ -struct hwpm_resource_aperture t234_vi_map[] = { - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_VI0_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_VI0_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_VI0_PERFMON_DT, - .index_mask = 0x1U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_VI1_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_VI1_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_VI1_PERFMON_DT, - .index_mask = 0x2U, - }, - { - .start_pa = addr_map_vi_thi_base_r(), - .end_pa = addr_map_vi_thi_limit_r(), - .start_abs_pa = addr_map_vi_thi_base_r(), - .end_abs_pa = addr_map_vi_thi_limit_r(), - .fake_registers = NULL, - .alist = t234_vi_thi_alist, - .alist_size = ARRAY_SIZE(t234_vi_thi_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_vi2_thi_base_r(), - .end_pa = addr_map_vi2_thi_limit_r(), - .start_abs_pa = addr_map_vi2_thi_base_r(), - .end_abs_pa = addr_map_vi2_thi_limit_r(), - .fake_registers = NULL, - .alist = t234_vi_thi_alist, - .alist_size = ARRAY_SIZE(t234_vi_thi_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x2U, - }, -}; - -struct hwpm_resource_aperture t234_isp_map[] = { - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_ISP0_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_ISP0_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_ISP0_PERFMON_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_isp_thi_base_r(), - .end_pa = addr_map_isp_thi_limit_r(), - .start_abs_pa = addr_map_isp_thi_base_r(), - .end_abs_pa = addr_map_isp_thi_limit_r(), - .fake_registers = NULL, - .alist = t234_isp_thi_alist, - .alist_size = ARRAY_SIZE(t234_isp_thi_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, -}; - -struct hwpm_resource_aperture t234_vic_map[] = { - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_VICA0_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_VICA0_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_VICA0_PERFMON_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_vic_base_r(), - .end_pa = addr_map_vic_limit_r(), - .start_abs_pa = addr_map_vic_base_r(), - .end_abs_pa = addr_map_vic_limit_r(), - .fake_registers = NULL, - .alist = t234_vic_alist, - .alist_size = ARRAY_SIZE(t234_vic_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, -}; - -struct hwpm_resource_aperture t234_ofa_map[] = { - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_OFAA0_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_OFAA0_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = 
ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_OFAA0_PERFMON_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_ofa_base_r(), - .end_pa = addr_map_ofa_limit_r(), - .start_abs_pa = addr_map_ofa_base_r(), - .end_abs_pa = addr_map_ofa_limit_r(), - .fake_registers = NULL, - .alist = t234_ofa_alist, - .alist_size = ARRAY_SIZE(t234_ofa_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, -}; - -struct hwpm_resource_aperture t234_pva_map[] = { - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_PVAV0_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_PVAV0_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_PVAV0_PERFMON_DT, - .index_mask = 0x1U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_PVAV1_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_PVAV1_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_PVAV1_PERFMON_DT, - .index_mask = 0x1U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_PVAC0_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_PVAC0_PERFMON_DT), - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .fake_registers = NULL, - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_PVAC0_PERFMON_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_pva0_pm_base_r(), - .end_pa = addr_map_pva0_pm_limit_r(), - .start_abs_pa = addr_map_pva0_pm_base_r(), - .end_abs_pa = addr_map_pva0_pm_limit_r(), - .fake_registers = NULL, - .alist = t234_pva0_pm_alist, - .alist_size = ARRAY_SIZE(t234_pva0_pm_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, -}; - -struct hwpm_resource_aperture t234_nvdla_map[] = { - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_NVDLAB0_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_NVDLAB0_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_NVDLAB0_PERFMON_DT, - .index_mask = 0x1U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_NVDLAB1_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_NVDLAB1_PERFMON_DT), - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .fake_registers = NULL, - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_NVDLAB1_PERFMON_DT, - .index_mask = 0x2U, - }, - { - .start_pa = addr_map_nvdla0_base_r(), - .end_pa = addr_map_nvdla0_limit_r(), - .start_abs_pa = addr_map_nvdla0_base_r(), - .end_abs_pa = addr_map_nvdla0_limit_r(), - .fake_registers = NULL, - .alist = t234_nvdla_alist, - .alist_size = ARRAY_SIZE(t234_nvdla_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_nvdla1_base_r(), - .end_pa = addr_map_nvdla1_limit_r(), - .start_abs_pa = addr_map_nvdla1_base_r(), - .end_abs_pa = addr_map_nvdla1_limit_r(), - .fake_registers = NULL, - .alist = t234_nvdla_alist, - .alist_size = ARRAY_SIZE(t234_nvdla_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x2U, - }, -}; - -struct hwpm_resource_aperture t234_mgbe_map[] = { - { - 
.start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MGBE0_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MGBE0_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MGBE0_PERFMON_DT, - .index_mask = 0x1U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MGBE1_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MGBE1_PERFMON_DT), - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .fake_registers = NULL, - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MGBE1_PERFMON_DT, - .index_mask = 0x2U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MGBE2_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MGBE2_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MGBE2_PERFMON_DT, - .index_mask = 0x4U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MGBE3_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MGBE3_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MGBE3_PERFMON_DT, - .index_mask = 0x8U, - }, - { - .start_pa = addr_map_mgbe0_base_r(), - .end_pa = addr_map_mgbe0_limit_r(), - .start_abs_pa = addr_map_mgbe0_base_r(), - .end_abs_pa = addr_map_mgbe0_limit_r(), - .fake_registers = NULL, - .alist = t234_mgbe_alist, - .alist_size = ARRAY_SIZE(t234_mgbe_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_mgbe1_base_r(), - .end_pa = addr_map_mgbe1_limit_r(), - .start_abs_pa = addr_map_mgbe1_base_r(), - .end_abs_pa = addr_map_mgbe1_limit_r(), - .fake_registers = NULL, - .alist = t234_mgbe_alist, - .alist_size = ARRAY_SIZE(t234_mgbe_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x2U, - }, - { - .start_pa = addr_map_mgbe2_base_r(), - .end_pa = addr_map_mgbe2_limit_r(), - .start_abs_pa = addr_map_mgbe2_base_r(), - .end_abs_pa = addr_map_mgbe2_limit_r(), - .fake_registers = NULL, - .alist = t234_mgbe_alist, - .alist_size = ARRAY_SIZE(t234_mgbe_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x4U, - }, - { - .start_pa = addr_map_mgbe3_base_r(), - .end_pa = addr_map_mgbe3_limit_r(), - .start_abs_pa = addr_map_mgbe3_base_r(), - .end_abs_pa = addr_map_mgbe3_limit_r(), - .fake_registers = NULL, - .alist = t234_mgbe_alist, - .alist_size = ARRAY_SIZE(t234_mgbe_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x8U, - }, -}; - -struct hwpm_resource_aperture t234_scf_map[] = { - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_SCF0_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_SCF0_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_SCF0_PERFMON_DT, - .index_mask = 0x1U, - }, -}; - -struct hwpm_resource_aperture t234_nvdec_map[] = { - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_NVDECA0_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_NVDECA0_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - 
.alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_NVDECA0_PERFMON_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_nvdec_base_r(), - .end_pa = addr_map_nvdec_limit_r(), - .start_abs_pa = addr_map_nvdec_base_r(), - .end_abs_pa = addr_map_nvdec_limit_r(), - .fake_registers = NULL, - .alist = t234_nvdec_alist, - .alist_size = ARRAY_SIZE(t234_nvdec_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, -}; - -struct hwpm_resource_aperture t234_nvenc_map[] = { - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_NVENCA0_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_NVENCA0_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_NVENCA0_PERFMON_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_nvenc_base_r(), - .end_pa = addr_map_nvenc_limit_r(), - .start_abs_pa = addr_map_nvenc_base_r(), - .end_abs_pa = addr_map_nvenc_limit_r(), - .fake_registers = NULL, - .alist = t234_nvenc_alist, - .alist_size = ARRAY_SIZE(t234_nvenc_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, -}; - -struct hwpm_resource_aperture t234_pcie_map[] = { - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_PCIE0_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_PCIE0_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_PCIE0_PERFMON_DT, - .index_mask = 0x1U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_PCIE1_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_PCIE1_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_PCIE1_PERFMON_DT, - .index_mask = 0x2U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_PCIE2_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_PCIE2_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_PCIE2_PERFMON_DT, - .index_mask = 0x4U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_PCIE3_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_PCIE3_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_PCIE3_PERFMON_DT, - .index_mask = 0x8U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_PCIE4_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_PCIE4_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_PCIE4_PERFMON_DT, - .index_mask = 0x10U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_PCIE5_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_PCIE5_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_PCIE5_PERFMON_DT, - .index_mask = 0x20U, - }, - { - .start_pa = 0, - .end_pa = 0, - 
.start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_PCIE6_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_PCIE6_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_PCIE6_PERFMON_DT, - .index_mask = 0x40U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_PCIE7_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_PCIE7_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_PCIE7_PERFMON_DT, - .index_mask = 0x80U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_PCIE8_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_PCIE8_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_PCIE8_PERFMON_DT, - .index_mask = 0x100U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_PCIE9_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_PCIE9_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_PCIE9_PERFMON_DT, - .index_mask = 0x200U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_PCIE10_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_PCIE10_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_PCIE10_PERFMON_DT, - .index_mask = 0x400U, - }, - { - .start_pa = addr_map_pcie_c0_ctl_base_r(), - .end_pa = addr_map_pcie_c0_ctl_limit_r(), - .start_abs_pa = addr_map_pcie_c0_ctl_base_r(), - .end_abs_pa = addr_map_pcie_c0_ctl_limit_r(), - .fake_registers = NULL, - .alist = t234_pcie_ctl_alist, - .alist_size = ARRAY_SIZE(t234_pcie_ctl_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_pcie_c1_ctl_base_r(), - .end_pa = addr_map_pcie_c1_ctl_limit_r(), - .start_abs_pa = addr_map_pcie_c1_ctl_base_r(), - .end_abs_pa = addr_map_pcie_c1_ctl_limit_r(), - .fake_registers = NULL, - .alist = t234_pcie_ctl_alist, - .alist_size = ARRAY_SIZE(t234_pcie_ctl_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x2U, - }, - { - .start_pa = addr_map_pcie_c2_ctl_base_r(), - .end_pa = addr_map_pcie_c2_ctl_limit_r(), - .start_abs_pa = addr_map_pcie_c2_ctl_base_r(), - .end_abs_pa = addr_map_pcie_c2_ctl_limit_r(), - .fake_registers = NULL, - .alist = t234_pcie_ctl_alist, - .alist_size = ARRAY_SIZE(t234_pcie_ctl_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x4U, - }, - { - .start_pa = addr_map_pcie_c3_ctl_base_r(), - .end_pa = addr_map_pcie_c3_ctl_limit_r(), - .start_abs_pa = addr_map_pcie_c3_ctl_base_r(), - .end_abs_pa = addr_map_pcie_c3_ctl_limit_r(), - .fake_registers = NULL, - .alist = t234_pcie_ctl_alist, - .alist_size = ARRAY_SIZE(t234_pcie_ctl_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x8U, - }, - { - .start_pa = addr_map_pcie_c4_ctl_base_r(), - .end_pa = addr_map_pcie_c4_ctl_limit_r(), - .start_abs_pa = addr_map_pcie_c4_ctl_base_r(), - .end_abs_pa = addr_map_pcie_c4_ctl_limit_r(), - .fake_registers = NULL, - .alist = 
t234_pcie_ctl_alist, - .alist_size = ARRAY_SIZE(t234_pcie_ctl_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x10U, - }, - { - .start_pa = addr_map_pcie_c5_ctl_base_r(), - .end_pa = addr_map_pcie_c5_ctl_limit_r(), - .start_abs_pa = addr_map_pcie_c5_ctl_base_r(), - .end_abs_pa = addr_map_pcie_c5_ctl_limit_r(), - .fake_registers = NULL, - .alist = t234_pcie_ctl_alist, - .alist_size = ARRAY_SIZE(t234_pcie_ctl_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x20U, - }, - { - .start_pa = addr_map_pcie_c6_ctl_base_r(), - .end_pa = addr_map_pcie_c6_ctl_limit_r(), - .start_abs_pa = addr_map_pcie_c6_ctl_base_r(), - .end_abs_pa = addr_map_pcie_c6_ctl_limit_r(), - .fake_registers = NULL, - .alist = t234_pcie_ctl_alist, - .alist_size = ARRAY_SIZE(t234_pcie_ctl_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x40U, - }, - { - .start_pa = addr_map_pcie_c7_ctl_base_r(), - .end_pa = addr_map_pcie_c7_ctl_limit_r(), - .start_abs_pa = addr_map_pcie_c7_ctl_base_r(), - .end_abs_pa = addr_map_pcie_c7_ctl_limit_r(), - .fake_registers = NULL, - .alist = t234_pcie_ctl_alist, - .alist_size = ARRAY_SIZE(t234_pcie_ctl_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x80U, - }, - { - .start_pa = addr_map_pcie_c8_ctl_base_r(), - .end_pa = addr_map_pcie_c8_ctl_limit_r(), - .start_abs_pa = addr_map_pcie_c8_ctl_base_r(), - .end_abs_pa = addr_map_pcie_c8_ctl_limit_r(), - .fake_registers = NULL, - .alist = t234_pcie_ctl_alist, - .alist_size = ARRAY_SIZE(t234_pcie_ctl_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x100U, - }, - { - .start_pa = addr_map_pcie_c9_ctl_base_r(), - .end_pa = addr_map_pcie_c9_ctl_limit_r(), - .start_abs_pa = addr_map_pcie_c9_ctl_base_r(), - .end_abs_pa = addr_map_pcie_c9_ctl_limit_r(), - .fake_registers = NULL, - .alist = t234_pcie_ctl_alist, - .alist_size = ARRAY_SIZE(t234_pcie_ctl_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x200U, - }, - { - .start_pa = addr_map_pcie_c10_ctl_base_r(), - .end_pa = addr_map_pcie_c10_ctl_limit_r(), - .start_abs_pa = addr_map_pcie_c10_ctl_base_r(), - .end_abs_pa = addr_map_pcie_c10_ctl_limit_r(), - .fake_registers = NULL, - .alist = t234_pcie_ctl_alist, - .alist_size = ARRAY_SIZE(t234_pcie_ctl_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x400U, - }, -}; - -struct hwpm_resource_aperture t234_display_map[] = { - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_NVDISPLAY0_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_NVDISPLAY0_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_NVDISPLAY0_PERFMON_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_disp_base_r(), - .end_pa = addr_map_disp_limit_r(), - .start_abs_pa = addr_map_disp_base_r(), - .end_abs_pa = addr_map_disp_limit_r(), - .fake_registers = NULL, - .alist = t234_disp_alist, - .alist_size = ARRAY_SIZE(t234_disp_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, -}; - -struct hwpm_resource_aperture t234_mss_channel_map[] = { - { - .start_pa = addr_map_mc0_base_r(), - .end_pa = addr_map_mc0_limit_r(), - .start_abs_pa = addr_map_mc0_base_r(), - .end_abs_pa = addr_map_mc0_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_channel_alist, - 
.alist_size = ARRAY_SIZE(t234_mss_channel_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_mc1_base_r(), - .end_pa = addr_map_mc1_limit_r(), - .start_abs_pa = addr_map_mc1_base_r(), - .end_abs_pa = addr_map_mc1_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_channel_alist, - .alist_size = ARRAY_SIZE(t234_mss_channel_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x2U, - }, - { - .start_pa = addr_map_mc2_base_r(), - .end_pa = addr_map_mc2_limit_r(), - .start_abs_pa = addr_map_mc2_base_r(), - .end_abs_pa = addr_map_mc2_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_channel_alist, - .alist_size = ARRAY_SIZE(t234_mss_channel_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x4U, - }, - { - .start_pa = addr_map_mc3_base_r(), - .end_pa = addr_map_mc3_limit_r(), - .start_abs_pa = addr_map_mc3_base_r(), - .end_abs_pa = addr_map_mc3_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_channel_alist, - .alist_size = ARRAY_SIZE(t234_mss_channel_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x8U, - }, - { - .start_pa = addr_map_mc4_base_r(), - .end_pa = addr_map_mc4_limit_r(), - .start_abs_pa = addr_map_mc4_base_r(), - .end_abs_pa = addr_map_mc4_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_channel_alist, - .alist_size = ARRAY_SIZE(t234_mss_channel_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x10U, - }, - { - .start_pa = addr_map_mc5_base_r(), - .end_pa = addr_map_mc5_limit_r(), - .start_abs_pa = addr_map_mc5_base_r(), - .end_abs_pa = addr_map_mc5_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_channel_alist, - .alist_size = ARRAY_SIZE(t234_mss_channel_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x20U, - }, - { - .start_pa = addr_map_mc6_base_r(), - .end_pa = addr_map_mc6_limit_r(), - .start_abs_pa = addr_map_mc6_base_r(), - .end_abs_pa = addr_map_mc6_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_channel_alist, - .alist_size = ARRAY_SIZE(t234_mss_channel_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x40U, - }, - { - .start_pa = addr_map_mc7_base_r(), - .end_pa = addr_map_mc7_limit_r(), - .start_abs_pa = addr_map_mc7_base_r(), - .end_abs_pa = addr_map_mc7_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_channel_alist, - .alist_size = ARRAY_SIZE(t234_mss_channel_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x80U, - }, - { - .start_pa = addr_map_mc8_base_r(), - .end_pa = addr_map_mc8_limit_r(), - .start_abs_pa = addr_map_mc8_base_r(), - .end_abs_pa = addr_map_mc8_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_channel_alist, - .alist_size = ARRAY_SIZE(t234_mss_channel_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x100U, - }, - { - .start_pa = addr_map_mc9_base_r(), - .end_pa = addr_map_mc9_limit_r(), - .start_abs_pa = addr_map_mc9_base_r(), - .end_abs_pa = addr_map_mc9_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_channel_alist, - .alist_size = ARRAY_SIZE(t234_mss_channel_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x200U, - }, - { - .start_pa = addr_map_mc10_base_r(), - .end_pa = addr_map_mc10_limit_r(), - .start_abs_pa = addr_map_mc10_base_r(), - .end_abs_pa = addr_map_mc10_limit_r(), - .fake_registers = NULL, 
- .alist = t234_mss_channel_alist, - .alist_size = ARRAY_SIZE(t234_mss_channel_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x400U, - }, - { - .start_pa = addr_map_mc11_base_r(), - .end_pa = addr_map_mc11_limit_r(), - .start_abs_pa = addr_map_mc11_base_r(), - .end_abs_pa = addr_map_mc11_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_channel_alist, - .alist_size = ARRAY_SIZE(t234_mss_channel_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x800U, - }, - { - .start_pa = addr_map_mc12_base_r(), - .end_pa = addr_map_mc12_limit_r(), - .start_abs_pa = addr_map_mc12_base_r(), - .end_abs_pa = addr_map_mc12_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_channel_alist, - .alist_size = ARRAY_SIZE(t234_mss_channel_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1000U, - }, - { - .start_pa = addr_map_mc13_base_r(), - .end_pa = addr_map_mc13_limit_r(), - .start_abs_pa = addr_map_mc13_base_r(), - .end_abs_pa = addr_map_mc13_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_channel_alist, - .alist_size = ARRAY_SIZE(t234_mss_channel_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x2000U, - }, - { - .start_pa = addr_map_mc14_base_r(), - .end_pa = addr_map_mc14_limit_r(), - .start_abs_pa = addr_map_mc14_base_r(), - .end_abs_pa = addr_map_mc14_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_channel_alist, - .alist_size = ARRAY_SIZE(t234_mss_channel_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x4000U, - }, - { - .start_pa = addr_map_mc15_base_r(), - .end_pa = addr_map_mc15_limit_r(), - .start_abs_pa = addr_map_mc15_base_r(), - .end_abs_pa = addr_map_mc15_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_channel_alist, - .alist_size = ARRAY_SIZE(t234_mss_channel_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x8000U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MSSCHANNELPARTA0_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MSSCHANNELPARTA0_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MSSCHANNELPARTA0_PERFMON_DT, - .index_mask = 0x1U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MSSCHANNELPARTA1_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MSSCHANNELPARTA1_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MSSCHANNELPARTA1_PERFMON_DT, - .index_mask = 0x2U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MSSCHANNELPARTA2_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MSSCHANNELPARTA2_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MSSCHANNELPARTA2_PERFMON_DT, - .index_mask = 0x4U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MSSCHANNELPARTA3_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MSSCHANNELPARTA3_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MSSCHANNELPARTA3_PERFMON_DT, - 
.index_mask = 0x8U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MSSCHANNELPARTB0_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MSSCHANNELPARTB0_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MSSCHANNELPARTB0_PERFMON_DT, - .index_mask = 0x10U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MSSCHANNELPARTB1_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MSSCHANNELPARTB1_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MSSCHANNELPARTB1_PERFMON_DT, - .index_mask = 0x20U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MSSCHANNELPARTB2_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MSSCHANNELPARTB2_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MSSCHANNELPARTB2_PERFMON_DT, - .index_mask = 0x40U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MSSCHANNELPARTB3_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MSSCHANNELPARTB3_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MSSCHANNELPARTB3_PERFMON_DT, - .index_mask = 0x80U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MSSCHANNELPARTC0_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MSSCHANNELPARTC0_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MSSCHANNELPARTC0_PERFMON_DT, - .index_mask = 0x100U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MSSCHANNELPARTC1_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MSSCHANNELPARTC1_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MSSCHANNELPARTC1_PERFMON_DT, - .index_mask = 0x200U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MSSCHANNELPARTC2_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MSSCHANNELPARTC2_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MSSCHANNELPARTC2_PERFMON_DT, - .index_mask = 0x400U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MSSCHANNELPARTC3_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MSSCHANNELPARTC3_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MSSCHANNELPARTC3_PERFMON_DT, - .index_mask = 0x800U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MSSCHANNELPARTD0_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MSSCHANNELPARTD0_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = 
T234_SOC_HWPM_MSSCHANNELPARTD0_PERFMON_DT, - .index_mask = 0x1000U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MSSCHANNELPARTD1_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MSSCHANNELPARTD1_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MSSCHANNELPARTD1_PERFMON_DT, - .index_mask = 0x2000U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MSSCHANNELPARTD2_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MSSCHANNELPARTD2_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MSSCHANNELPARTD2_PERFMON_DT, - .index_mask = 0x4000U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MSSCHANNELPARTD3_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MSSCHANNELPARTD3_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MSSCHANNELPARTD3_PERFMON_DT, - .index_mask = 0x8000U, - }, -}; - -struct hwpm_resource_aperture t234_mss_gpu_hub_map[] = { - { - .start_pa = addr_map_mss_nvlink_1_base_r(), - .end_pa = addr_map_mss_nvlink_1_limit_r(), - .start_abs_pa = addr_map_mss_nvlink_1_base_r(), - .end_abs_pa = addr_map_mss_nvlink_1_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_nvlink_alist, - .alist_size = ARRAY_SIZE(t234_mss_nvlink_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_mss_nvlink_2_base_r(), - .end_pa = addr_map_mss_nvlink_2_limit_r(), - .start_abs_pa = addr_map_mss_nvlink_2_base_r(), - .end_abs_pa = addr_map_mss_nvlink_2_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_nvlink_alist, - .alist_size = ARRAY_SIZE(t234_mss_nvlink_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x2U, - }, - { - .start_pa = addr_map_mss_nvlink_3_base_r(), - .end_pa = addr_map_mss_nvlink_3_limit_r(), - .start_abs_pa = addr_map_mss_nvlink_3_base_r(), - .end_abs_pa = addr_map_mss_nvlink_3_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_nvlink_alist, - .alist_size = ARRAY_SIZE(t234_mss_nvlink_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x4U, - }, - { - .start_pa = addr_map_mss_nvlink_4_base_r(), - .end_pa = addr_map_mss_nvlink_4_limit_r(), - .start_abs_pa = addr_map_mss_nvlink_4_base_r(), - .end_abs_pa = addr_map_mss_nvlink_4_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_nvlink_alist, - .alist_size = ARRAY_SIZE(t234_mss_nvlink_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x8U, - }, - { - .start_pa = addr_map_mss_nvlink_5_base_r(), - .end_pa = addr_map_mss_nvlink_5_limit_r(), - .start_abs_pa = addr_map_mss_nvlink_5_base_r(), - .end_abs_pa = addr_map_mss_nvlink_5_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_nvlink_alist, - .alist_size = ARRAY_SIZE(t234_mss_nvlink_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x10U, - }, - { - .start_pa = addr_map_mss_nvlink_6_base_r(), - .end_pa = addr_map_mss_nvlink_6_limit_r(), - .start_abs_pa = addr_map_mss_nvlink_6_base_r(), - .end_abs_pa = addr_map_mss_nvlink_6_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_nvlink_alist, - 
.alist_size = ARRAY_SIZE(t234_mss_nvlink_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x20U, - }, - { - .start_pa = addr_map_mss_nvlink_7_base_r(), - .end_pa = addr_map_mss_nvlink_7_limit_r(), - .start_abs_pa = addr_map_mss_nvlink_7_base_r(), - .end_abs_pa = addr_map_mss_nvlink_7_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_nvlink_alist, - .alist_size = ARRAY_SIZE(t234_mss_nvlink_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x40U, - }, - { - .start_pa = addr_map_mss_nvlink_8_base_r(), - .end_pa = addr_map_mss_nvlink_8_limit_r(), - .start_abs_pa = addr_map_mss_nvlink_8_base_r(), - .end_abs_pa = addr_map_mss_nvlink_8_limit_r(), - .fake_registers = NULL, - .alist = t234_mss_nvlink_alist, - .alist_size = ARRAY_SIZE(t234_mss_nvlink_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x80U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MSSNVLHSH0_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MSSNVLHSH0_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MSSNVLHSH0_PERFMON_DT, - .index_mask = 0xFFU, - }, -}; - -struct hwpm_resource_aperture t234_mss_iso_niso_hub_map[] = { - { - .start_pa = addr_map_mc0_base_r(), - .end_pa = addr_map_mc0_limit_r(), - .start_abs_pa = addr_map_mc0_base_r(), - .end_abs_pa = addr_map_mc0_limit_r(), - .fake_registers = NULL, - .alist = t234_mc0to7_res_mss_iso_niso_hub_alist, - .alist_size = ARRAY_SIZE(t234_mc0to7_res_mss_iso_niso_hub_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_mc1_base_r(), - .end_pa = addr_map_mc1_limit_r(), - .start_abs_pa = addr_map_mc1_base_r(), - .end_abs_pa = addr_map_mc1_limit_r(), - .fake_registers = NULL, - .alist = t234_mc0to7_res_mss_iso_niso_hub_alist, - .alist_size = ARRAY_SIZE(t234_mc0to7_res_mss_iso_niso_hub_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_mc2_base_r(), - .end_pa = addr_map_mc2_limit_r(), - .start_abs_pa = addr_map_mc2_base_r(), - .end_abs_pa = addr_map_mc2_limit_r(), - .fake_registers = NULL, - .alist = t234_mc0to7_res_mss_iso_niso_hub_alist, - .alist_size = ARRAY_SIZE(t234_mc0to7_res_mss_iso_niso_hub_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_mc3_base_r(), - .end_pa = addr_map_mc3_limit_r(), - .start_abs_pa = addr_map_mc3_base_r(), - .end_abs_pa = addr_map_mc3_limit_r(), - .fake_registers = NULL, - .alist = t234_mc0to7_res_mss_iso_niso_hub_alist, - .alist_size = ARRAY_SIZE(t234_mc0to7_res_mss_iso_niso_hub_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_mc4_base_r(), - .end_pa = addr_map_mc4_limit_r(), - .start_abs_pa = addr_map_mc4_base_r(), - .end_abs_pa = addr_map_mc4_limit_r(), - .fake_registers = NULL, - .alist = t234_mc0to7_res_mss_iso_niso_hub_alist, - .alist_size = ARRAY_SIZE(t234_mc0to7_res_mss_iso_niso_hub_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_mc5_base_r(), - .end_pa = addr_map_mc5_limit_r(), - .start_abs_pa = addr_map_mc5_base_r(), - .end_abs_pa = addr_map_mc5_limit_r(), - .fake_registers = NULL, - .alist = t234_mc0to7_res_mss_iso_niso_hub_alist, 
- .alist_size = ARRAY_SIZE(t234_mc0to7_res_mss_iso_niso_hub_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - }, - { - .start_pa = addr_map_mc6_base_r(), - .end_pa = addr_map_mc6_limit_r(), - .start_abs_pa = addr_map_mc6_base_r(), - .end_abs_pa = addr_map_mc6_limit_r(), - .fake_registers = NULL, - .alist = t234_mc0to7_res_mss_iso_niso_hub_alist, - .alist_size = ARRAY_SIZE(t234_mc0to7_res_mss_iso_niso_hub_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_mc7_base_r(), - .end_pa = addr_map_mc7_limit_r(), - .start_abs_pa = addr_map_mc7_base_r(), - .end_abs_pa = addr_map_mc7_limit_r(), - .fake_registers = NULL, - .alist = t234_mc0to7_res_mss_iso_niso_hub_alist, - .alist_size = ARRAY_SIZE(t234_mc0to7_res_mss_iso_niso_hub_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_mc8_base_r(), - .end_pa = addr_map_mc8_limit_r(), - .start_abs_pa = addr_map_mc8_base_r(), - .end_abs_pa = addr_map_mc8_limit_r(), - .fake_registers = NULL, - .alist = t234_mc8_res_mss_iso_niso_hub_alist, - .alist_size = ARRAY_SIZE(t234_mc8_res_mss_iso_niso_hub_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MSSHUB0_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MSSHUB0_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MSSHUB0_PERFMON_DT, - .index_mask = 0x1U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MSSHUB1_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MSSHUB1_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MSSHUB1_PERFMON_DT, - .index_mask = 0x1U, - }, -}; - -struct hwpm_resource_aperture t234_mss_mcf_map[] = { - { - .start_pa = addr_map_mc0_base_r(), - .end_pa = addr_map_mc0_limit_r(), - .start_abs_pa = addr_map_mc0_base_r(), - .end_abs_pa = addr_map_mc0_limit_r(), - .fake_registers = NULL, - .alist = t234_mc0to1_mss_mcf_alist, - .alist_size = ARRAY_SIZE(t234_mc0to1_mss_mcf_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_mc1_base_r(), - .end_pa = addr_map_mc1_limit_r(), - .start_abs_pa = addr_map_mc1_base_r(), - .end_abs_pa = addr_map_mc1_limit_r(), - .fake_registers = NULL, - .alist = t234_mc0to1_mss_mcf_alist, - .alist_size = ARRAY_SIZE(t234_mc0to1_mss_mcf_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_mc2_base_r(), - .end_pa = addr_map_mc2_limit_r(), - .start_abs_pa = addr_map_mc2_base_r(), - .end_abs_pa = addr_map_mc2_limit_r(), - .fake_registers = NULL, - .alist = t234_mc2to7_mss_mcf_alist, - .alist_size = ARRAY_SIZE(t234_mc2to7_mss_mcf_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_mc3_base_r(), - .end_pa = addr_map_mc3_limit_r(), - .start_abs_pa = addr_map_mc3_base_r(), - .end_abs_pa = addr_map_mc3_limit_r(), - .fake_registers = NULL, - .alist = t234_mc2to7_mss_mcf_alist, - .alist_size = ARRAY_SIZE(t234_mc2to7_mss_mcf_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - 
.start_pa = addr_map_mc4_base_r(), - .end_pa = addr_map_mc4_limit_r(), - .start_abs_pa = addr_map_mc4_base_r(), - .end_abs_pa = addr_map_mc4_limit_r(), - .fake_registers = NULL, - .alist = t234_mc2to7_mss_mcf_alist, - .alist_size = ARRAY_SIZE(t234_mc2to7_mss_mcf_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_mc5_base_r(), - .end_pa = addr_map_mc5_limit_r(), - .start_abs_pa = addr_map_mc5_base_r(), - .end_abs_pa = addr_map_mc5_limit_r(), - .fake_registers = NULL, - .alist = t234_mc2to7_mss_mcf_alist, - .alist_size = ARRAY_SIZE(t234_mc2to7_mss_mcf_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_mc6_base_r(), - .end_pa = addr_map_mc6_limit_r(), - .start_abs_pa = addr_map_mc6_base_r(), - .end_abs_pa = addr_map_mc6_limit_r(), - .fake_registers = NULL, - .alist = t234_mc2to7_mss_mcf_alist, - .alist_size = ARRAY_SIZE(t234_mc2to7_mss_mcf_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_mc7_base_r(), - .end_pa = addr_map_mc7_limit_r(), - .start_abs_pa = addr_map_mc7_base_r(), - .end_abs_pa = addr_map_mc7_limit_r(), - .fake_registers = NULL, - .alist = t234_mc2to7_mss_mcf_alist, - .alist_size = ARRAY_SIZE(t234_mc2to7_mss_mcf_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - .start_pa = addr_map_mcb_base_r(), - .end_pa = addr_map_mcb_limit_r(), - .start_abs_pa = addr_map_mcb_base_r(), - .end_abs_pa = addr_map_mcb_limit_r(), - .fake_registers = NULL, - .alist = t234_mcb_mss_mcf_alist, - .alist_size = ARRAY_SIZE(t234_mcb_mss_mcf_alist), - .is_ip = true, - .dt_aperture = T234_SOC_HWPM_INVALID_DT, - .index_mask = 0x1U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MSSMCFCLIENT0_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MSSMCFCLIENT0_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MSSMCFCLIENT0_PERFMON_DT, - .index_mask = 0x1U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MSSMCFMEM0_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MSSMCFMEM0_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MSSMCFMEM0_PERFMON_DT, - .index_mask = 0x1U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_MSSMCFMEM1_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_MSSMCFMEM1_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_MSSMCFMEM1_PERFMON_DT, - .index_mask = 0x1U, - }, -}; - -struct hwpm_resource_aperture t234_pma_map[] = { - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = PERFMON_BASE(T234_SOC_HWPM_SYS0_PERFMON_DT), - .end_abs_pa = PERFMON_LIMIT(T234_SOC_HWPM_SYS0_PERFMON_DT), - .fake_registers = NULL, - .alist = t234_perfmon_alist, - .alist_size = ARRAY_SIZE(t234_perfmon_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_SYS0_PERFMON_DT, - .index_mask = 0x1U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = addr_map_pma_base_r(), - .end_abs_pa = addr_map_pma_limit_r(), - .fake_registers = NULL, - .alist = t234_pma_res_pma_alist, - .alist_size 
= ARRAY_SIZE(t234_pma_res_pma_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_PMA_DT, - .index_mask = 0x1U, - }, -}; - -struct hwpm_resource_aperture t234_cmd_slice_rtr_map[] = { - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = addr_map_pma_base_r(), - .end_abs_pa = addr_map_pma_limit_r(), - .fake_registers = NULL, - .alist = t234_pma_res_cmd_slice_rtr_alist, - .alist_size = ARRAY_SIZE(t234_pma_res_cmd_slice_rtr_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_PMA_DT, - .index_mask = 0x1U, - }, - { - .start_pa = 0, - .end_pa = 0, - .start_abs_pa = addr_map_rtr_base_r(), - .end_abs_pa = addr_map_rtr_limit_r(), - .fake_registers = NULL, - .alist = t234_rtr_alist, - .alist_size = ARRAY_SIZE(t234_rtr_alist), - .is_ip = false, - .dt_aperture = T234_SOC_HWPM_RTR_DT, - .index_mask = 0x1U, - }, -}; - -#endif /* T234_SOC_HWPM_IP_MAP_H */ diff --git a/hal/t234/t234_soc_hwpm_mem_buf_utils.c b/hal/t234/t234_soc_hwpm_mem_buf_utils.c deleted file mode 100644 index fb46804..0000000 --- a/hal/t234/t234_soc_hwpm_mem_buf_utils.c +++ /dev/null @@ -1,343 +0,0 @@ -/* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -int t234_soc_hwpm_update_mem_bytes(struct tegra_soc_hwpm *hwpm, - struct tegra_soc_hwpm_update_get_put *update_get_put) -{ - u32 *mem_bytes_kernel_u32 = NULL; - u32 reg_val = 0U; - u32 field_val = 0U; - int ret; - - - /* Update SW get pointer */ - hwpm_writel(hwpm, T234_SOC_HWPM_PMA_DT, - pmasys_channel_mem_bump_r(0) - addr_map_pma_base_r(), - update_get_put->mem_bump); - - /* Stream MEM_BYTES value to MEM_BYTES buffer */ - if (update_get_put->b_stream_mem_bytes) { - mem_bytes_kernel_u32 = (u32 *)(hwpm->mem_bytes_kernel); - *mem_bytes_kernel_u32 = TEGRA_SOC_HWPM_MEM_BYTES_INVALID; - ret = reg_rmw(hwpm, NULL, T234_SOC_HWPM_PMA_DT, - pmasys_channel_control_user_r(0) - addr_map_pma_base_r(), - pmasys_channel_control_user_update_bytes_m(), - pmasys_channel_control_user_update_bytes_doit_f(), - false, false); - if (ret < 0) { - tegra_soc_hwpm_err("Failed to stream mem_bytes to buffer"); - return -EIO; - } - } - - /* Read HW put pointer */ - if (update_get_put->b_read_mem_head) { - update_get_put->mem_head = hwpm_readl(hwpm, - T234_SOC_HWPM_PMA_DT, - pmasys_channel_mem_head_r(0) - addr_map_pma_base_r()); - tegra_soc_hwpm_dbg("MEM_HEAD = 0x%llx", - update_get_put->mem_head); - } - - /* Check overflow error status */ - if (update_get_put->b_check_overflow) { - reg_val = hwpm_readl(hwpm, T234_SOC_HWPM_PMA_DT, - pmasys_channel_status_secure_r(0) - - addr_map_pma_base_r()); - field_val = pmasys_channel_status_secure_membuf_status_v( - reg_val); - update_get_put->b_overflowed = (field_val == - pmasys_channel_status_secure_membuf_status_overflowed_v()); - tegra_soc_hwpm_dbg("OVERFLOWED = %u", - update_get_put->b_overflowed); - } - - return 
0; -} - -int t234_soc_hwpm_clear_pipeline(struct tegra_soc_hwpm *hwpm) -{ - int err = 0; - int ret = 0; - bool timeout = false; - u32 *mem_bytes_kernel_u32 = NULL; - - /* Stream MEM_BYTES to clear pipeline */ - if (hwpm->mem_bytes_kernel) { - mem_bytes_kernel_u32 = (u32 *)(hwpm->mem_bytes_kernel); - *mem_bytes_kernel_u32 = TEGRA_SOC_HWPM_MEM_BYTES_INVALID; - err = reg_rmw(hwpm, NULL, T234_SOC_HWPM_PMA_DT, - pmasys_channel_control_user_r(0) - addr_map_pma_base_r(), - pmasys_channel_control_user_update_bytes_m(), - pmasys_channel_control_user_update_bytes_doit_f(), - false, false); - RELEASE_FAIL("Unable to stream MEM_BYTES"); - timeout = HWPM_TIMEOUT(*mem_bytes_kernel_u32 != - TEGRA_SOC_HWPM_MEM_BYTES_INVALID, - "MEM_BYTES streaming"); - if (timeout && ret == 0) - ret = -EIO; - } - - /* Disable PMA streaming */ - err = reg_rmw(hwpm, NULL, T234_SOC_HWPM_PMA_DT, - pmasys_trigger_config_user_r(0) - addr_map_pma_base_r(), - pmasys_trigger_config_user_record_stream_m(), - pmasys_trigger_config_user_record_stream_disable_f(), - false, false); - RELEASE_FAIL("Unable to disable PMA streaming"); - - err = reg_rmw(hwpm, NULL, T234_SOC_HWPM_PMA_DT, - pmasys_channel_control_user_r(0) - addr_map_pma_base_r(), - pmasys_channel_control_user_stream_m(), - pmasys_channel_control_user_stream_disable_f(), - false, false); - RELEASE_FAIL("Unable to disable PMA streaming"); - - /* Memory Management */ - hwpm_writel(hwpm, T234_SOC_HWPM_PMA_DT, - pmasys_channel_outbase_r(0) - addr_map_pma_base_r(), 0); - hwpm_writel(hwpm, T234_SOC_HWPM_PMA_DT, - pmasys_channel_outbaseupper_r(0) - addr_map_pma_base_r(), 0); - hwpm_writel(hwpm, T234_SOC_HWPM_PMA_DT, - pmasys_channel_outsize_r(0) - addr_map_pma_base_r(), 0); - hwpm_writel(hwpm, T234_SOC_HWPM_PMA_DT, - pmasys_channel_mem_bytes_addr_r(0) - addr_map_pma_base_r(), 0); - - if (hwpm->stream_sgt && (!IS_ERR(hwpm->stream_sgt))) { - dma_buf_unmap_attachment(hwpm->stream_attach, - hwpm->stream_sgt, - DMA_FROM_DEVICE); - } - hwpm->stream_sgt = NULL; - - if (hwpm->stream_attach && (!IS_ERR(hwpm->stream_attach))) { - dma_buf_detach(hwpm->stream_dma_buf, hwpm->stream_attach); - } - hwpm->stream_attach = NULL; - - if (hwpm->stream_dma_buf && (!IS_ERR(hwpm->stream_dma_buf))) { - dma_buf_put(hwpm->stream_dma_buf); - } - hwpm->stream_dma_buf = NULL; - - if (hwpm->mem_bytes_kernel) { - dma_buf_vunmap(hwpm->mem_bytes_dma_buf, - hwpm->mem_bytes_kernel); - hwpm->mem_bytes_kernel = NULL; - } - - if (hwpm->mem_bytes_sgt && (!IS_ERR(hwpm->mem_bytes_sgt))) { - dma_buf_unmap_attachment(hwpm->mem_bytes_attach, - hwpm->mem_bytes_sgt, - DMA_FROM_DEVICE); - } - hwpm->mem_bytes_sgt = NULL; - - if (hwpm->mem_bytes_attach && (!IS_ERR(hwpm->mem_bytes_attach))) { - dma_buf_detach(hwpm->mem_bytes_dma_buf, hwpm->mem_bytes_attach); - } - hwpm->mem_bytes_attach = NULL; - - if (hwpm->mem_bytes_dma_buf && (!IS_ERR(hwpm->mem_bytes_dma_buf))) { - dma_buf_put(hwpm->mem_bytes_dma_buf); - } - hwpm->mem_bytes_dma_buf = NULL; - - return ret; -} - -int t234_soc_hwpm_stream_buf_map(struct tegra_soc_hwpm *hwpm, - struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream) -{ - int ret = 0; - u32 reg_val = 0; - u32 outbase_lo = 0; - u32 outbase_hi = 0; - u32 outsize = 0; - u32 mem_bytes_addr = 0; - - /* Memory map stream buffer */ - hwpm->stream_dma_buf = dma_buf_get(alloc_pma_stream->stream_buf_fd); - if (IS_ERR(hwpm->stream_dma_buf)) { - tegra_soc_hwpm_err("Unable to get stream dma_buf"); - ret = PTR_ERR(hwpm->stream_dma_buf); - goto fail; - } - hwpm->stream_attach = dma_buf_attach(hwpm->stream_dma_buf, hwpm->dev); - 
if (IS_ERR(hwpm->stream_attach)) { - tegra_soc_hwpm_err("Unable to attach stream dma_buf"); - ret = PTR_ERR(hwpm->stream_attach); - goto fail; - } - hwpm->stream_sgt = dma_buf_map_attachment(hwpm->stream_attach, - DMA_FROM_DEVICE); - if (IS_ERR(hwpm->stream_sgt)) { - tegra_soc_hwpm_err("Unable to map stream attachment"); - ret = PTR_ERR(hwpm->stream_sgt); - goto fail; - } - alloc_pma_stream->stream_buf_pma_va = - sg_dma_address(hwpm->stream_sgt->sgl); - if (alloc_pma_stream->stream_buf_pma_va == 0) { - tegra_soc_hwpm_err("Invalid stream buffer SMMU IOVA"); - ret = -ENXIO; - goto fail; - } - tegra_soc_hwpm_dbg("stream_buf_pma_va = 0x%llx", - alloc_pma_stream->stream_buf_pma_va); - - /* Memory map mem bytes buffer */ - hwpm->mem_bytes_dma_buf = - dma_buf_get(alloc_pma_stream->mem_bytes_buf_fd); - if (IS_ERR(hwpm->mem_bytes_dma_buf)) { - tegra_soc_hwpm_err("Unable to get mem bytes dma_buf"); - ret = PTR_ERR(hwpm->mem_bytes_dma_buf); - goto fail; - } - hwpm->mem_bytes_attach = dma_buf_attach(hwpm->mem_bytes_dma_buf, - hwpm->dev); - if (IS_ERR(hwpm->mem_bytes_attach)) { - tegra_soc_hwpm_err("Unable to attach mem bytes dma_buf"); - ret = PTR_ERR(hwpm->mem_bytes_attach); - goto fail; - } - hwpm->mem_bytes_sgt = dma_buf_map_attachment(hwpm->mem_bytes_attach, - DMA_FROM_DEVICE); - if (IS_ERR(hwpm->mem_bytes_sgt)) { - tegra_soc_hwpm_err("Unable to map mem bytes attachment"); - ret = PTR_ERR(hwpm->mem_bytes_sgt); - goto fail; - } - hwpm->mem_bytes_kernel = dma_buf_vmap(hwpm->mem_bytes_dma_buf); - if (!hwpm->mem_bytes_kernel) { - tegra_soc_hwpm_err( - "Unable to map mem_bytes buffer into kernel VA space"); - ret = -ENOMEM; - goto fail; - } - memset(hwpm->mem_bytes_kernel, 0, 32); - - outbase_lo = alloc_pma_stream->stream_buf_pma_va & - pmasys_channel_outbase_ptr_m(); - hwpm_writel(hwpm, T234_SOC_HWPM_PMA_DT, - pmasys_channel_outbase_r(0) - addr_map_pma_base_r(), - outbase_lo); - tegra_soc_hwpm_dbg("OUTBASE = 0x%x", reg_val); - - outbase_hi = (alloc_pma_stream->stream_buf_pma_va >> 32) & - pmasys_channel_outbaseupper_ptr_m(); - hwpm_writel(hwpm, T234_SOC_HWPM_PMA_DT, - pmasys_channel_outbaseupper_r(0) - addr_map_pma_base_r(), - outbase_hi); - tegra_soc_hwpm_dbg("OUTBASEUPPER = 0x%x", reg_val); - - outsize = alloc_pma_stream->stream_buf_size & - pmasys_channel_outsize_numbytes_m(); - hwpm_writel(hwpm, T234_SOC_HWPM_PMA_DT, - pmasys_channel_outsize_r(0) - addr_map_pma_base_r(), - outsize); - tegra_soc_hwpm_dbg("OUTSIZE = 0x%x", reg_val); - - mem_bytes_addr = sg_dma_address(hwpm->mem_bytes_sgt->sgl) & - pmasys_channel_mem_bytes_addr_ptr_m(); - hwpm_writel(hwpm, T234_SOC_HWPM_PMA_DT, - pmasys_channel_mem_bytes_addr_r(0) - addr_map_pma_base_r(), - mem_bytes_addr); - tegra_soc_hwpm_dbg("MEM_BYTES_ADDR = 0x%x", reg_val); - - hwpm_writel(hwpm, T234_SOC_HWPM_PMA_DT, - pmasys_channel_mem_block_r(0) - addr_map_pma_base_r(), - pmasys_channel_mem_block_valid_f( - pmasys_channel_mem_block_valid_true_v())); - - return 0; - -fail: - hwpm_writel(hwpm, T234_SOC_HWPM_PMA_DT, - pmasys_channel_mem_block_r(0) - addr_map_pma_base_r(), - pmasys_channel_mem_block_valid_f( - pmasys_channel_mem_block_valid_false_v())); - hwpm_writel(hwpm, T234_SOC_HWPM_PMA_DT, - pmasys_channel_outbase_r(0) - addr_map_pma_base_r(), 0); - hwpm_writel(hwpm, T234_SOC_HWPM_PMA_DT, - pmasys_channel_outbaseupper_r(0) - addr_map_pma_base_r(), 0); - hwpm_writel(hwpm, T234_SOC_HWPM_PMA_DT, - pmasys_channel_outsize_r(0) - addr_map_pma_base_r(), 0); - hwpm_writel(hwpm, T234_SOC_HWPM_PMA_DT, - pmasys_channel_mem_bytes_addr_r(0) - addr_map_pma_base_r(), 0); - 
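/*
 * The unwind that follows releases the imports in strict reverse order of
 * acquisition: vunmap the mem_bytes kernel mapping first, then
 * unmap_attachment -> detach -> put for each dma-buf. A condensed sketch
 * (hypothetical helper, not part of this driver):
 */
static void hwpm_release_dmabuf_sketch(struct dma_buf *buf,
		struct dma_buf_attachment *attach, struct sg_table *sgt)
{
	if (sgt && !IS_ERR(sgt))
		dma_buf_unmap_attachment(attach, sgt, DMA_FROM_DEVICE);
	if (attach && !IS_ERR(attach))
		dma_buf_detach(buf, attach);
	if (buf && !IS_ERR(buf))
		dma_buf_put(buf);
}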
- alloc_pma_stream->stream_buf_pma_va = 0; - - if (hwpm->stream_sgt && (!IS_ERR(hwpm->stream_sgt))) { - dma_buf_unmap_attachment(hwpm->stream_attach, - hwpm->stream_sgt, - DMA_FROM_DEVICE); - } - hwpm->stream_sgt = NULL; - - if (hwpm->stream_attach && (!IS_ERR(hwpm->stream_attach))) { - dma_buf_detach(hwpm->stream_dma_buf, hwpm->stream_attach); - } - hwpm->stream_attach = NULL; - - if (hwpm->stream_dma_buf && (!IS_ERR(hwpm->stream_dma_buf))) { - dma_buf_put(hwpm->stream_dma_buf); - } - hwpm->stream_dma_buf = NULL; - - if (hwpm->mem_bytes_kernel) { - dma_buf_vunmap(hwpm->mem_bytes_dma_buf, - hwpm->mem_bytes_kernel); - hwpm->mem_bytes_kernel = NULL; - } - if (hwpm->mem_bytes_sgt && (!IS_ERR(hwpm->mem_bytes_sgt))) { - dma_buf_unmap_attachment(hwpm->mem_bytes_attach, - hwpm->mem_bytes_sgt, - DMA_FROM_DEVICE); - } - hwpm->mem_bytes_sgt = NULL; - - if (hwpm->mem_bytes_attach && (!IS_ERR(hwpm->mem_bytes_attach))) { - dma_buf_detach(hwpm->mem_bytes_dma_buf, hwpm->mem_bytes_attach); - } - hwpm->mem_bytes_attach = NULL; - - if (hwpm->mem_bytes_dma_buf && (!IS_ERR(hwpm->mem_bytes_dma_buf))) { - dma_buf_put(hwpm->mem_bytes_dma_buf); - } - hwpm->mem_bytes_dma_buf = NULL; - - return ret; -} diff --git a/hal/t234/t234_soc_hwpm_perfmon_dt.h b/hal/t234/t234_soc_hwpm_perfmon_dt.h deleted file mode 100644 index e727245..0000000 --- a/hal/t234/t234_soc_hwpm_perfmon_dt.h +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * This header contains HW aperture and register info for the Tegra SOC HWPM - * driver. 
- */ - -#ifndef T234_SOC_HWPM_PERFMON_DT_H -#define T234_SOC_HWPM_PERFMON_DT_H - -#include - -#include -#include -#include - -enum t234_soc_hwpm_dt_aperture { - T234_SOC_HWPM_INVALID_DT = -1, - - /* PERFMONs */ - T234_SOC_HWPM_FIRST_PERFMON_DT = 0, - T234_SOC_HWPM_VI0_PERFMON_DT = T234_SOC_HWPM_FIRST_PERFMON_DT, - T234_SOC_HWPM_VI1_PERFMON_DT = T234_SOC_HWPM_FIRST_PERFMON_DT + 1, - T234_SOC_HWPM_ISP0_PERFMON_DT, - T234_SOC_HWPM_VICA0_PERFMON_DT, - T234_SOC_HWPM_OFAA0_PERFMON_DT, - T234_SOC_HWPM_PVAV0_PERFMON_DT, - T234_SOC_HWPM_PVAV1_PERFMON_DT, - T234_SOC_HWPM_PVAC0_PERFMON_DT, - T234_SOC_HWPM_NVDLAB0_PERFMON_DT, - T234_SOC_HWPM_NVDLAB1_PERFMON_DT, - T234_SOC_HWPM_NVDISPLAY0_PERFMON_DT, - T234_SOC_HWPM_SYS0_PERFMON_DT, - T234_SOC_HWPM_MGBE0_PERFMON_DT, - T234_SOC_HWPM_MGBE1_PERFMON_DT, - T234_SOC_HWPM_MGBE2_PERFMON_DT, - T234_SOC_HWPM_MGBE3_PERFMON_DT, - T234_SOC_HWPM_SCF0_PERFMON_DT, - T234_SOC_HWPM_NVDECA0_PERFMON_DT, - T234_SOC_HWPM_NVENCA0_PERFMON_DT, - T234_SOC_HWPM_MSSNVLHSH0_PERFMON_DT, - T234_SOC_HWPM_PCIE0_PERFMON_DT, - T234_SOC_HWPM_PCIE1_PERFMON_DT, - T234_SOC_HWPM_PCIE2_PERFMON_DT, - T234_SOC_HWPM_PCIE3_PERFMON_DT, - T234_SOC_HWPM_PCIE4_PERFMON_DT, - T234_SOC_HWPM_PCIE5_PERFMON_DT, - T234_SOC_HWPM_PCIE6_PERFMON_DT, - T234_SOC_HWPM_PCIE7_PERFMON_DT, - T234_SOC_HWPM_PCIE8_PERFMON_DT, - T234_SOC_HWPM_PCIE9_PERFMON_DT, - T234_SOC_HWPM_PCIE10_PERFMON_DT, - T234_SOC_HWPM_MSSCHANNELPARTA0_PERFMON_DT, - T234_SOC_HWPM_MSSCHANNELPARTA1_PERFMON_DT, - T234_SOC_HWPM_MSSCHANNELPARTA2_PERFMON_DT, - T234_SOC_HWPM_MSSCHANNELPARTA3_PERFMON_DT, - T234_SOC_HWPM_MSSCHANNELPARTB0_PERFMON_DT, - T234_SOC_HWPM_MSSCHANNELPARTB1_PERFMON_DT, - T234_SOC_HWPM_MSSCHANNELPARTB2_PERFMON_DT, - T234_SOC_HWPM_MSSCHANNELPARTB3_PERFMON_DT, - T234_SOC_HWPM_MSSCHANNELPARTC0_PERFMON_DT, - T234_SOC_HWPM_MSSCHANNELPARTC1_PERFMON_DT, - T234_SOC_HWPM_MSSCHANNELPARTC2_PERFMON_DT, - T234_SOC_HWPM_MSSCHANNELPARTC3_PERFMON_DT, - T234_SOC_HWPM_MSSCHANNELPARTD0_PERFMON_DT, - T234_SOC_HWPM_MSSCHANNELPARTD1_PERFMON_DT, - T234_SOC_HWPM_MSSCHANNELPARTD2_PERFMON_DT, - T234_SOC_HWPM_MSSCHANNELPARTD3_PERFMON_DT, - T234_SOC_HWPM_MSSHUB0_PERFMON_DT, - T234_SOC_HWPM_MSSHUB1_PERFMON_DT, - T234_SOC_HWPM_MSSMCFCLIENT0_PERFMON_DT, - T234_SOC_HWPM_MSSMCFMEM0_PERFMON_DT, - T234_SOC_HWPM_MSSMCFMEM1_PERFMON_DT, - T234_SOC_HWPM_LAST_PERFMON_DT = T234_SOC_HWPM_MSSMCFMEM1_PERFMON_DT, - T234_SOC_HWPM_PMA_DT = T234_SOC_HWPM_LAST_PERFMON_DT + 1, - T234_SOC_HWPM_RTR_DT, - T234_SOC_HWPM_NUM_DT_APERTURES -}; -#define IS_PERFMON(idx) (((idx) >= T234_SOC_HWPM_FIRST_PERFMON_DT) && \ - ((idx) <= T234_SOC_HWPM_LAST_PERFMON_DT)) - -/* RPG_PM Aperture */ -#define PERFMON_BASE(ip_idx) (addr_map_rpg_pm_base_r() + \ - ((u32)(ip_idx)) * pmmsys_perdomain_offset_v()) -#define PERFMON_LIMIT(ip_idx) (PERFMON_BASE((ip_idx) + 1) - 1) - -#endif /* T234_SOC_HWPM_PERFMON_DT_H */ diff --git a/hal/t234/t234_soc_hwpm_resource_utils.c b/hal/t234/t234_soc_hwpm_resource_utils.c deleted file mode 100644 index df5a692..0000000 --- a/hal/t234/t234_soc_hwpm_resource_utils.c +++ /dev/null @@ -1,531 +0,0 @@ -/* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include -#include - -/* - * Normally there is a 1-to-1 mapping between an MMIO aperture and a - * hwpm_resource_aperture struct. But MC MMIO apertures are used in multiple - * hwpm_resource_aperture structs. Therefore, we have to share the fake register - * arrays between these hwpm_resource_aperture structs. This is why we have to - * define the fake register arrays globally. For all other 1-to-1 mapping - * apertures the fake register arrays are directly embedded inside the - * hwpm_resource_aperture structs. - */ -u32 *t234_mc_fake_regs[16] = {NULL}; - -bool t234_soc_hwpm_is_dt_aperture_reserved(struct tegra_soc_hwpm *hwpm, - struct hwpm_resource_aperture *aperture, u32 rsrc_id) -{ - return ((aperture->dt_aperture == T234_SOC_HWPM_PMA_DT) || - (aperture->dt_aperture == T234_SOC_HWPM_RTR_DT) || - (aperture->dt_aperture == T234_SOC_HWPM_SYS0_PERFMON_DT) || - ((aperture->index_mask & hwpm->ip_fs_info[rsrc_id]) != 0)); -} - -u32 **t234_soc_hwpm_get_mc_fake_regs(struct tegra_soc_hwpm *hwpm, - struct hwpm_resource_aperture *aperture) -{ - if (!hwpm->fake_registers_enabled) - return NULL; - if (!aperture) { - tegra_soc_hwpm_err("aperture is NULL"); - return NULL; - } - - switch (aperture->start_pa) { - case addr_map_mc0_base_r(): - return &t234_mc_fake_regs[0]; - case addr_map_mc1_base_r(): - return &t234_mc_fake_regs[1]; - case addr_map_mc2_base_r(): - return &t234_mc_fake_regs[2]; - case addr_map_mc3_base_r(): - return &t234_mc_fake_regs[3]; - case addr_map_mc4_base_r(): - return &t234_mc_fake_regs[4]; - case addr_map_mc5_base_r(): - return &t234_mc_fake_regs[5]; - case addr_map_mc6_base_r(): - return &t234_mc_fake_regs[6]; - case addr_map_mc7_base_r(): - return &t234_mc_fake_regs[7]; - case addr_map_mc8_base_r(): - return &t234_mc_fake_regs[8]; - case addr_map_mc9_base_r(): - return &t234_mc_fake_regs[9]; - case addr_map_mc10_base_r(): - return &t234_mc_fake_regs[10]; - case addr_map_mc11_base_r(): - return &t234_mc_fake_regs[11]; - case addr_map_mc12_base_r(): - return &t234_mc_fake_regs[12]; - case addr_map_mc13_base_r(): - return &t234_mc_fake_regs[13]; - case addr_map_mc14_base_r(): - return &t234_mc_fake_regs[14]; - case addr_map_mc15_base_r(): - return &t234_mc_fake_regs[15]; - default: - return NULL; - } -} - -void t234_soc_hwpm_set_mc_fake_regs(struct tegra_soc_hwpm *hwpm, - struct hwpm_resource_aperture *aperture, - bool set_null) -{ - u32 *fake_regs = NULL; - - /* Get pointer to array of MSS channel apertures */ - struct hwpm_resource_aperture *l_mss_channel_map = - hwpm->hwpm_resources[TEGRA_SOC_HWPM_RESOURCE_MSS_CHANNEL].map; - /* Get pointer to array of MSS ISO/NISO hub apertures */ - struct hwpm_resource_aperture *l_mss_iso_niso_map = - hwpm->hwpm_resources[TEGRA_SOC_HWPM_RESOURCE_MSS_ISO_NISO_HUBS].map; - /* Get pointer to array of MSS MCF apertures */ - struct hwpm_resource_aperture *l_mss_mcf_map = - hwpm->hwpm_resources[TEGRA_SOC_HWPM_RESOURCE_MSS_MCF].map; - - if (!aperture) { - tegra_soc_hwpm_err("aperture is NULL"); - return; - } - - switch (aperture->start_pa) { - case addr_map_mc0_base_r(): - fake_regs = (!hwpm->fake_registers_enabled || set_null) ? 
- NULL : t234_mc_fake_regs[0]; - l_mss_channel_map[0].fake_registers = fake_regs; - l_mss_iso_niso_map[0].fake_registers = fake_regs; - l_mss_mcf_map[0].fake_registers = fake_regs; - break; - case addr_map_mc1_base_r(): - fake_regs = (!hwpm->fake_registers_enabled || set_null) ? - NULL : t234_mc_fake_regs[1]; - l_mss_channel_map[1].fake_registers = fake_regs; - l_mss_iso_niso_map[1].fake_registers = fake_regs; - l_mss_mcf_map[1].fake_registers = fake_regs; - break; - case addr_map_mc2_base_r(): - fake_regs = (!hwpm->fake_registers_enabled || set_null) ? - NULL : t234_mc_fake_regs[2]; - l_mss_channel_map[2].fake_registers = fake_regs; - l_mss_iso_niso_map[2].fake_registers = fake_regs; - l_mss_mcf_map[2].fake_registers = fake_regs; - break; - case addr_map_mc3_base_r(): - fake_regs = (!hwpm->fake_registers_enabled || set_null) ? - NULL : t234_mc_fake_regs[3]; - l_mss_channel_map[3].fake_registers = fake_regs; - l_mss_iso_niso_map[3].fake_registers = fake_regs; - l_mss_mcf_map[3].fake_registers = fake_regs; - break; - case addr_map_mc4_base_r(): - fake_regs = (!hwpm->fake_registers_enabled || set_null) ? - NULL : t234_mc_fake_regs[4]; - l_mss_channel_map[4].fake_registers = fake_regs; - l_mss_iso_niso_map[4].fake_registers = fake_regs; - l_mss_mcf_map[4].fake_registers = fake_regs; - break; - case addr_map_mc5_base_r(): - fake_regs = (!hwpm->fake_registers_enabled || set_null) ? - NULL : t234_mc_fake_regs[5]; - l_mss_channel_map[5].fake_registers = fake_regs; - l_mss_iso_niso_map[5].fake_registers = fake_regs; - l_mss_mcf_map[5].fake_registers = fake_regs; - break; - case addr_map_mc6_base_r(): - fake_regs = (!hwpm->fake_registers_enabled || set_null) ? - NULL : t234_mc_fake_regs[6]; - l_mss_channel_map[6].fake_registers = fake_regs; - l_mss_iso_niso_map[6].fake_registers = fake_regs; - l_mss_mcf_map[6].fake_registers = fake_regs; - break; - case addr_map_mc7_base_r(): - fake_regs = (!hwpm->fake_registers_enabled || set_null) ? - NULL : t234_mc_fake_regs[7]; - l_mss_channel_map[7].fake_registers = fake_regs; - l_mss_iso_niso_map[7].fake_registers = fake_regs; - l_mss_mcf_map[7].fake_registers = fake_regs; - break; - case addr_map_mc8_base_r(): - fake_regs = (!hwpm->fake_registers_enabled || set_null) ? - NULL : t234_mc_fake_regs[8]; - l_mss_channel_map[8].fake_registers = fake_regs; - l_mss_iso_niso_map[8].fake_registers = fake_regs; - break; - case addr_map_mc9_base_r(): - fake_regs = (!hwpm->fake_registers_enabled || set_null) ? - NULL : t234_mc_fake_regs[9]; - l_mss_channel_map[9].fake_registers = fake_regs; - break; - case addr_map_mc10_base_r(): - fake_regs = (!hwpm->fake_registers_enabled || set_null) ? - NULL : t234_mc_fake_regs[10]; - l_mss_channel_map[10].fake_registers = fake_regs; - break; - case addr_map_mc11_base_r(): - fake_regs = (!hwpm->fake_registers_enabled || set_null) ? - NULL : t234_mc_fake_regs[11]; - l_mss_channel_map[11].fake_registers = fake_regs; - break; - case addr_map_mc12_base_r(): - fake_regs = (!hwpm->fake_registers_enabled || set_null) ? - NULL : t234_mc_fake_regs[12]; - l_mss_channel_map[12].fake_registers = fake_regs; - break; - case addr_map_mc13_base_r(): - fake_regs = (!hwpm->fake_registers_enabled || set_null) ? - NULL : t234_mc_fake_regs[13]; - l_mss_channel_map[13].fake_registers = fake_regs; - break; - case addr_map_mc14_base_r(): - fake_regs = (!hwpm->fake_registers_enabled || set_null) ? 
- NULL : t234_mc_fake_regs[14]; - l_mss_channel_map[14].fake_registers = fake_regs; - break; - case addr_map_mc15_base_r(): - fake_regs = (!hwpm->fake_registers_enabled || set_null) ? - NULL : t234_mc_fake_regs[15]; - l_mss_channel_map[15].fake_registers = fake_regs; - break; - default: - break; - } -} - -int t234_soc_hwpm_reserve_given_resource(struct tegra_soc_hwpm *hwpm, u32 resource) -{ - struct hwpm_resource_aperture *aperture = NULL; - int aprt_idx = 0; - int ret = 0, err; - struct tegra_soc_hwpm_ip_ops *ip_ops; - - /* Map reserved apertures and allocate fake register arrays if needed */ - for (aprt_idx = 0; - aprt_idx < hwpm->hwpm_resources[resource].map_size; - aprt_idx++) { - aperture = &(hwpm->hwpm_resources[resource].map[aprt_idx]); - if ((aperture->dt_aperture == T234_SOC_HWPM_PMA_DT) || - (aperture->dt_aperture == T234_SOC_HWPM_RTR_DT)) { - /* PMA and RTR apertures are handled in open(fd) */ - continue; - } else if (t234_soc_hwpm_is_dt_aperture_reserved(hwpm, - aperture, resource)) { - if (t234_soc_hwpm_is_perfmon(aperture->dt_aperture)) { - struct resource *res = NULL; - u64 num_regs = 0; - - tegra_soc_hwpm_dbg("Found PERFMON(0x%llx - 0x%llx)", - aperture->start_pa, aperture->end_pa); - ip_ops = &hwpm->ip_info[aperture->dt_aperture]; - if (ip_ops && (*ip_ops->hwpm_ip_pm)) { - err = (*ip_ops->hwpm_ip_pm) - (ip_ops->ip_dev, true); - if (err) { - tegra_soc_hwpm_err( - "Disable Runtime PM(%d) Failed", - aperture->dt_aperture); - } - } else { - tegra_soc_hwpm_dbg( - "No Runtime PM(%d) for IP", - aperture->dt_aperture); - } - hwpm->dt_apertures[aperture->dt_aperture] = - of_iomap(hwpm->np, aperture->dt_aperture); - if (!hwpm->dt_apertures[aperture->dt_aperture]) { - tegra_soc_hwpm_err("Couldn't map PERFMON(%d)", - aperture->dt_aperture); - ret = -ENOMEM; - goto fail; - } - - res = platform_get_resource(hwpm->pdev, - IORESOURCE_MEM, - aperture->dt_aperture); - if ((!res) || (res->start == 0) || (res->end == 0)) { - tegra_soc_hwpm_err("Invalid resource for PERFMON(%d)", - aperture->dt_aperture); - ret = -ENOMEM; - goto fail; - } - aperture->start_pa = res->start; - aperture->end_pa = res->end; - - if (hwpm->fake_registers_enabled) { - num_regs = (aperture->end_pa + 1 - aperture->start_pa) / - sizeof(*aperture->fake_registers); - aperture->fake_registers = - (u32 *)kzalloc(sizeof(*aperture->fake_registers) * - num_regs, - GFP_KERNEL); - if (!aperture->fake_registers) { - tegra_soc_hwpm_err("Aperture(0x%llx - 0x%llx):" - " Couldn't allocate memory for fake" - " registers", - aperture->start_pa, - aperture->end_pa); - ret = -ENOMEM; - goto fail; - } - } - } else { /* IP apertures */ - if (hwpm->fake_registers_enabled) { - u64 num_regs = 0; - u32 **fake_regs = - t234_soc_hwpm_get_mc_fake_regs(hwpm, aperture); - - if (!fake_regs) - fake_regs = &aperture->fake_registers; - - num_regs = (aperture->end_pa + 1 - aperture->start_pa) / - sizeof(*(*fake_regs)); - *fake_regs = - (u32 *)kzalloc(sizeof(*(*fake_regs)) * num_regs, - GFP_KERNEL); - if (!(*fake_regs)) { - tegra_soc_hwpm_err("Aperture(0x%llx - 0x%llx):" - " Couldn't allocate memory for fake" - " registers", - aperture->start_pa, - aperture->end_pa); - ret = -ENOMEM; - goto fail; - } - - t234_soc_hwpm_set_mc_fake_regs(hwpm, aperture, false); - } - } - } else { - tegra_soc_hwpm_dbg("resource %d index_mask %d not available", - resource, aperture->index_mask); - } - } - - hwpm->hwpm_resources[resource].reserved = true; - goto success; - -fail: - for (aprt_idx = 0; - aprt_idx < hwpm->hwpm_resources[resource].map_size; - aprt_idx++) { - 
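/*
 * Failure unwind: walk the same aperture map again and undo whatever the
 * loop above managed to reserve before the error - iounmap() the PERFMON
 * DT apertures, clear their cached start/end PAs, and kfree() any fake
 * register arrays (restoring the shared MC pointers via
 * t234_soc_hwpm_set_mc_fake_regs(..., true)) - before marking the resource
 * as not reserved.
 */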
aperture = &(hwpm->hwpm_resources[resource].map[aprt_idx]); - if ((aperture->dt_aperture == T234_SOC_HWPM_PMA_DT) || - (aperture->dt_aperture == T234_SOC_HWPM_RTR_DT)) { - /* PMA and RTR apertures are handled in open(fd) */ - continue; - } else if (t234_soc_hwpm_is_dt_aperture_reserved(hwpm, - aperture, resource)) { - if (t234_soc_hwpm_is_perfmon(aperture->dt_aperture)) { - if (hwpm->dt_apertures[aperture->dt_aperture]) { - iounmap(hwpm->dt_apertures[aperture->dt_aperture]); - hwpm->dt_apertures[aperture->dt_aperture] = NULL; - } - - aperture->start_pa = 0; - aperture->end_pa = 0; - - if (aperture->fake_registers) { - kfree(aperture->fake_registers); - aperture->fake_registers = NULL; - } - } else { /* IP apertures */ - if (aperture->fake_registers) { - kfree(aperture->fake_registers); - aperture->fake_registers = NULL; - t234_soc_hwpm_set_mc_fake_regs(hwpm, aperture, true); - } - } - } - } - - hwpm->hwpm_resources[resource].reserved = false; - -success: - return ret; -} - -void t234_soc_hwpm_reset_resources(struct tegra_soc_hwpm *hwpm) -{ - int res_idx = 0; - int aprt_idx = 0; - struct hwpm_resource_aperture *aperture = NULL; - - /* Reset resource and aperture state */ - for (res_idx = 0; res_idx < TERGA_SOC_HWPM_NUM_RESOURCES; res_idx++) { - if (!hwpm->hwpm_resources[res_idx].reserved) - continue; - hwpm->hwpm_resources[res_idx].reserved = false; - - for (aprt_idx = 0; - aprt_idx < hwpm->hwpm_resources[res_idx].map_size; - aprt_idx++) { - aperture = &(hwpm->hwpm_resources[res_idx].map[aprt_idx]); - if ((aperture->dt_aperture == T234_SOC_HWPM_PMA_DT) || - (aperture->dt_aperture == T234_SOC_HWPM_RTR_DT)) { - /* PMA and RTR apertures are handled separately */ - continue; - } else if (t234_soc_hwpm_is_perfmon(aperture->dt_aperture)) { - if (hwpm->dt_apertures[aperture->dt_aperture]) { - iounmap(hwpm->dt_apertures[aperture->dt_aperture]); - hwpm->dt_apertures[aperture->dt_aperture] = NULL; - } - - aperture->start_pa = 0; - aperture->end_pa = 0; - - if (aperture->fake_registers) { - kfree(aperture->fake_registers); - aperture->fake_registers = NULL; - } - } else { /* IP apertures */ - if (aperture->fake_registers) { - kfree(aperture->fake_registers); - aperture->fake_registers = NULL; - t234_soc_hwpm_set_mc_fake_regs(hwpm, aperture, true); - } - } - } - } -} - -void t234_soc_hwpm_disable_perfmons(struct tegra_soc_hwpm *hwpm) -{ - int res_idx = 0; - int aprt_idx = 0; - struct hwpm_resource_aperture *aperture = NULL; - struct tegra_soc_hwpm_ip_ops *ip_ops; - int err, ret = 0; - - for (res_idx = 0; res_idx < TERGA_SOC_HWPM_NUM_RESOURCES; res_idx++) { - if (!hwpm->hwpm_resources[res_idx].reserved) - continue; - tegra_soc_hwpm_dbg("Found reserved IP(%d)", res_idx); - - for (aprt_idx = 0; - aprt_idx < hwpm->hwpm_resources[res_idx].map_size; - aprt_idx++) { - aperture = &(hwpm->hwpm_resources[res_idx].map[aprt_idx]); - if (t234_soc_hwpm_is_perfmon(aperture->dt_aperture)) { - if (t234_soc_hwpm_is_dt_aperture_reserved(hwpm, - aperture, res_idx)) { - tegra_soc_hwpm_dbg("Found PERFMON(0x%llx - 0x%llx)", - aperture->start_pa, - aperture->end_pa); - err = reg_rmw(hwpm, NULL, aperture->dt_aperture, - pmmsys_control_r(0) - addr_map_rpg_pm_base_r(), - pmmsys_control_mode_m(), - pmmsys_control_mode_disable_f(), - false, false); - RELEASE_FAIL("Unable to disable PERFMON(0x%llx - 0x%llx)", - aperture->start_pa, - aperture->end_pa); - ip_ops = &hwpm->ip_info[aperture->dt_aperture]; - if (ip_ops && (*ip_ops->hwpm_ip_pm)) { - err = (*ip_ops->hwpm_ip_pm) - (ip_ops->ip_dev, false); - if (err) { - tegra_soc_hwpm_err( 
- "Enable Runtime PM(%d) Failed", - aperture->dt_aperture); - } - } else { - tegra_soc_hwpm_dbg( - "No Runtime PM(%d) for IP", - aperture->dt_aperture); - } - } - } - } - } -} - -int t234_soc_hwpm_bind_resources(struct tegra_soc_hwpm *hwpm) -{ - int ret = 0; - int res_idx = 0; - int aprt_idx = 0; - struct hwpm_resource_aperture *aperture = NULL; - - for (res_idx = 0; res_idx < TERGA_SOC_HWPM_NUM_RESOURCES; res_idx++) { - if (!hwpm->hwpm_resources[res_idx].reserved) - continue; - tegra_soc_hwpm_dbg("Found reserved IP(%d)", res_idx); - - for (aprt_idx = 0; - aprt_idx < hwpm->hwpm_resources[res_idx].map_size; - aprt_idx++) { - aperture = &(hwpm->hwpm_resources[res_idx].map[aprt_idx]); - - if (t234_soc_hwpm_is_dt_aperture_reserved(hwpm, - aperture, res_idx)) { - - /* Zero out necessary registers */ - if (aperture->alist) { - t234_soc_hwpm_zero_alist_regs(hwpm, aperture); - } else { - tegra_soc_hwpm_err( - "NULL allowlist in aperture(0x%llx - 0x%llx)", - aperture->start_pa, aperture->end_pa); - } - - /* - * Enable reporting of PERFMON status to - * NV_PERF_PMMSYS_SYS0ROUTER_PERFMONSTATUS_MERGED - */ - if (t234_soc_hwpm_is_perfmon(aperture->dt_aperture)) { - tegra_soc_hwpm_dbg("Found PERFMON(0x%llx - 0x%llx)", - aperture->start_pa, - aperture->end_pa); - ret = reg_rmw(hwpm, NULL, aperture->dt_aperture, - pmmsys_sys0_enginestatus_r(0) - - addr_map_rpg_pm_base_r(), - pmmsys_sys0_enginestatus_enable_m(), - pmmsys_sys0_enginestatus_enable_out_f(), - false, false); - if (ret < 0) { - tegra_soc_hwpm_err( - "Unable to set PMM ENGINESTATUS_ENABLE" - " for PERFMON(0x%llx - 0x%llx)", - aperture->start_pa, - aperture->end_pa); - return -EIO; - } - } - } - } - } - return 0; -} diff --git a/hal/tegra-soc-hwpm-structures.h b/hal/tegra-soc-hwpm-structures.h deleted file mode 100644 index 00bd127..0000000 --- a/hal/tegra-soc-hwpm-structures.h +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#ifndef TEGRA_SOC_HWPM_STRUCTURES_H -#define TEGRA_SOC_HWPM_STRUCTURES_H - -#include -#include -#include -#include - -#include - -#define TEGRA_SOC_HWPM_DT_APERTURE_INVALID 100U - -#define RELEASE_FAIL(msg, ...) \ - do { \ - if (err < 0) { \ - tegra_soc_hwpm_err(msg, ##__VA_ARGS__); \ - if (ret == 0) \ - ret = err; \ - } \ - } while (0) - -/* FIXME: Default timeout is 1 sec. Is this sufficient for pre-si? 
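/*
 * Note on RELEASE_FAIL() above: the macro expands in place and relies on
 * local variables named 'err' and 'ret' existing in the caller. It logs
 * when err < 0 and latches only the first failure into ret, so release
 * paths keep going instead of stopping at the first broken step (see its
 * use in t234_soc_hwpm_clear_pipeline() earlier in this diff).
 */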
*/ -#define HWPM_TIMEOUT(timeout_check, expiry_msg) ({ \ - bool timeout_expired = false; \ - s32 timeout_msecs = 1000; \ - u32 sleep_msecs = 100; \ - while (!(timeout_check)) { \ - msleep(sleep_msecs); \ - timeout_msecs -= sleep_msecs; \ - if (timeout_msecs <= 0) { \ - tegra_soc_hwpm_err("Timeout expired for %s!", \ - expiry_msg); \ - timeout_expired = true; \ - break; \ - } \ - } \ - timeout_expired; \ -}) - -struct allowlist; -extern struct platform_device *tegra_soc_hwpm_pdev; -extern const struct file_operations tegra_soc_hwpm_ops; - -/* Driver struct */ -struct tegra_soc_hwpm { - /* Device */ - struct platform_device *pdev; - struct device *dev; - struct device_node *np; - struct class class; - dev_t dev_t; - struct cdev cdev; - - struct hwpm_resource *hwpm_resources; - - /* IP floorsweep info */ - u64 ip_fs_info[TERGA_SOC_HWPM_NUM_IPS]; - - /* MMIO apertures in device tree */ - void __iomem **dt_apertures; - - /* Clocks and resets */ - struct clk *la_clk; - struct clk *la_parent_clk; - struct reset_control *la_rst; - struct reset_control *hwpm_rst; - - struct tegra_soc_hwpm_ip_ops *ip_info; - - /* Memory Management */ - struct dma_buf *stream_dma_buf; - struct dma_buf_attachment *stream_attach; - struct sg_table *stream_sgt; - struct dma_buf *mem_bytes_dma_buf; - struct dma_buf_attachment *mem_bytes_attach; - struct sg_table *mem_bytes_sgt; - void *mem_bytes_kernel; - - /* SW State */ - bool bind_completed; - s32 full_alist_size; - - /* Debugging */ -#ifdef CONFIG_DEBUG_FS - struct dentry *debugfs_root; -#endif - bool fake_registers_enabled; -}; - -struct hwpm_resource_aperture { - /* - * If false, this is a HWPM aperture (PERFRMON, PMA or RTR). Else this - * is a non-HWPM aperture (ex: VIC). - */ - bool is_ip; - - /* - * If is_ip == false, specify dt_aperture for readl/writel operations. - * If is_ip == true, dt_aperture == TEGRA_SOC_HWPM_INVALID_DT. - */ - u32 dt_aperture; - - /* Physical aperture */ - u64 start_abs_pa; - u64 end_abs_pa; - u64 start_pa; - u64 end_pa; - - /* Allowlist */ - struct allowlist *alist; - u64 alist_size; - - /* - * Currently, perfmons and perfmuxes for all instances of an IP - * are listed in a single aperture mask. It is possible that - * some instances are disable. In this case, accessing corresponding - * registers will result in kernel panic. - * Bit set in the index_mask value will indicate the instance index - * within that IP (or resource). - */ - u32 index_mask; - - /* Fake registers for VDK which doesn't have a SOC HWPM fmodel */ - u32 *fake_registers; -}; - -struct hwpm_resource { - bool reserved; - u32 map_size; - struct hwpm_resource_aperture *map; -}; - -#endif /* TEGRA_SOC_HWPM_STRUCTURES_H */ diff --git a/hal/tegra_soc_hwpm_init.c b/hal/tegra_soc_hwpm_init.c deleted file mode 100644 index 723fbcf..0000000 --- a/hal/tegra_soc_hwpm_init.c +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
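/*
 * Context for this deleted file (hal/tegra_soc_hwpm_init.c): it was a thin
 * shim that forwarded every tegra_soc_hwpm_* entry point straight to its
 * t234_* implementation, so the chip was effectively chosen at link time.
 * The replacement model in include/tegra_hwpm.h (added later in this diff)
 * keeps a per-chip ops table on hwpm->active_chip instead, roughly as
 * below (sketch only; the op chosen here is arbitrary and the error code
 * is an assumption):
 */
static int hwpm_init_fs_info_sketch(struct tegra_soc_hwpm *hwpm)
{
	/* dispatch through the chip HAL instead of a hard-wired t234_* call */
	if (hwpm->active_chip->init_fs_info == NULL)
		return -ENODEV;
	return hwpm->active_chip->init_fs_info(hwpm);
}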
- */ - -#include -#include - -#include - -void __iomem **tegra_soc_hwpm_init_dt_apertures(void) -{ - return t234_soc_hwpm_init_dt_apertures(); -} - -struct tegra_soc_hwpm_ip_ops *tegra_soc_hwpm_init_ip_ops_info(void) -{ - return t234_soc_hwpm_init_ip_ops_info(); -} - -bool tegra_soc_hwpm_is_perfmon(u32 dt_aperture) -{ - return t234_soc_hwpm_is_perfmon(dt_aperture); -} - -u64 tegra_soc_hwpm_get_perfmon_base(u32 dt_aperture) -{ - return t234_soc_hwpm_get_perfmon_base(dt_aperture); -} - -bool tegra_soc_hwpm_is_dt_aperture(u32 dt_aperture) -{ - return t234_soc_hwpm_is_dt_aperture(dt_aperture); -} - -bool tegra_soc_hwpm_is_dt_aperture_reserved(struct tegra_soc_hwpm *hwpm, - struct hwpm_resource_aperture *aperture, u32 rsrc_id) -{ - return t234_soc_hwpm_is_dt_aperture_reserved(hwpm, aperture, rsrc_id); -} - -u32 tegra_soc_hwpm_get_ip_aperture(struct tegra_soc_hwpm *hwpm, - u64 phys_address, u64 *ip_base_addr) -{ - return t234_soc_hwpm_get_ip_aperture(hwpm, phys_address, ip_base_addr); -} - -struct hwpm_resource_aperture *tegra_soc_hwpm_find_aperture( - struct tegra_soc_hwpm *hwpm, u64 phys_addr, - bool use_absolute_base, bool check_reservation, - u64 *updated_pa) -{ - return t234_soc_hwpm_find_aperture(hwpm, phys_addr, - use_absolute_base, check_reservation, updated_pa); -} - -int tegra_soc_hwpm_fs_info_init(struct tegra_soc_hwpm *hwpm) -{ - return t234_soc_hwpm_fs_info_init(hwpm); -} - -int tegra_soc_hwpm_disable_pma_triggers(struct tegra_soc_hwpm *hwpm) -{ - return t234_soc_hwpm_disable_pma_triggers(hwpm); -} - -u32 **tegra_soc_hwpm_get_mc_fake_regs(struct tegra_soc_hwpm *hwpm, - struct hwpm_resource_aperture *aperture) -{ - return t234_soc_hwpm_get_mc_fake_regs(hwpm, aperture); -} - -void tegra_soc_hwpm_set_mc_fake_regs(struct tegra_soc_hwpm *hwpm, - struct hwpm_resource_aperture *aperture, - bool set_null) -{ - t234_soc_hwpm_set_mc_fake_regs(hwpm, aperture, set_null); -} - -int tegra_soc_hwpm_disable_slcg(struct tegra_soc_hwpm *hwpm) -{ - return t234_soc_hwpm_disable_slcg(hwpm); -} - -int tegra_soc_hwpm_enable_slcg(struct tegra_soc_hwpm *hwpm) -{ - return t234_soc_hwpm_enable_slcg(hwpm); -} - -void tegra_soc_hwpm_zero_alist_regs(struct tegra_soc_hwpm *hwpm, - struct hwpm_resource_aperture *aperture) -{ - t234_soc_hwpm_zero_alist_regs(hwpm, aperture); -} - -int tegra_soc_hwpm_update_allowlist(struct tegra_soc_hwpm *hwpm, - void *ioctl_struct) -{ - return t234_soc_hwpm_update_allowlist(hwpm, ioctl_struct); -} - -bool tegra_soc_hwpm_allowlist_check(struct hwpm_resource_aperture *aperture, - u64 phys_addr, bool use_absolute_base, - u64 *updated_pa) -{ - return t234_soc_hwpm_allowlist_check(aperture, phys_addr, - use_absolute_base, updated_pa); -} - -void tegra_soc_hwpm_get_full_allowlist(struct tegra_soc_hwpm *hwpm) -{ - t234_soc_hwpm_get_full_allowlist(hwpm); -} - -int tegra_soc_hwpm_update_mem_bytes(struct tegra_soc_hwpm *hwpm, - struct tegra_soc_hwpm_update_get_put *update_get_put) -{ - return t234_soc_hwpm_update_mem_bytes(hwpm, update_get_put); -} - -int tegra_soc_hwpm_clear_pipeline(struct tegra_soc_hwpm *hwpm) -{ - return t234_soc_hwpm_clear_pipeline(hwpm); -} - -int tegra_soc_hwpm_stream_buf_map(struct tegra_soc_hwpm *hwpm, - struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream) -{ - return t234_soc_hwpm_stream_buf_map(hwpm, alloc_pma_stream); -} - -int tegra_soc_hwpm_pma_rtr_map(struct tegra_soc_hwpm *hwpm) -{ - return t234_soc_hwpm_pma_rtr_map(hwpm); -} - -int tegra_soc_hwpm_pma_rtr_unmap(struct tegra_soc_hwpm *hwpm) -{ - return t234_soc_hwpm_pma_rtr_unmap(hwpm); -} - -int 
tegra_soc_hwpm_reserve_given_resource(struct tegra_soc_hwpm *hwpm, u32 resource) -{ - return t234_soc_hwpm_reserve_given_resource(hwpm, resource); -} - -void tegra_soc_hwpm_reset_resources(struct tegra_soc_hwpm *hwpm) -{ - t234_soc_hwpm_reset_resources(hwpm); -} - -void tegra_soc_hwpm_disable_perfmons(struct tegra_soc_hwpm *hwpm) -{ - t234_soc_hwpm_disable_perfmons(hwpm); -} - -int tegra_soc_hwpm_bind_resources(struct tegra_soc_hwpm *hwpm) -{ - return t234_soc_hwpm_bind_resources(hwpm); -} diff --git a/hal/tegra_soc_hwpm_init.h b/hal/tegra_soc_hwpm_init.h deleted file mode 100644 index 202fe37..0000000 --- a/hal/tegra_soc_hwpm_init.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * This header contains interfaces to soc specific functions. - */ - -#ifndef TEGRA_SOC_HWPM_INIT_H -#define TEGRA_SOC_HWPM_INIT_H - -#include -#include - -void __iomem **tegra_soc_hwpm_init_dt_apertures(void); -struct tegra_soc_hwpm_ip_ops *tegra_soc_hwpm_init_ip_ops_info(void); -bool tegra_soc_hwpm_is_perfmon(u32 dt_aperture); -u64 tegra_soc_hwpm_get_perfmon_base(u32 dt_aperture); -bool tegra_soc_hwpm_is_dt_aperture(u32 dt_aperture); -bool tegra_soc_hwpm_is_dt_aperture_reserved(struct tegra_soc_hwpm *hwpm, - struct hwpm_resource_aperture *aperture, u32 rsrc_id); -u32 tegra_soc_hwpm_get_ip_aperture(struct tegra_soc_hwpm *hwpm, - u64 phys_address, u64 *ip_base_addr); -struct hwpm_resource_aperture *tegra_soc_hwpm_find_aperture( - struct tegra_soc_hwpm *hwpm, u64 phys_addr, - bool use_absolute_base, bool check_reservation, - u64 *updated_pa); -int tegra_soc_hwpm_fs_info_init(struct tegra_soc_hwpm *hwpm); -int tegra_soc_hwpm_disable_pma_triggers(struct tegra_soc_hwpm *hwpm); -u32 **tegra_soc_hwpm_get_mc_fake_regs(struct tegra_soc_hwpm *hwpm, - struct hwpm_resource_aperture *aperture); -void tegra_soc_hwpm_set_mc_fake_regs(struct tegra_soc_hwpm *hwpm, - struct hwpm_resource_aperture *aperture, - bool set_null); -int tegra_soc_hwpm_disable_slcg(struct tegra_soc_hwpm *hwpm); -int tegra_soc_hwpm_enable_slcg(struct tegra_soc_hwpm *hwpm); -void tegra_soc_hwpm_zero_alist_regs(struct tegra_soc_hwpm *hwpm, - struct hwpm_resource_aperture *aperture); -int tegra_soc_hwpm_update_allowlist(struct tegra_soc_hwpm *hwpm, - void *ioctl_struct); -bool tegra_soc_hwpm_allowlist_check(struct hwpm_resource_aperture *aperture, - u64 phys_addr, bool use_absolute_base, - u64 *updated_pa); -void tegra_soc_hwpm_get_full_allowlist(struct tegra_soc_hwpm *hwpm); -int tegra_soc_hwpm_update_mem_bytes(struct tegra_soc_hwpm *hwpm, - struct tegra_soc_hwpm_update_get_put *update_get_put); -int tegra_soc_hwpm_clear_pipeline(struct tegra_soc_hwpm *hwpm); -int tegra_soc_hwpm_stream_buf_map(struct tegra_soc_hwpm *hwpm, - struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream); -int tegra_soc_hwpm_pma_rtr_map(struct tegra_soc_hwpm *hwpm); -int tegra_soc_hwpm_pma_rtr_unmap(struct tegra_soc_hwpm *hwpm); -int 
tegra_soc_hwpm_reserve_given_resource( - struct tegra_soc_hwpm *hwpm, u32 resource); -void tegra_soc_hwpm_reset_resources(struct tegra_soc_hwpm *hwpm); -void tegra_soc_hwpm_disable_perfmons(struct tegra_soc_hwpm *hwpm); -int tegra_soc_hwpm_bind_resources(struct tegra_soc_hwpm *hwpm); - -#endif /* TEGRA_SOC_HWPM_INIT_H */ diff --git a/include/tegra_hwpm.h b/include/tegra_hwpm.h new file mode 100644 index 0000000..f4452a5 --- /dev/null +++ b/include/tegra_hwpm.h @@ -0,0 +1,260 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef TEGRA_HWPM_H +#define TEGRA_HWPM_H + +#include +#include +#include +#include + +#include + +#undef BIT +#define BIT(x) (0x1U << (u32)(x)) + +#undef ARRAY_SIZE +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0])) + +#define TEGRA_SOC_HWPM_IP_INACTIVE ~(0U) + +/* FIXME: Default timeout is 1 sec. Is this sufficient for pre-si? */ +#define HWPM_TIMEOUT(timeout_check, expiry_msg) ({ \ + bool timeout_expired = false; \ + s32 timeout_msecs = 1000; \ + u32 sleep_msecs = 100; \ + while (!(timeout_check)) { \ + msleep(sleep_msecs); \ + timeout_msecs -= sleep_msecs; \ + if (timeout_msecs <= 0) { \ + tegra_hwpm_err(NULL, "Timeout expired for %s!", \ + expiry_msg); \ + timeout_expired = true; \ + break; \ + } \ + } \ + timeout_expired; \ +}) + +struct hwpm_ip_aperture { + /* + * Indicates which domain (HWPM or IP) aperture belongs to, + * used for reverse mapping + */ + bool is_hwpm_element; + + /* HW index : This is used to update IP fs_mask */ + u32 hw_inst_mask; + + /* MMIO device tree aperture - only populated for perfmon */ + void __iomem *dt_mmio; + + /* DT tree name */ + char name[64]; + + /* IP ops - only populated for perfmux */ + struct tegra_soc_hwpm_ip_ops ip_ops; + + /* Allowlist */ + struct allowlist *alist; + u64 alist_size; + + /* Physical aperture */ + u64 start_abs_pa; + u64 end_abs_pa; + + /* MMIO aperture */ + u64 start_pa; + u64 end_pa; + + /* Base address: used to calculate register offset */ + u64 base_pa; + + /* Fake registers for VDK which doesn't have a SOC HWPM fmodel */ + u32 *fake_registers; +}; + +typedef struct hwpm_ip_aperture hwpm_ip_perfmon; +typedef struct hwpm_ip_aperture hwpm_ip_perfmux; + +struct hwpm_ip { + /* Number of instances */ + u32 num_instances; + + /* Number of perfmons per instance */ + u32 num_perfmon_per_inst; + + /* Number of perfmuxes per instance */ + u32 num_perfmux_per_inst; + + /* IP perfmon address range */ + u64 perfmon_range_start; + u64 perfmon_range_end; + + /* Perfmon physical address stride for each IP instance */ + u64 inst_perfmon_stride; + + /* + * Perfmon slots that can fit into perfmon address range. + * This gives number of indices in ip_perfmon + */ + u32 num_perfmon_slots; + + /* IP perfmon array */ + hwpm_ip_perfmon **ip_perfmon; + + /* IP perfmux address range */ + u64 perfmux_range_start; + u64 perfmux_range_end; + + /* Perfmux physical address stride for each IP instance */ + u64 inst_perfmux_stride; + + /* + * Perfmux slots that can fit into perfmux address range. 
+ * This gives number of indices in ip_perfmux + */ + u32 num_perfmux_slots; + + /* IP perfmux array */ + hwpm_ip_perfmux **ip_perfmux; + + /* Override IP config based on fuse value */ + bool override_enable; + + /* + * IP floorsweep info based on hw index of aperture + * NOTE: This mask needs to based on hw instance index because + * hwpm driver clients use hw instance index to find aperture + * info (start/end address) from hw manual. + */ + u32 fs_mask; + + /* IP perfmon array */ + hwpm_ip_perfmon *perfmon_static_array; + + /* IP perfmux array */ + hwpm_ip_perfmux *perfmux_static_array; + + bool reserved; +}; + +struct tegra_soc_hwpm; + +struct tegra_soc_hwpm_chip { + /* Array of pointers to active IP structures */ + struct hwpm_ip **chip_ips; + + /* Chip HALs */ + bool (*is_ip_active)(struct tegra_soc_hwpm *hwpm, + enum tegra_soc_hwpm_ip ip_index, u32 *config_ip_index); + bool (*is_resource_active)(struct tegra_soc_hwpm *hwpm, + enum tegra_soc_hwpm_resource res_index, u32 *config_ip_index); + + int (*extract_ip_ops)(struct tegra_soc_hwpm *hwpm, + struct tegra_soc_hwpm_ip_ops *hwpm_ip_ops, bool available); + int (*init_fs_info)(struct tegra_soc_hwpm *hwpm); + int (*get_fs_info)(struct tegra_soc_hwpm *hwpm, + u32 ip_index, u64 *fs_mask, u8 *ip_status); + + int (*init_prod_values)(struct tegra_soc_hwpm *hwpm); + int (*disable_slcg)(struct tegra_soc_hwpm *hwpm); + int (*enable_slcg)(struct tegra_soc_hwpm *hwpm); + + int (*reserve_pma)(struct tegra_soc_hwpm *hwpm); + int (*reserve_rtr)(struct tegra_soc_hwpm *hwpm); + int (*release_pma)(struct tegra_soc_hwpm *hwpm); + int (*release_rtr)(struct tegra_soc_hwpm *hwpm); + + int (*reserve_given_resource)(struct tegra_soc_hwpm *hwpm, u32 ip_idx); + int (*bind_reserved_resources)(struct tegra_soc_hwpm *hwpm); + int (*disable_triggers)(struct tegra_soc_hwpm *hwpm); + int (*release_all_resources)(struct tegra_soc_hwpm *hwpm); + + int (*disable_mem_mgmt)(struct tegra_soc_hwpm *hwpm); + int (*enable_mem_mgmt)(struct tegra_soc_hwpm *hwpm, + struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream); + int (*invalidate_mem_config)(struct tegra_soc_hwpm *hwpm); + int (*stream_mem_bytes)(struct tegra_soc_hwpm *hwpm); + int (*disable_pma_streaming)(struct tegra_soc_hwpm *hwpm); + int (*update_mem_bytes_get_ptr)(struct tegra_soc_hwpm *hwpm, + u64 mem_bump); + u64 (*get_mem_bytes_put_ptr)(struct tegra_soc_hwpm *hwpm); + bool (*membuf_overflow_status)(struct tegra_soc_hwpm *hwpm); + + size_t (*get_alist_buf_size)(struct tegra_soc_hwpm *hwpm); + int (*zero_alist_regs)(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip_aperture *aperture); + int (*get_alist_size)(struct tegra_soc_hwpm *hwpm); + int (*combine_alist)(struct tegra_soc_hwpm *hwpm, u64 *alist); + bool (*check_alist)(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip_aperture *aperture, u64 phys_addr); + + int (*exec_reg_ops)(struct tegra_soc_hwpm *hwpm, + struct tegra_soc_hwpm_reg_op *reg_op); + + void (*release_sw_setup)(struct tegra_soc_hwpm *hwpm); +}; + +struct allowlist; +extern struct platform_device *tegra_soc_hwpm_pdev; +extern const struct file_operations tegra_soc_hwpm_ops; + +/* Driver struct */ +struct tegra_soc_hwpm { + /* Device */ + struct platform_device *pdev; + struct device *dev; + struct device_node *np; + struct class class; + dev_t dev_t; + struct cdev cdev; + + /* Device info */ + struct tegra_soc_hwpm_device_info device_info; + + /* Active chip info */ + struct tegra_soc_hwpm_chip *active_chip; + + /* Clocks and resets */ + struct clk *la_clk; + struct clk *la_parent_clk; + struct 
reset_control *la_rst; + struct reset_control *hwpm_rst; + + /* Memory Management */ + struct dma_buf *stream_dma_buf; + struct dma_buf_attachment *stream_attach; + struct sg_table *stream_sgt; + struct dma_buf *mem_bytes_dma_buf; + struct dma_buf_attachment *mem_bytes_attach; + struct sg_table *mem_bytes_sgt; + void *mem_bytes_kernel; + + /* SW State */ + bool bind_completed; + u64 full_alist_size; + + atomic_t hwpm_in_use; + + u32 dbg_mask; + + /* Debugging */ +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_root; +#endif + bool fake_registers_enabled; +}; + +#endif /* TEGRA_HWPM_H */ diff --git a/include/tegra_hwpm_common.h b/include/tegra_hwpm_common.h new file mode 100644 index 0000000..5415633 --- /dev/null +++ b/include/tegra_hwpm_common.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef TEGRA_HWPM_COMMON_H +#define TEGRA_HWPM_COMMON_H + +struct tegra_soc_hwpm; +struct tegra_soc_hwpm_exec_reg_ops; +struct tegra_soc_hwpm_ip_floorsweep_info; +struct tegra_soc_hwpm_alloc_pma_stream; +struct tegra_soc_hwpm_update_get_put; + +int tegra_soc_hwpm_init_chip_info(struct tegra_soc_hwpm *hwpm); +int tegra_soc_hwpm_init_floorsweep_info(struct tegra_soc_hwpm *hwpm); +int tegra_soc_hwpm_reserve_resource(struct tegra_soc_hwpm *hwpm, u32 resource); +int tegra_soc_hwpm_release_resources(struct tegra_soc_hwpm *hwpm); +int tegra_soc_hwpm_bind_resources(struct tegra_soc_hwpm *hwpm); +int tegra_soc_hwpm_get_allowlist_size(struct tegra_soc_hwpm *hwpm); +int tegra_soc_hwpm_update_allowlist(struct tegra_soc_hwpm *hwpm, + void *ioctl_struct); +int tegra_soc_hwpm_exec_regops(struct tegra_soc_hwpm *hwpm, + struct tegra_soc_hwpm_exec_reg_ops *exec_reg_ops); + +int tegra_soc_hwpm_setup_hw(struct tegra_soc_hwpm *hwpm); +int tegra_soc_hwpm_setup_sw(struct tegra_soc_hwpm *hwpm); +int tegra_hwpm_disable_triggers(struct tegra_soc_hwpm *hwpm); +int tegra_soc_hwpm_release_hw(struct tegra_soc_hwpm *hwpm); +void tegra_soc_hwpm_release_sw_components(struct tegra_soc_hwpm *hwpm); + +int tegra_soc_hwpm_get_floorsweep_info(struct tegra_soc_hwpm *hwpm, + struct tegra_soc_hwpm_ip_floorsweep_info *fs_info); + +int tegra_hwpm_map_stream_buffer(struct tegra_soc_hwpm *hwpm, + struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream); +int tegra_hwpm_clear_mem_pipeline(struct tegra_soc_hwpm *hwpm); +int tegra_hwpm_update_mem_bytes(struct tegra_soc_hwpm *hwpm, + struct tegra_soc_hwpm_update_get_put *update_get_put); + +#endif /* TEGRA_HWPM_COMMON_H */ diff --git a/tegra-soc-hwpm.h b/include/tegra_hwpm_debugfs.h similarity index 64% rename from tegra-soc-hwpm.h rename to include/tegra_hwpm_debugfs.h index 1be0836..608e844 100644 --- a/tegra-soc-hwpm.h +++ b/include/tegra_hwpm_debugfs.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -9,24 +9,10 @@ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * tegra-soc-hwpm.h: - * This is the header for the Tegra SOC HWPM driver. */ -#ifndef TEGRA_SOC_HWPM_H -#define TEGRA_SOC_HWPM_H - -// -// -// #include -// - -// #include "tegra-soc-hwpm-log.h" -// +#ifndef TEGRA_HWPM_DEBUGFS_H +#define TEGRA_HWPM_DEBUGFS_H struct tegra_soc_hwpm; @@ -38,4 +24,4 @@ static inline void tegra_soc_hwpm_debugfs_init(struct tegra_soc_hwpm *hwpm) {} static inline void tegra_soc_hwpm_debugfs_deinit(struct tegra_soc_hwpm *hwpm) {} #endif /* CONFIG_DEBUG_FS */ -#endif /* TEGRA_SOC_HWPM_H */ +#endif /* TEGRA_HWPM_DEBUGFS_H */ diff --git a/include/tegra_hwpm_io.h b/include/tegra_hwpm_io.h new file mode 100644 index 0000000..b00ced4 --- /dev/null +++ b/include/tegra_hwpm_io.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef TEGRA_HWPM_IO_H +#define TEGRA_HWPM_IO_H + +/** + * Sets a particular field value in input data. + * + * Uses mask to clear specific bit positions in curr_val. field_val + * is used to set the bits in curr_val to be returned. + * Note: Function does not perform any validation of input parameters. + * + * curr_val [in] Current input data value. + * + * mask [in] Mask of the bits to be updated. + * + * field_val [in] Value to change the mask bits to. + * + * Returns updated value. + */ +static inline u32 set_field(u32 curr_val, u32 mask, u32 field_val) +{ + return ((curr_val & ~mask) | field_val); +} + +/** + * Retrieve value of specific bits from input data. + * Note: Function does not perform any validation of input parameters. + * + * input_data [in] Data to retrieve value from. + * + * mask [in] Mask of the bits to get value from. + * + * Return value from input_data corresponding to mask bits. + */ +static inline u32 get_field(u32 input_data, u32 mask) +{ + return (input_data & mask); +} + + +struct tegra_soc_hwpm; +struct hwpm_ip_aperture; + +u32 tegra_hwpm_readl(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip_aperture *aperture, u64 addr); +void tegra_hwpm_writel(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip_aperture *aperture, u64 addr, u32 val); +u32 regops_readl(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip_aperture *aperture, u64 addr); +void regops_writel(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip_aperture *aperture, u64 addr, u32 val); + +#endif /* TEGRA_HWPM_IO_H */ diff --git a/include/tegra_hwpm_log.h b/include/tegra_hwpm_log.h new file mode 100644 index 0000000..b1ea940 --- /dev/null +++ b/include/tegra_hwpm_log.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
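/*
 * Typical read-modify-write built from the accessors declared in
 * tegra_hwpm_io.h above (sketch only; 'aperture', 'reg_addr' and the
 * mask/field values are placeholders, not names from the HW headers):
 */
static void hwpm_rmw_sketch(struct tegra_soc_hwpm *hwpm,
		struct hwpm_ip_aperture *aperture, u64 reg_addr,
		u32 mask, u32 field_val)
{
	u32 reg = tegra_hwpm_readl(hwpm, aperture, reg_addr);	/* read current value */

	reg = set_field(reg, mask, field_val);	/* clear mask bits, set new field */
	tegra_hwpm_writel(hwpm, aperture, reg_addr, reg);	/* write back */
}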
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef TEGRA_HWPM_LOG_H +#define TEGRA_HWPM_LOG_H + +#include + +#define TEGRA_SOC_HWPM_MODULE_NAME "tegra-soc-hwpm" + +enum tegra_soc_hwpm_log_type { + TEGRA_HWPM_ERROR, /* Error prints */ + TEGRA_HWPM_DEBUG, /* Debug prints */ +}; + +#define TEGRA_HWPM_DEFAULT_DBG_MASK (0) +#define hwpm_fn BIT(0) +#define hwpm_info BIT(1) +#define hwpm_register BIT(2) +#define hwpm_verbose BIT(3) + +#define tegra_hwpm_err(hwpm, fmt, arg...) \ + tegra_soc_err_impl(hwpm, __func__, __LINE__, fmt, ##arg) +#define tegra_hwpm_dbg(hwpm, dbg_mask, fmt, arg...) \ + tegra_hwpm_dbg_impl(hwpm, dbg_mask, __func__, __LINE__, fmt, ##arg) +#define tegra_hwpm_fn(hwpm, fmt, arg...) \ + tegra_hwpm_dbg_impl(hwpm, hwpm_fn, __func__, __LINE__, fmt, ##arg) + +struct tegra_soc_hwpm; + +void tegra_soc_err_impl(struct tegra_soc_hwpm *hwpm, + const char *func, int line, const char *fmt, ...); +void tegra_hwpm_dbg_impl(struct tegra_soc_hwpm *hwpm, + u32 dbg_mask, const char *func, int line, const char *fmt, ...); + +#endif /* TEGRA_HWPM_LOG_H */ diff --git a/tegra-soc-hwpm-debugfs.c b/os/linux/tegra_hwpm_debugfs.c similarity index 60% rename from tegra-soc-hwpm-debugfs.c rename to os/linux/tegra_hwpm_debugfs.c index 4d9270b..eb5719c 100644 --- a/tegra-soc-hwpm-debugfs.c +++ b/os/linux/tegra_hwpm_debugfs.c @@ -1,8 +1,5 @@ /* - * tegra-soc-hwpm-debugfs.c: - * This file adds debugfs nodes for the Tegra SOC HWPM driver. - * - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,32 +9,34 @@ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . */ #include #include -#include -#include "tegra-soc-hwpm-log.h" -#include "tegra-soc-hwpm.h" +#include +#include +#include /* FIXME: This is a placeholder for now. We can add debugfs nodes as needed. 
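/*
 * Usage note for the log_mask node created below: it exposes hwpm->dbg_mask
 * as a writable debugfs u32, which tegra_hwpm_dbg() hands to the log
 * implementation so print categories can be enabled at runtime. For
 * example, writing 0x3 (hwpm_fn | hwpm_info) to
 * <debugfs>/tegra-soc-hwpm/log_mask would enable function-trace and info
 * prints; the exact path depends on where debugfs is mounted.
 */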
*/ void tegra_soc_hwpm_debugfs_init(struct tegra_soc_hwpm *hwpm) { if (!hwpm) { - tegra_soc_hwpm_err("Invalid hwpm struct"); + tegra_hwpm_err(hwpm, "Invalid hwpm struct"); return; } - hwpm->debugfs_root = debugfs_create_dir(TEGRA_SOC_HWPM_MODULE_NAME, NULL); + hwpm->debugfs_root = + debugfs_create_dir(TEGRA_SOC_HWPM_MODULE_NAME, NULL); if (!hwpm->debugfs_root) { - tegra_soc_hwpm_err("Failed to create debugfs root directory"); + tegra_hwpm_err(hwpm, "Failed to create debugfs root directory"); goto fail; } + /* Debug logs */ + debugfs_create_u32("log_mask", S_IRUGO|S_IWUSR, hwpm->debugfs_root, + &hwpm->dbg_mask); + return; fail: @@ -48,7 +47,7 @@ fail: void tegra_soc_hwpm_debugfs_deinit(struct tegra_soc_hwpm *hwpm) { if (!hwpm) { - tegra_soc_hwpm_err("Invalid hwpm struct"); + tegra_hwpm_err(hwpm, "Invalid hwpm struct"); return; } diff --git a/os/linux/tegra_hwpm_io.c b/os/linux/tegra_hwpm_io.c new file mode 100644 index 0000000..b9cdbb2 --- /dev/null +++ b/os/linux/tegra_hwpm_io.c @@ -0,0 +1,278 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include +#include +#include + +#include + +#include +#include +#include + +static u32 fake_readl(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip_aperture *aperture, u64 offset) +{ + u32 reg_val = 0; + + if (!hwpm->fake_registers_enabled) { + tegra_hwpm_err(hwpm, "Fake registers are disabled!"); + return 0; + } + + reg_val = aperture->fake_registers[offset]; + return reg_val; +} + +static void fake_writel(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip_aperture *aperture, u64 offset, u32 val) +{ + if (!hwpm->fake_registers_enabled) { + tegra_hwpm_err(hwpm, "Fake registers are disabled!"); + return; + } + + aperture->fake_registers[offset] = val; +} + +/* + * Read IP domain registers + * IP(except PMA and RTR) perfmux fall in this category + */ +static u32 ip_readl(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip_aperture *aperture, u64 offset) +{ + tegra_hwpm_dbg(hwpm, hwpm_register, + "Aperture (0x%llx-0x%llx) offset(0x%x)", + aperture->start_abs_pa, aperture->end_abs_pa, offset); + + if (hwpm->fake_registers_enabled) { + return fake_readl(hwpm, aperture, offset); + } else { + u32 reg_val = 0U; + struct tegra_soc_hwpm_ip_ops *ip_ops_ptr = &aperture->ip_ops; + if (ip_ops_ptr->hwpm_ip_reg_op != NULL) { + int err = 0; + + err = (*ip_ops_ptr->hwpm_ip_reg_op)(ip_ops_ptr->ip_dev, + TEGRA_SOC_HWPM_IP_REG_OP_READ, + offset, ®_val); + if (err < 0) { + tegra_hwpm_err(hwpm, "Aperture (0x%llx-0x%llx) " + "read offset(0x%llx) failed", + aperture->start_abs_pa, + aperture->end_abs_pa, offset); + return 0U; + } + } else { + /* Fall back to un-registered IP method */ + void __iomem *ptr = NULL; + + ptr = ioremap(aperture->start_abs_pa + offset, 0x4); + if (!ptr) { + tegra_hwpm_err(hwpm, + "Failed to map register(0x%llx)", + aperture->start_abs_pa + offset); + return 0U; + } + reg_val = __raw_readl(ptr); + iounmap(ptr); + } + return reg_val; + } +} + +/* + * Write to IP domain registers + * IP(except PMA and RTR) perfmux fall in this category + */ +static void 
ip_writel(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip_aperture *aperture, u64 offset, u32 val) +{ + tegra_hwpm_dbg(hwpm, hwpm_register, + "Aperture (0x%llx-0x%llx) offset(0x%llx) val(0x%x)", + aperture->start_abs_pa, aperture->end_abs_pa, offset, val); + + if (hwpm->fake_registers_enabled) { + fake_writel(hwpm, aperture, offset, val); + } else { + struct tegra_soc_hwpm_ip_ops *ip_ops_ptr = &aperture->ip_ops; + if (ip_ops_ptr->hwpm_ip_reg_op != NULL) { + int err = 0; + + err = (*ip_ops_ptr->hwpm_ip_reg_op)(ip_ops_ptr->ip_dev, + TEGRA_SOC_HWPM_IP_REG_OP_WRITE, + offset, &val); + if (err < 0) { + tegra_hwpm_err(hwpm, "Aperture (0x%llx-0x%llx) " + "write offset(0x%llx) val 0x%x failed", + aperture->start_abs_pa, + aperture->end_abs_pa, offset, val); + return; + } + } else { + /* Fall back to un-registered IP method */ + void __iomem *ptr = NULL; + + ptr = ioremap(aperture->start_abs_pa + offset, 0x4); + if (!ptr) { + tegra_hwpm_err(hwpm, + "Failed to map register(0x%llx)", + aperture->start_abs_pa + offset); + return; + } + __raw_writel(val, ptr); + iounmap(ptr); + } + } +} + +/* + * Read HWPM domain registers + * PERFMONs, PMA and RTR registers fall in this category + */ +static u32 hwpm_readl(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip_aperture *aperture, u32 offset) +{ + tegra_hwpm_dbg(hwpm, hwpm_register, + "Aperture (0x%llx-0x%llx) base 0x%llx offset(0x%x)", + aperture->start_abs_pa, aperture->end_abs_pa, + (u64 *)aperture->dt_mmio, offset); + + if (aperture->dt_mmio == NULL) { + tegra_hwpm_err(hwpm, "aperture is not iomapped as expected"); + return 0U; + } + + if (hwpm->fake_registers_enabled) { + return fake_readl(hwpm, aperture, offset); + } else { + return readl(aperture->dt_mmio + offset); + } +} + +/* + * Write to HWPM domain registers + * PERFMONs, PMA and RTR registers fall in this category + */ +static void hwpm_writel(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip_aperture *aperture, u32 offset, u32 val) +{ + tegra_hwpm_dbg(hwpm, hwpm_register, + "Aperture (0x%llx-0x%llx) base 0x%llx offset(0x%x) val(0x%x)", + aperture->start_abs_pa, aperture->end_abs_pa, + (u64 *)aperture->dt_mmio, offset, val); + + if (aperture->dt_mmio == NULL) { + tegra_hwpm_err(hwpm, "aperture is not iomapped as expected"); + return; + } + + if (hwpm->fake_registers_enabled) { + fake_writel(hwpm, aperture, offset, val); + } else { + writel(val, aperture->dt_mmio + offset); + } +} + +/* + * Read a HWPM domain register. It is assumed that valid aperture + * is passed to the function. + */ +u32 tegra_hwpm_readl(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip_aperture *aperture, u64 addr) +{ + u32 reg_val = 0; + + if (!aperture) { + tegra_hwpm_err(hwpm, "aperture is NULL"); + return -EINVAL; + } + + if (aperture->is_hwpm_element) { + /* HWPM domain registers */ + reg_val = hwpm_readl(hwpm, aperture, addr - aperture->base_pa); + } else { + tegra_hwpm_err(hwpm, "IP aperture read is not expected"); + return -EINVAL; + } + return reg_val; +} + +/* + * Write to a HWPM domain register. It is assumed that valid aperture + * is passed to the function. + */ +void tegra_hwpm_writel(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip_aperture *aperture, u64 addr, u32 val) +{ + if (!aperture) { + tegra_hwpm_err(hwpm, "aperture is NULL"); + return; + } + + if (aperture->is_hwpm_element) { + /* HWPM domain internal registers */ + hwpm_writel(hwpm, aperture, addr - aperture->base_pa, val); + } else { + tegra_hwpm_err(hwpm, "IP aperture write is not expected"); + return; + } +} + +/* + * Read a register from the EXEC_REG_OPS IOCTL. 
It is assumed that the allowlist + * check has been done before calling this function. + */ +u32 regops_readl(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip_aperture *aperture, u64 addr) +{ + u32 reg_val = 0; + + if (!aperture) { + tegra_hwpm_err(hwpm, "aperture is NULL"); + return 0; + } + + if (aperture->is_hwpm_element) { + /* HWPM unit internal registers */ + reg_val = hwpm_readl(hwpm, aperture, + addr - aperture->start_abs_pa); + } else { + reg_val = ip_readl(hwpm, aperture, + addr - aperture->start_abs_pa); + } + return reg_val; +} + +/* + * Write a register from the EXEC_REG_OPS IOCTL. It is assumed that the + * allowlist check has been done before calling this function. + */ +void regops_writel(struct tegra_soc_hwpm *hwpm, + struct hwpm_ip_aperture *aperture, u64 addr, u32 val) +{ + if (!aperture) { + tegra_hwpm_err(hwpm, "aperture is NULL"); + return; + } + + if (aperture->is_hwpm_element) { + /* HWPM unit internal registers */ + hwpm_writel(hwpm, aperture, addr - aperture->start_abs_pa, val); + } else { + ip_writel(hwpm, aperture, addr - aperture->start_abs_pa, val); + } +} diff --git a/tegra-soc-hwpm-ioctl.c b/os/linux/tegra_hwpm_ioctl.c similarity index 51% rename from tegra-soc-hwpm-ioctl.c rename to os/linux/tegra_hwpm_ioctl.c index 7b6372c..bbb4541 100644 --- a/tegra-soc-hwpm-ioctl.c +++ b/os/linux/tegra_hwpm_ioctl.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -9,38 +9,26 @@ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * tegra-soc-hwpm-ioctl.c: - * This file adds IOCTL handlers for the Tegra SOC HWPM driver. */ -#include +#include +#include +#include +#include +#include #include #include #include #include #include -#include -#include #include -#include -/* FIXME: Is this include needed for struct resource? 
*/ -#if 0 -#include -#endif -#include -#include - +#include #include -#include -#include -#include "tegra-soc-hwpm-log.h" -#include -#include +#include +#include +#include +#include #define LA_CLK_RATE 625000000UL @@ -123,15 +111,18 @@ static int device_info_ioctl(struct tegra_soc_hwpm *hwpm, struct tegra_soc_hwpm_device_info *device_info = (struct tegra_soc_hwpm_device_info *)ioctl_struct; - device_info->chip = tegra_get_chip_id(); - device_info->chip_revision = tegra_get_major_rev(); - device_info->revision = tegra_chip_get_revision(); - device_info->platform = tegra_get_platform(); + device_info->chip = hwpm->device_info.chip; + device_info->chip_revision = hwpm->device_info.chip_revision; + device_info->revision = hwpm->device_info.revision; + device_info->platform = hwpm->device_info.platform; - tegra_soc_hwpm_dbg("chip id 0x%x", device_info->chip); - tegra_soc_hwpm_dbg("chip_revision 0x%x", device_info->chip_revision); - tegra_soc_hwpm_dbg("revision 0x%x", device_info->revision); - tegra_soc_hwpm_dbg("platform 0x%x", device_info->platform); + tegra_hwpm_dbg(hwpm, hwpm_verbose, "chip id 0x%x", device_info->chip); + tegra_hwpm_dbg(hwpm, hwpm_verbose, + "chip_revision 0x%x", device_info->chip_revision); + tegra_hwpm_dbg(hwpm, hwpm_verbose, + "revision 0x%x", device_info->revision); + tegra_hwpm_dbg(hwpm, hwpm_verbose, + "platform 0x%x", device_info->platform); return 0; } @@ -139,35 +130,16 @@ static int device_info_ioctl(struct tegra_soc_hwpm *hwpm, static int floorsweep_info_ioctl(struct tegra_soc_hwpm *hwpm, void *ioctl_struct) { - u32 i = 0U; struct tegra_soc_hwpm_ip_floorsweep_info *fs_info = (struct tegra_soc_hwpm_ip_floorsweep_info *)ioctl_struct; if (fs_info->num_queries > TEGRA_SOC_HWPM_IP_QUERIES_MAX) { - tegra_soc_hwpm_err("Number of queries exceed max limit of %u", + tegra_hwpm_err(hwpm, "Number of queries exceed max limit of %u", TEGRA_SOC_HWPM_IP_QUERIES_MAX); return -EINVAL; } - for (i = 0U; i < fs_info->num_queries; i++) { - if (fs_info->ip_fsinfo[i].ip_type < TERGA_SOC_HWPM_NUM_IPS) { - fs_info->ip_fsinfo[i].status = - TEGRA_SOC_HWPM_IP_STATUS_VALID; - fs_info->ip_fsinfo[i].ip_inst_mask = - hwpm->ip_fs_info[fs_info->ip_fsinfo[i].ip_type]; - } else { - fs_info->ip_fsinfo[i].ip_inst_mask = 0ULL; - fs_info->ip_fsinfo[i].status = - TEGRA_SOC_HWPM_IP_STATUS_INVALID; - } - tegra_soc_hwpm_dbg( - "Query %d: ip_type %d: ip_status: %d inst_mask 0x%llx", - i, fs_info->ip_fsinfo[i].ip_type, - fs_info->ip_fsinfo[i].status, - fs_info->ip_fsinfo[i].ip_inst_mask); - } - - return 0; + return tegra_soc_hwpm_get_floorsweep_info(hwpm, fs_info); } static int timer_relation_ioctl(struct tegra_soc_hwpm *hwpm, @@ -179,45 +151,38 @@ static int timer_relation_ioctl(struct tegra_soc_hwpm *hwpm, (struct tegra_soc_hwpm_timer_relation *)ioctl_struct; #endif - tegra_soc_hwpm_err("The GET_GPU_CPU_TIME_CORRELATION_INFO IOCTL is" + tegra_hwpm_err(hwpm, "The GET_GPU_CPU_TIME_CORRELATION_INFO IOCTL is" " currently not implemented"); return -ENXIO; } - - static int reserve_resource_ioctl(struct tegra_soc_hwpm *hwpm, void *ioctl_struct) { struct tegra_soc_hwpm_reserve_resource *reserve_resource = (struct tegra_soc_hwpm_reserve_resource *)ioctl_struct; u32 resource = reserve_resource->resource; + int ret = 0; if (hwpm->bind_completed) { - tegra_soc_hwpm_err("The RESERVE_RESOURCE IOCTL can only be" + tegra_hwpm_err(hwpm, "The RESERVE_RESOURCE IOCTL can only be" " called before the BIND IOCTL."); return -EPERM; } if (resource >= TERGA_SOC_HWPM_NUM_RESOURCES) { - tegra_soc_hwpm_err("Requested resource %d is out 
of bounds.", + tegra_hwpm_err(hwpm, "Requested resource %d is out of bounds.", resource); return -EINVAL; } - if ((resource < TERGA_SOC_HWPM_NUM_IPS) && - (hwpm->ip_fs_info[resource] == 0)) { - tegra_soc_hwpm_dbg("Requested resource %d unavailable.", - resource); - return 0; + ret = tegra_soc_hwpm_reserve_resource(hwpm, resource); + if (ret < 0) { + tegra_hwpm_err(hwpm, "Failed to reserve resource %d", resource); } - /* - * FIXME: Tell IPs which are being profiled to power up IP and - * disable power management - */ - return tegra_soc_hwpm_reserve_given_resource(hwpm, resource); + return ret; } static int alloc_pma_stream_ioctl(struct tegra_soc_hwpm *hwpm, @@ -225,39 +190,48 @@ static int alloc_pma_stream_ioctl(struct tegra_soc_hwpm *hwpm, { struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream = (struct tegra_soc_hwpm_alloc_pma_stream *)ioctl_struct; + int ret = 0; if (hwpm->bind_completed) { - tegra_soc_hwpm_err("The ALLOC_PMA_STREAM IOCTL can only be" + tegra_hwpm_err(hwpm, "The ALLOC_PMA_STREAM IOCTL can only be" " called before the BIND IOCTL."); return -EPERM; } if (alloc_pma_stream->stream_buf_size == 0) { - tegra_soc_hwpm_err("stream_buf_size is 0"); + tegra_hwpm_err(hwpm, "stream_buf_size is 0"); return -EINVAL; } if (alloc_pma_stream->stream_buf_fd == 0) { - tegra_soc_hwpm_err("Invalid stream_buf_fd"); + tegra_hwpm_err(hwpm, "Invalid stream_buf_fd"); return -EINVAL; } if (alloc_pma_stream->mem_bytes_buf_fd == 0) { - tegra_soc_hwpm_err("Invalid mem_bytes_buf_fd"); + tegra_hwpm_err(hwpm, "Invalid mem_bytes_buf_fd"); return -EINVAL; } - return tegra_soc_hwpm_stream_buf_map(hwpm, alloc_pma_stream); + ret = tegra_hwpm_map_stream_buffer(hwpm, alloc_pma_stream); + if (ret < 0) { + tegra_hwpm_err(hwpm, "Failed to map stream buffer"); + } + + return ret; } static int bind_ioctl(struct tegra_soc_hwpm *hwpm, void *ioctl_struct) { - if (tegra_soc_hwpm_bind_resources(hwpm)) { - tegra_soc_hwpm_err("Failed to bind resources"); - return -EIO; + int ret = 0; + + ret = tegra_soc_hwpm_bind_resources(hwpm); + if (ret != 0) { + tegra_hwpm_err(hwpm, "Failed to bind resources"); + } else { + hwpm->bind_completed = true; } - hwpm->bind_completed = true; - return 0; + return ret; } static int query_allowlist_ioctl(struct tegra_soc_hwpm *hwpm, @@ -268,154 +242,46 @@ static int query_allowlist_ioctl(struct tegra_soc_hwpm *hwpm, (struct tegra_soc_hwpm_query_allowlist *)ioctl_struct; if (!hwpm->bind_completed) { - tegra_soc_hwpm_err("The QUERY_ALLOWLIST IOCTL can only be called" - " after the BIND IOCTL."); + tegra_hwpm_err(hwpm, + "The QUERY_ALLOWLIST IOCTL can only be called" + " after the BIND IOCTL."); return -EPERM; } - if (query_allowlist->allowlist != NULL) { - /* Concatenate allowlists and return */ - ret = tegra_soc_hwpm_update_allowlist(hwpm, ioctl_struct); - return ret; - } - - /* Return allowlist_size */ - if (hwpm->full_alist_size >= 0) { + if (query_allowlist->allowlist == NULL) { + /* Userspace is querying allowlist size only */ + if (hwpm->full_alist_size == 0) { + /*Full alist size is not computed yet */ + ret = tegra_soc_hwpm_get_allowlist_size(hwpm); + if (ret != 0) { + tegra_hwpm_err(hwpm, + "failed to get alist_size"); + return ret; + } + } query_allowlist->allowlist_size = hwpm->full_alist_size; - return 0; + } else { + /* Concatenate allowlists and return */ + ret = tegra_soc_hwpm_update_allowlist(hwpm, query_allowlist); + if (ret != 0) { + tegra_hwpm_err(hwpm, "Failed to update full alist"); + return ret; + } } - - hwpm->full_alist_size = 0; - 
tegra_soc_hwpm_get_full_allowlist(hwpm); - - query_allowlist->allowlist_size = hwpm->full_alist_size; - return ret; + return 0; } static int exec_reg_ops_ioctl(struct tegra_soc_hwpm *hwpm, void *ioctl_struct) { - int ret = 0; - struct tegra_soc_hwpm_exec_reg_ops *exec_reg_ops = - (struct tegra_soc_hwpm_exec_reg_ops *)ioctl_struct; - struct hwpm_resource_aperture *aperture = NULL; - int op_idx = 0; - struct tegra_soc_hwpm_reg_op *reg_op = NULL; - u64 upadted_pa = 0ULL; - if (!hwpm->bind_completed) { - tegra_soc_hwpm_err("The EXEC_REG_OPS IOCTL can only be called" + tegra_hwpm_err(hwpm, "The EXEC_REG_OPS IOCTL can only be called" " after the BIND IOCTL."); return -EPERM; } - switch (exec_reg_ops->mode) { - case TEGRA_SOC_HWPM_REG_OP_MODE_FAIL_ON_FIRST: - case TEGRA_SOC_HWPM_REG_OP_MODE_CONT_ON_ERR: - break; - default: - tegra_soc_hwpm_err("Invalid reg ops mode(%u)", - exec_reg_ops->mode); - return -EINVAL; - } - - for (op_idx = 0; op_idx < exec_reg_ops->op_count; op_idx++) { -#define REG_OP_FAIL(op_status, msg, ...) \ - do { \ - tegra_soc_hwpm_err(msg, ##__VA_ARGS__); \ - reg_op->status = \ - TEGRA_SOC_HWPM_REG_OP_STATUS_ ## op_status; \ - exec_reg_ops->b_all_reg_ops_passed = false; \ - if (exec_reg_ops->mode == \ - TEGRA_SOC_HWPM_REG_OP_MODE_FAIL_ON_FIRST) { \ - return -EINVAL; \ - } \ - } while (0) - - reg_op = &(exec_reg_ops->ops[op_idx]); - tegra_soc_hwpm_dbg("reg op: idx(%d), phys(0x%llx), cmd(%u)", - op_idx, reg_op->phys_addr, reg_op->cmd); - - /* The allowlist check is done here */ - aperture = tegra_soc_hwpm_find_aperture(hwpm, reg_op->phys_addr, - true, true, &upadted_pa); - if (!aperture) { - REG_OP_FAIL(INSUFFICIENT_PERMISSIONS, - "Invalid register address(0x%llx)", - reg_op->phys_addr); - continue; - } - - switch (reg_op->cmd) { - case TEGRA_SOC_HWPM_REG_OP_CMD_RD32: - reg_op->reg_val_lo = ioctl_readl(hwpm, - aperture, - upadted_pa); - reg_op->status = TEGRA_SOC_HWPM_REG_OP_STATUS_SUCCESS; - break; - - case TEGRA_SOC_HWPM_REG_OP_CMD_RD64: - reg_op->reg_val_lo = ioctl_readl(hwpm, - aperture, - upadted_pa); - reg_op->reg_val_hi = ioctl_readl(hwpm, - aperture, - upadted_pa + 4); - reg_op->status = TEGRA_SOC_HWPM_REG_OP_STATUS_SUCCESS; - break; - - /* Read Modify Write operation */ - case TEGRA_SOC_HWPM_REG_OP_CMD_WR32: - ret = reg_rmw(hwpm, aperture, aperture->dt_aperture, - upadted_pa, reg_op->mask_lo, - reg_op->reg_val_lo, true, aperture->is_ip); - if (ret < 0) { - REG_OP_FAIL(WR_FAILED, - "WR32 REGOP failed for register(0x%llx)", - upadted_pa); - } else { - reg_op->status = TEGRA_SOC_HWPM_REG_OP_STATUS_SUCCESS; - } - break; - - /* Read Modify Write operation */ - case TEGRA_SOC_HWPM_REG_OP_CMD_WR64: - /* Lower 32 bits */ - ret = reg_rmw(hwpm, aperture, aperture->dt_aperture, - upadted_pa, reg_op->mask_lo, - reg_op->reg_val_lo, true, aperture->is_ip); - if (ret < 0) { - REG_OP_FAIL(WR_FAILED, - "WR64 REGOP failed for register(0x%llx)", - upadted_pa); - continue; - } - - /* Upper 32 bits */ - ret = reg_rmw(hwpm, aperture, aperture->dt_aperture, - upadted_pa + 4, reg_op->mask_hi, - reg_op->reg_val_hi, true, aperture->is_ip); - if (ret < 0) { - REG_OP_FAIL(WR_FAILED, - "WR64 REGOP failed for register(0x%llx)", - upadted_pa + 4); - } else { - reg_op->status = TEGRA_SOC_HWPM_REG_OP_STATUS_SUCCESS; - } - - break; - - default: - REG_OP_FAIL(INVALID_CMD, - "Invalid reg op command(%u)", - reg_op->cmd); - break; - } - - } - - exec_reg_ops->b_all_reg_ops_passed = true; - return 0; + return tegra_soc_hwpm_exec_regops(hwpm, + (struct tegra_soc_hwpm_exec_reg_ops *)ioctl_struct); } static int 
update_get_put_ioctl(struct tegra_soc_hwpm *hwpm, @@ -425,16 +291,18 @@ static int update_get_put_ioctl(struct tegra_soc_hwpm *hwpm, (struct tegra_soc_hwpm_update_get_put *)ioctl_struct; if (!hwpm->bind_completed) { - tegra_soc_hwpm_err("The UPDATE_GET_PUT IOCTL can only be called" - " after the BIND IOCTL."); + tegra_hwpm_err(hwpm, + "The UPDATE_GET_PUT IOCTL can only be called" + " after the BIND IOCTL."); return -EPERM; } if (!hwpm->mem_bytes_kernel) { - tegra_soc_hwpm_err("mem_bytes buffer is not mapped in the driver"); + tegra_hwpm_err(hwpm, + "mem_bytes buffer is not mapped in the driver"); return -ENXIO; } - return tegra_soc_hwpm_update_mem_bytes(hwpm, update_get_put); + return tegra_hwpm_update_mem_bytes(hwpm, update_get_put); } static long tegra_soc_hwpm_ioctl(struct file *file, @@ -451,26 +319,26 @@ static long tegra_soc_hwpm_ioctl(struct file *file, if ((_IOC_TYPE(cmd) != TEGRA_SOC_HWPM_IOC_MAGIC) || (ioctl_num < 0) || (ioctl_num >= TERGA_SOC_HWPM_NUM_IOCTLS)) { - tegra_soc_hwpm_err("Unsupported IOCTL call"); + tegra_hwpm_err(hwpm, "Unsupported IOCTL call"); ret = -EINVAL; goto end; } if (!file) { - tegra_soc_hwpm_err("Invalid file"); + tegra_hwpm_err(hwpm, "Invalid file"); ret = -ENODEV; goto fail; } if (arg_size != ioctls[ioctl_num].struct_size) { - tegra_soc_hwpm_err("Invalid userspace struct"); + tegra_hwpm_err(hwpm, "Invalid userspace struct"); ret = -EINVAL; goto fail; } hwpm = file->private_data; if (!hwpm) { - tegra_soc_hwpm_err("Invalid hwpm struct"); + tegra_hwpm_err(hwpm, "Invalid hwpm struct"); ret = -ENODEV; goto fail; } @@ -479,7 +347,8 @@ static long tegra_soc_hwpm_ioctl(struct file *file, if (!(ioc_dir & _IOC_NONE)) { arg_copy = kzalloc(arg_size, GFP_KERNEL); if (!arg_copy) { - tegra_soc_hwpm_err("Can't allocate memory for kernel struct"); + tegra_hwpm_err(hwpm, + "Can't allocate memory for kernel struct"); ret = -ENOMEM; goto fail; } @@ -487,8 +356,9 @@ static long tegra_soc_hwpm_ioctl(struct file *file, if (ioc_dir & _IOC_WRITE) { if (copy_from_user(arg_copy, (void __user *)arg, arg_size)) { - tegra_soc_hwpm_err("Failed to copy data from userspace" - " struct into kernel struct"); + tegra_hwpm_err(hwpm, + "Failed to copy data from userspace" + " struct into kernel struct"); ret = -EFAULT; goto fail; } @@ -503,8 +373,8 @@ static long tegra_soc_hwpm_ioctl(struct file *file, if (ioc_dir & _IOC_READ) { if (copy_to_user((void __user *)arg, arg_copy, arg_size)) { - tegra_soc_hwpm_err("Failed to copy data from kernel" - " struct into userspace struct"); + tegra_hwpm_err(hwpm, "Failed to copy data from kernel" + " struct into userspace struct"); ret = -EFAULT; goto fail; } @@ -513,12 +383,12 @@ static long tegra_soc_hwpm_ioctl(struct file *file, if (ret < 0) goto fail; - tegra_soc_hwpm_dbg("The %s IOCTL completed successfully!", + tegra_hwpm_dbg(hwpm, hwpm_info, "The %s IOCTL completed successfully!", ioctls[ioctl_num].name); goto cleanup; fail: - tegra_soc_hwpm_err("The %s IOCTL failed(%d)!", + tegra_hwpm_err(hwpm, "The %s IOCTL failed(%d)!", ioctls[ioctl_num].name, ret); cleanup: if (arg_copy) @@ -532,118 +402,98 @@ static int tegra_soc_hwpm_open(struct inode *inode, struct file *filp) int ret = 0; unsigned int minor; struct tegra_soc_hwpm *hwpm = NULL; - u32 i; if (!inode) { - tegra_soc_hwpm_err("Invalid inode"); + tegra_hwpm_err(hwpm, "Invalid inode"); return -EINVAL; } + if (!filp) { - tegra_soc_hwpm_err("Invalid file"); + tegra_hwpm_err(hwpm, "Invalid file"); return -EINVAL; } minor = iminor(inode); if (minor > 0) { - tegra_soc_hwpm_err("Incorrect minor number"); 
+ tegra_hwpm_err(hwpm, "Incorrect minor number"); return -EBADFD; } hwpm = container_of(inode->i_cdev, struct tegra_soc_hwpm, cdev); if (!hwpm) { - tegra_soc_hwpm_err("Invalid hwpm struct"); + tegra_hwpm_err(hwpm, "Invalid hwpm struct"); return -EINVAL; } filp->private_data = hwpm; + /* Initialize driver on first open call only */ + if (atomic_add_return(1, &hwpm->hwpm_in_use) != 1) { + return 0; + } + if (tegra_platform_is_silicon()) { ret = reset_control_assert(hwpm->hwpm_rst); if (ret < 0) { - tegra_soc_hwpm_err("hwpm reset assert failed"); - ret = -ENODEV; + tegra_hwpm_err(hwpm, "hwpm reset assert failed"); goto fail; } ret = reset_control_assert(hwpm->la_rst); if (ret < 0) { - tegra_soc_hwpm_err("la reset assert failed"); - ret = -ENODEV; + tegra_hwpm_err(hwpm, "la reset assert failed"); goto fail; } /* Set required parent for la_clk */ if (hwpm->la_clk && hwpm->la_parent_clk) { ret = clk_set_parent(hwpm->la_clk, hwpm->la_parent_clk); if (ret < 0) { - tegra_soc_hwpm_err("la clk set parent failed"); - ret = -ENODEV; + tegra_hwpm_err(hwpm, + "la clk set parent failed"); goto fail; } } /* set la_clk rate to 625 MHZ */ ret = clk_set_rate(hwpm->la_clk, LA_CLK_RATE); if (ret < 0) { - tegra_soc_hwpm_err("la clock set rate failed"); - ret = -ENODEV; + tegra_hwpm_err(hwpm, "la clock set rate failed"); goto fail; } ret = clk_prepare_enable(hwpm->la_clk); if (ret < 0) { - tegra_soc_hwpm_err("la clock enable failed"); - ret = -ENODEV; + tegra_hwpm_err(hwpm, "la clock enable failed"); goto fail; } ret = reset_control_deassert(hwpm->la_rst); if (ret < 0) { - tegra_soc_hwpm_err("la reset deassert failed"); - ret = -ENODEV; + tegra_hwpm_err(hwpm, "la reset deassert failed"); goto fail; } ret = reset_control_deassert(hwpm->hwpm_rst); if (ret < 0) { - tegra_soc_hwpm_err("hwpm reset deassert failed"); - ret = -ENODEV; + tegra_hwpm_err(hwpm, "hwpm reset deassert failed"); goto fail; } } - /* Initialize IP floorsweep info */ - tegra_soc_hwpm_dbg("Initialize IP fs info"); - for (i = 0U; i < TERGA_SOC_HWPM_NUM_IPS; i++) { - hwpm->ip_fs_info[i] = 0ULL; - } - - /* Map PMA and RTR apertures */ - ret = tegra_soc_hwpm_fs_info_init(hwpm); + ret = tegra_soc_hwpm_setup_hw(hwpm); if (ret < 0) { - tegra_soc_hwpm_err("Unable to initialize fs fs_info"); - ret = -EIO; + tegra_hwpm_err(hwpm, "Failed to setup hw"); goto fail; } - /* Map PMA and RTR apertures */ - ret = tegra_soc_hwpm_pma_rtr_map(hwpm); + ret = tegra_soc_hwpm_setup_sw(hwpm); if (ret < 0) { - tegra_soc_hwpm_err("Unable to reserve PMA RTR apertures"); - ret = -EIO; + tegra_hwpm_err(hwpm, "Failed to setup sw"); goto fail; } - /* Disable SLCG */ - ret = tegra_soc_hwpm_disable_slcg(hwpm); - if (ret < 0) { - tegra_soc_hwpm_err("Unable to disable SLCG"); - goto fail; - } - - /* Initialize SW state */ - hwpm->bind_completed = false; - hwpm->full_alist_size = -1; - return 0; - fail: - tegra_soc_hwpm_pma_rtr_unmap(hwpm); - tegra_soc_hwpm_err("%s failed", __func__); + ret = tegra_soc_hwpm_release_hw(hwpm); + if (ret < 0) { + tegra_hwpm_err(hwpm, "Failed to release hw"); + } + tegra_hwpm_err(hwpm, "%s failed", __func__); return ret; } @@ -658,65 +508,69 @@ static ssize_t tegra_soc_hwpm_read(struct file *file, /* FIXME: Fix double release bug */ static int tegra_soc_hwpm_release(struct inode *inode, struct file *filp) { - int err = 0; int ret = 0; struct tegra_soc_hwpm *hwpm = NULL; if (!inode) { - tegra_soc_hwpm_err("Invalid inode"); + tegra_hwpm_err(hwpm, "Invalid inode"); return -EINVAL; } if (!filp) { - tegra_soc_hwpm_err("Invalid file"); + tegra_hwpm_err(hwpm, 
"Invalid file"); return -EINVAL; } hwpm = container_of(inode->i_cdev, struct tegra_soc_hwpm, cdev); if (!hwpm) { - tegra_soc_hwpm_err("Invalid hwpm struct"); + tegra_hwpm_err(hwpm, "Invalid hwpm struct"); return -EINVAL; } - ret = tegra_soc_hwpm_disable_pma_triggers(hwpm); - if (ret != 0) { - return ret; + /* De-init driver on last close call only */ + if (!atomic_dec_and_test(&hwpm->hwpm_in_use)) { + return 0; } - /* Disable all PERFMONs */ - tegra_soc_hwpm_dbg("Disabling PERFMONs"); - tegra_soc_hwpm_disable_perfmons(hwpm); + ret = tegra_hwpm_disable_triggers(hwpm); + if (ret < 0) { + tegra_hwpm_err(hwpm, "Failed to disable PMA triggers"); + goto fail; + } + + /* Disable and release reserved IPs */ + ret = tegra_soc_hwpm_release_resources(hwpm); + if (ret < 0) { + tegra_hwpm_err(hwpm, "Failed to release IP apertures"); + goto fail; + } /* Clear MEM_BYTES pipeline */ - err = tegra_soc_hwpm_clear_pipeline(hwpm); - if (err < 0) { - tegra_soc_hwpm_err("Failed to clear MEM_BYTES pipeline"); - return err; + ret = tegra_hwpm_clear_mem_pipeline(hwpm); + if (ret < 0) { + tegra_hwpm_err(hwpm, "Failed to clear MEM_BYTES pipeline"); + goto fail; } - /* Enable SLCG */ - err = tegra_soc_hwpm_enable_slcg(hwpm); - if (err != 0) { - tegra_soc_hwpm_err("Unable to enable SLCG"); - return err; + ret = tegra_soc_hwpm_release_hw(hwpm); + if (ret < 0) { + tegra_hwpm_err(hwpm, "Failed to release hw"); + goto fail; } - /* Unmap PMA and RTR apertures */ - err = tegra_soc_hwpm_pma_rtr_unmap(hwpm); - if (err != 0) { - tegra_soc_hwpm_err("Unable to unmap PMA and RTR"); - return err; - } - - tegra_soc_hwpm_reset_resources(hwpm); - if (tegra_platform_is_silicon()) { - err = reset_control_assert(hwpm->hwpm_rst); - RELEASE_FAIL("hwpm reset assert failed"); - err = reset_control_assert(hwpm->la_rst); - RELEASE_FAIL("la reset assert failed"); + ret = reset_control_assert(hwpm->hwpm_rst); + if (ret < 0) { + tegra_hwpm_err(hwpm, "hwpm reset assert failed"); + goto fail; + } + ret = reset_control_assert(hwpm->la_rst); + if (ret < 0) { + tegra_hwpm_err(hwpm, "la reset assert failed"); + goto fail; + } clk_disable_unprepare(hwpm->la_clk); } - +fail: return ret; } diff --git a/os/linux/tegra_hwpm_ip.c b/os/linux/tegra_hwpm_ip.c new file mode 100644 index 0000000..835523b --- /dev/null +++ b/os/linux/tegra_hwpm_ip.c @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include + +#include +#include +#include + +struct platform_device *tegra_soc_hwpm_pdev; + +#define REGISTER_IP true +#define UNREGISTER_IP false + +void tegra_soc_hwpm_ip_register(struct tegra_soc_hwpm_ip_ops *hwpm_ip_ops) +{ + struct tegra_soc_hwpm *hwpm = NULL; + int ret = 0; + + if (tegra_soc_hwpm_pdev == NULL) { + tegra_hwpm_dbg(hwpm, hwpm_info, + "IP %d trying to register. 
HWPM device not available", + hwpm_ip_ops->ip_index); + } else { + if (hwpm_ip_ops->ip_dev == NULL) { + tegra_hwpm_err(hwpm, "IP dev to register is NULL"); + return; + } + hwpm = platform_get_drvdata(tegra_soc_hwpm_pdev); + + tegra_hwpm_dbg(hwpm, hwpm_info, + "Register IP 0x%llx", hwpm_ip_ops->ip_base_address); + + if (hwpm->active_chip->extract_ip_ops == NULL) { + tegra_hwpm_err(hwpm, "extract_ip_ops uninitialized"); + return; + } + ret = hwpm->active_chip->extract_ip_ops(hwpm, + hwpm_ip_ops, REGISTER_IP); + if (ret < 0) { + tegra_hwpm_err(hwpm, "Failed to set IP ops for IP %d", + hwpm_ip_ops->ip_index); + } + } +} + +void tegra_soc_hwpm_ip_unregister(struct tegra_soc_hwpm_ip_ops *hwpm_ip_ops) +{ + struct tegra_soc_hwpm *hwpm = NULL; + int ret = 0; + + if (tegra_soc_hwpm_pdev == NULL) { + tegra_hwpm_dbg(hwpm, hwpm_info, "HWPM device not available"); + } else { + if (hwpm_ip_ops->ip_dev == NULL) { + tegra_hwpm_err(hwpm, "IP dev to unregister is NULL"); + return; + } + hwpm = platform_get_drvdata(tegra_soc_hwpm_pdev); + + tegra_hwpm_dbg(hwpm, hwpm_info, + "Unregister IP 0x%llx", hwpm_ip_ops->ip_base_address); + + if (hwpm->active_chip->extract_ip_ops == NULL) { + tegra_hwpm_err(hwpm, "extract_ip_ops uninitialized"); + return; + } + ret = hwpm->active_chip->extract_ip_ops(hwpm, + hwpm_ip_ops, UNREGISTER_IP); + if (ret < 0) { + tegra_hwpm_err(hwpm, "Failed to reset IP ops for IP %d", + hwpm_ip_ops->ip_index); + } + } +} + +int tegra_soc_hwpm_get_floorsweep_info(struct tegra_soc_hwpm *hwpm, + struct tegra_soc_hwpm_ip_floorsweep_info *fs_info) +{ + int ret = 0; + u32 i = 0U; + + tegra_hwpm_fn(hwpm, " "); + + if (hwpm->active_chip->get_fs_info == NULL) { + tegra_hwpm_err(hwpm, "get_fs_info uninitialized"); + return -ENODEV; + } + + for (i = 0U; i < fs_info->num_queries; i++) { + ret = hwpm->active_chip->get_fs_info( + hwpm, (u32)fs_info->ip_fsinfo[i].ip_type, + &fs_info->ip_fsinfo[i].ip_inst_mask, + &fs_info->ip_fsinfo[i].status); + if (ret < 0) { + /* Print error for debug purpose. */ + tegra_hwpm_err(hwpm, "Failed to get fs_info"); + } + + tegra_hwpm_dbg(hwpm, hwpm_verbose, + "Query %d: ip_type %d: ip_status: %d inst_mask 0x%llx", + i, fs_info->ip_fsinfo[i].ip_type, + fs_info->ip_fsinfo[i].status, + fs_info->ip_fsinfo[i].ip_inst_mask); + } + return ret; +} diff --git a/tegra-soc-hwpm.c b/os/linux/tegra_hwpm_linux.c similarity index 78% rename from tegra-soc-hwpm.c rename to os/linux/tegra_hwpm_linux.c index 2887977..bf6a97e 100644 --- a/tegra-soc-hwpm.c +++ b/os/linux/tegra_hwpm_linux.c @@ -1,8 +1,5 @@ /* - * tegra-soc-hwpm.c: - * This is Tegra's driver for programming the SOC HWPM path. - * - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +9,6 @@ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
*/ #include @@ -29,10 +23,10 @@ #include -#include "tegra-soc-hwpm.h" -#include "tegra-soc-hwpm-log.h" -#include -#include +#include +#include +#include +#include static const struct of_device_id tegra_soc_hwpm_of_match[] = { { @@ -49,14 +43,14 @@ static int tegra_soc_hwpm_probe(struct platform_device *pdev) struct tegra_soc_hwpm *hwpm = NULL; if (!pdev) { - tegra_soc_hwpm_err("Invalid platform device"); + tegra_hwpm_err(NULL, "Invalid platform device"); ret = -ENODEV; goto fail; } hwpm = kzalloc(sizeof(struct tegra_soc_hwpm), GFP_KERNEL); if (!hwpm) { - tegra_soc_hwpm_err("Couldn't allocate memory for hwpm struct"); + tegra_hwpm_err(hwpm, "Couldn't allocate memory for hwpm struct"); ret = -ENOMEM; goto fail; } @@ -69,13 +63,13 @@ static int tegra_soc_hwpm_probe(struct platform_device *pdev) /* Create device node */ ret = class_register(&hwpm->class); if (ret) { - tegra_soc_hwpm_err("Failed to register class"); + tegra_hwpm_err(hwpm, "Failed to register class"); goto class_register; } ret = alloc_chrdev_region(&hwpm->dev_t, 0, 1, dev_name(hwpm->dev)); if (ret) { - tegra_soc_hwpm_err("Failed to allocate device region"); + tegra_hwpm_err(hwpm, "Failed to allocate device region"); goto alloc_chrdev_region; } @@ -84,7 +78,7 @@ static int tegra_soc_hwpm_probe(struct platform_device *pdev) ret = cdev_add(&hwpm->cdev, hwpm->dev_t, 1); if (ret) { - tegra_soc_hwpm_err("Failed to add cdev"); + tegra_hwpm_err(hwpm, "Failed to add cdev"); goto cdev_add; } @@ -94,7 +88,7 @@ static int tegra_soc_hwpm_probe(struct platform_device *pdev) NULL, TEGRA_SOC_HWPM_MODULE_NAME); if (IS_ERR(dev)) { - tegra_soc_hwpm_err("Failed to create device"); + tegra_hwpm_err(hwpm, "Failed to create device"); ret = PTR_ERR(dev); goto device_create; } @@ -104,36 +98,35 @@ static int tegra_soc_hwpm_probe(struct platform_device *pdev) if (tegra_platform_is_silicon()) { hwpm->la_clk = devm_clk_get(hwpm->dev, "la"); if (IS_ERR(hwpm->la_clk)) { - tegra_soc_hwpm_err("Missing la clock"); + tegra_hwpm_err(hwpm, "Missing la clock"); ret = PTR_ERR(hwpm->la_clk); goto clock_reset_fail; } hwpm->la_parent_clk = devm_clk_get(hwpm->dev, "parent"); if (IS_ERR(hwpm->la_parent_clk)) { - tegra_soc_hwpm_err("Missing la parent clk"); + tegra_hwpm_err(hwpm, "Missing la parent clk"); ret = PTR_ERR(hwpm->la_parent_clk); goto clock_reset_fail; } hwpm->la_rst = devm_reset_control_get(hwpm->dev, "la"); if (IS_ERR(hwpm->la_rst)) { - tegra_soc_hwpm_err("Missing la reset"); + tegra_hwpm_err(hwpm, "Missing la reset"); ret = PTR_ERR(hwpm->la_rst); goto clock_reset_fail; } hwpm->hwpm_rst = devm_reset_control_get(hwpm->dev, "hwpm"); if (IS_ERR(hwpm->hwpm_rst)) { - tegra_soc_hwpm_err("Missing hwpm reset"); + tegra_hwpm_err(hwpm, "Missing hwpm reset"); ret = PTR_ERR(hwpm->hwpm_rst); goto clock_reset_fail; } } tegra_soc_hwpm_debugfs_init(hwpm); - hwpm->dt_apertures = tegra_soc_hwpm_init_dt_apertures(); - hwpm->ip_info = tegra_soc_hwpm_init_ip_ops_info(); + tegra_soc_hwpm_init_chip_info(hwpm); /* * Currently VDK doesn't have a fmodel for SOC HWPM. 
Therefore, we @@ -147,7 +140,7 @@ static int tegra_soc_hwpm_probe(struct platform_device *pdev) platform_set_drvdata(pdev, hwpm); tegra_soc_hwpm_pdev = pdev; - tegra_soc_hwpm_dbg("Probe successful!"); + tegra_hwpm_dbg(hwpm, hwpm_info, "Probe successful!"); goto success; @@ -171,7 +164,7 @@ alloc_chrdev_region: class_register: kfree(hwpm); fail: - tegra_soc_hwpm_err("Probe failed!"); + tegra_hwpm_err(NULL, "Probe failed!"); success: return ret; } @@ -181,18 +174,16 @@ static int tegra_soc_hwpm_remove(struct platform_device *pdev) struct tegra_soc_hwpm *hwpm = NULL; if (!pdev) { - tegra_soc_hwpm_err("Invalid platform device"); + tegra_hwpm_err(hwpm, "Invalid platform device"); return -ENODEV; } hwpm = platform_get_drvdata(pdev); if (!hwpm) { - tegra_soc_hwpm_err("Invalid hwpm struct"); + tegra_hwpm_err(hwpm, "Invalid hwpm struct"); return -ENODEV; } - tegra_soc_hwpm_debugfs_deinit(hwpm); - if (tegra_platform_is_silicon()) { if (hwpm->la_clk) devm_clk_put(hwpm->dev, hwpm->la_clk); @@ -209,8 +200,8 @@ static int tegra_soc_hwpm_remove(struct platform_device *pdev) unregister_chrdev_region(hwpm->dev_t, 1); class_unregister(&hwpm->class); - kfree(hwpm); - tegra_soc_hwpm_pdev = NULL; + tegra_soc_hwpm_debugfs_deinit(hwpm); + tegra_soc_hwpm_release_sw_components(hwpm); return 0; } @@ -230,14 +221,13 @@ static int __init tegra_soc_hwpm_init(void) ret = platform_driver_register(&tegra_soc_hwpm_pdrv); if (ret < 0) - tegra_soc_hwpm_err("Platform driver register failed"); + tegra_hwpm_err(NULL, "Platform driver register failed"); return ret; } static void __exit tegra_soc_hwpm_exit(void) { - tegra_soc_hwpm_dbg("Unloading the Tegra SOC HWPM driver"); platform_driver_unregister(&tegra_soc_hwpm_pdrv); } diff --git a/tegra-soc-hwpm-log.c b/os/linux/tegra_hwpm_log.c similarity index 51% rename from tegra-soc-hwpm-log.c rename to os/linux/tegra_hwpm_log.c index e96975c..b142fea 100644 --- a/tegra-soc-hwpm-log.c +++ b/os/linux/tegra_hwpm_log.c @@ -1,8 +1,5 @@ /* - * tegra-soc-hwpm-log.c: - * This file adds logging APIs for the Tegra SOC HWPM driver. - * - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,36 +9,32 @@ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . */ #include -#include "tegra-soc-hwpm.h" -#include "tegra-soc-hwpm-log.h" +#include +#include #define LOG_BUF_SIZE 160 -static void tegra_soc_hwpm_print(const char *func, - int line, - int type, - const char *log) +static void tegra_soc_hwpm_print(const char *func, int line, + int type, const char *log) { switch (type) { - case tegra_soc_hwpm_log_err: + case TEGRA_HWPM_ERROR: pr_err(TEGRA_SOC_HWPM_MODULE_NAME ": %s: %d: ERROR: %s\n", func, line, log); break; - case tegra_soc_hwpm_log_dbg: + case TEGRA_HWPM_DEBUG: pr_info(TEGRA_SOC_HWPM_MODULE_NAME ": %s: %d: DEBUG: %s\n", func, line, log); break; } } -void tegra_soc_hwpm_log(const char *func, int line, int type, const char *fmt, ...) +void tegra_soc_err_impl(struct tegra_soc_hwpm *hwpm, + const char *func, int line, const char *fmt, ...) 
{ char log[LOG_BUF_SIZE]; va_list args; @@ -50,5 +43,22 @@ void tegra_soc_hwpm_log(const char *func, int line, int type, const char *fmt, . (void) vsnprintf(log, LOG_BUF_SIZE, fmt, args); va_end(args); - tegra_soc_hwpm_print(func, line, type, log); + tegra_soc_hwpm_print(func, line, TEGRA_HWPM_ERROR, log); +} + +void tegra_hwpm_dbg_impl(struct tegra_soc_hwpm *hwpm, + u32 dbg_mask, const char *func, int line, const char *fmt, ...) +{ + char log[LOG_BUF_SIZE]; + va_list args; + + if ((hwpm == NULL) || ((dbg_mask & hwpm->dbg_mask) == 0)) { + return; + } + + va_start(args, fmt); + (void) vsnprintf(log, LOG_BUF_SIZE, fmt, args); + va_end(args); + + tegra_soc_hwpm_print(func, line, TEGRA_HWPM_DEBUG, log); } diff --git a/tegra-soc-hwpm-io.c b/tegra-soc-hwpm-io.c deleted file mode 100644 index ab1d9bc..0000000 --- a/tegra-soc-hwpm-io.c +++ /dev/null @@ -1,308 +0,0 @@ -/* - * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * tegra-soc-hwpm-io.c: - * This file contains register read/write functions for the Tegra SOC HWPM - * driver. - */ - -#include -#include -#include -#include - -#include "tegra-soc-hwpm-io.h" -#include "tegra-soc-hwpm-log.h" -#include -#include -#include - -static u32 fake_readl(struct tegra_soc_hwpm *hwpm, u64 phys_addr) -{ - u32 reg_val = 0; - u64 updated_pa = 0ULL; - struct hwpm_resource_aperture *aperture = NULL; - - if (!hwpm->fake_registers_enabled) { - tegra_soc_hwpm_err("Fake registers are disabled!"); - return 0; - } - - aperture = tegra_soc_hwpm_find_aperture(hwpm, phys_addr, false, false, &updated_pa); - if (!aperture) { - tegra_soc_hwpm_err("Invalid reg op address(0x%llx)", phys_addr); - return 0; - } - - reg_val = aperture->fake_registers[(updated_pa - aperture->start_pa)/4]; - return reg_val; -} - -static void fake_writel(struct tegra_soc_hwpm *hwpm, - u64 phys_addr, - u32 val) -{ - struct hwpm_resource_aperture *aperture = NULL; - u64 updated_pa = 0ULL; - - if (!hwpm->fake_registers_enabled) { - tegra_soc_hwpm_err("Fake registers are disabled!"); - return; - } - - aperture = tegra_soc_hwpm_find_aperture(hwpm, phys_addr, false, false, &updated_pa); - if (!aperture) { - tegra_soc_hwpm_err("Invalid reg op address(0x%llx)", phys_addr); - return; - } - - aperture->fake_registers[(updated_pa - aperture->start_pa)/4] = val; -} - -/* Read a HWPM (PERFMON, PMA, or RTR) register */ -u32 hwpm_readl(struct tegra_soc_hwpm *hwpm, u32 dt_aperture, u32 reg_offset) -{ - if (!tegra_soc_hwpm_is_dt_aperture(dt_aperture)) { - tegra_soc_hwpm_err("Invalid dt aperture(%d)", dt_aperture); - return 0; - } - - tegra_soc_hwpm_dbg( - "dt_aperture(%d): dt_aperture addr(0x%llx) reg_offset(0x%x)", - dt_aperture, (unsigned long long *)hwpm->dt_apertures[dt_aperture], - reg_offset); - - if (hwpm->fake_registers_enabled) { - u64 base_pa = tegra_soc_hwpm_get_perfmon_base(dt_aperture); - - return fake_readl(hwpm, base_pa + reg_offset); - } else { - return readl(hwpm->dt_apertures[dt_aperture] + reg_offset); - } 
-} - -/* Write a HWPM (PERFMON, PMA, or RTR) register */ -void hwpm_writel(struct tegra_soc_hwpm *hwpm, u32 dt_aperture, - u32 reg_offset, u32 val) -{ - if (!tegra_soc_hwpm_is_dt_aperture(dt_aperture)) { - tegra_soc_hwpm_err("Invalid dt aperture(%d)", dt_aperture); - return; - } - - tegra_soc_hwpm_dbg( - "dt_aperture(%d): dt_aperture addr(0x%llx) " - "reg_offset(0x%x), val(0x%x)", - dt_aperture, (unsigned long long *)hwpm->dt_apertures[dt_aperture], - reg_offset, val); - - if (hwpm->fake_registers_enabled) { - u64 base_pa = tegra_soc_hwpm_get_perfmon_base(dt_aperture); - - fake_writel(hwpm, base_pa + reg_offset, val); - } else { - writel(val, hwpm->dt_apertures[dt_aperture] + reg_offset); - } -} - -u32 ip_readl(struct tegra_soc_hwpm *hwpm, u64 phys_addr) -{ - tegra_soc_hwpm_dbg("reg read: phys_addr(0x%llx)", phys_addr); - - if (hwpm->fake_registers_enabled) { - return fake_readl(hwpm, phys_addr); - } else { - u64 ip_start_pa = 0ULL; - u32 reg_val = 0U; - u32 dt_aperture = tegra_soc_hwpm_get_ip_aperture(hwpm, - phys_addr, &ip_start_pa); - struct tegra_soc_hwpm_ip_ops *ip_ops = - dt_aperture == TEGRA_SOC_HWPM_DT_APERTURE_INVALID ? - NULL : &hwpm->ip_info[dt_aperture]; - - if (ip_ops && (*ip_ops->hwpm_ip_reg_op)) { - int err = 0; - - tegra_soc_hwpm_dbg( - "aperture: %d ip_ops offset(0x%llx)", - dt_aperture, (phys_addr - ip_start_pa)); - err = (*ip_ops->hwpm_ip_reg_op)(ip_ops->ip_dev, - TEGRA_SOC_HWPM_IP_REG_OP_READ, - (phys_addr - ip_start_pa), ®_val); - if (err < 0) { - tegra_soc_hwpm_err( - "Failed to read ip register(0x%llx)", - phys_addr); - return 0U; - } - } else { - /* Fall back to un-registered IP method */ - void __iomem *ptr = NULL; - - ptr = ioremap(phys_addr, 0x4); - if (!ptr) { - tegra_soc_hwpm_err( - "Failed to map register(0x%llx)", - phys_addr); - return 0U; - } - reg_val = __raw_readl(ptr); - iounmap(ptr); - } - return reg_val; - } -} - -void ip_writel(struct tegra_soc_hwpm *hwpm, u64 phys_addr, u32 reg_val) -{ - tegra_soc_hwpm_dbg("reg write: phys_addr(0x%llx), val(0x%x)", - phys_addr, reg_val); - - if (hwpm->fake_registers_enabled) { - fake_writel(hwpm, phys_addr, reg_val); - } else { - u64 ip_start_pa = 0ULL; - u32 dt_aperture = tegra_soc_hwpm_get_ip_aperture(hwpm, - phys_addr, &ip_start_pa); - struct tegra_soc_hwpm_ip_ops *ip_ops = - dt_aperture == TEGRA_SOC_HWPM_DT_APERTURE_INVALID ? - NULL : &hwpm->ip_info[dt_aperture]; - - if (ip_ops && (*ip_ops->hwpm_ip_reg_op)) { - int err = 0; - - tegra_soc_hwpm_dbg( - "aperture: %d ip_ops offset(0x%llx)", - dt_aperture, (phys_addr - ip_start_pa)); - err = (*ip_ops->hwpm_ip_reg_op)(ip_ops->ip_dev, - TEGRA_SOC_HWPM_IP_REG_OP_WRITE, - (phys_addr - ip_start_pa), ®_val); - if (err < 0) { - tegra_soc_hwpm_err( - "write ip reg(0x%llx) val 0x%x failed", - phys_addr, reg_val); - return; - } - } else { - /* Fall back to un-registered IP method */ - void __iomem *ptr = NULL; - - ptr = ioremap(phys_addr, 0x4); - if (!ptr) { - tegra_soc_hwpm_err( - "Failed to map register(0x%llx)", - phys_addr); - return; - } - __raw_writel(reg_val, ptr); - iounmap(ptr); - } - } -} - -/* - * Read a register from the EXEC_REG_OPS IOCTL. It is assumed that the allowlist - * check has been done before calling this function. 
- */ -u32 ioctl_readl(struct tegra_soc_hwpm *hwpm, - struct hwpm_resource_aperture *aperture, - u64 addr) -{ - u32 reg_val = 0; - - if (!aperture) { - tegra_soc_hwpm_err("aperture is NULL"); - return 0; - } - - if (aperture->is_ip) { - reg_val = ip_readl(hwpm, addr); - } else { - reg_val = hwpm_readl(hwpm, aperture->dt_aperture, - addr - aperture->start_pa); - } - return reg_val; -} - -/* - * Write a register from the EXEC_REG_OPS IOCTL. It is assumed that the - * allowlist check has been done before calling this function. - */ -void ioctl_writel(struct tegra_soc_hwpm *hwpm, - struct hwpm_resource_aperture *aperture, - u64 addr, - u32 val) -{ - if (!aperture) { - tegra_soc_hwpm_err("aperture is NULL"); - return; - } - - if (aperture->is_ip) { - ip_writel(hwpm, addr, val); - } else { - hwpm_writel(hwpm, aperture->dt_aperture, - addr - aperture->start_pa, val); - } -} - -/* Read Modify Write register operation */ -int reg_rmw(struct tegra_soc_hwpm *hwpm, - struct hwpm_resource_aperture *aperture, - u32 dt_aperture, - u64 addr, - u32 field_mask, - u32 field_val, - bool is_ioctl, - bool is_ip) -{ - u32 reg_val = 0; - - if (is_ioctl) { - if (!aperture) { - tegra_soc_hwpm_err("aperture is NULL"); - return -EIO; - } - } - if (!is_ip) { - if (!tegra_soc_hwpm_is_dt_aperture(dt_aperture)) { - tegra_soc_hwpm_err("Invalid dt_aperture(%d)", - dt_aperture); - return -EIO; - } - } - - /* Read current register value */ - if (is_ioctl) - reg_val = ioctl_readl(hwpm, aperture, addr); - else if (is_ip) - reg_val = ip_readl(hwpm, addr); - else - reg_val = hwpm_readl(hwpm, dt_aperture, addr); - - /* Clear and write masked bits */ - reg_val &= ~field_mask; - reg_val |= field_val & field_mask; - - /* Write modified value to register */ - if (is_ioctl) - ioctl_writel(hwpm, aperture, addr, reg_val); - else if (is_ip) - ip_writel(hwpm, addr, reg_val); - else - hwpm_writel(hwpm, dt_aperture, addr, reg_val); - - return 0; -} diff --git a/tegra-soc-hwpm-io.h b/tegra-soc-hwpm-io.h deleted file mode 100644 index ad3304d..0000000 --- a/tegra-soc-hwpm-io.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * tegra-soc-hwpm-io.h: - * This header defines register read/write APIs for the Tegra SOC HWPM driver. - * - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#ifndef TEGRA_SOC_HWPM_IO_H -#define TEGRA_SOC_HWPM_IO_H - -struct tegra_soc_hwpm; -struct hwpm_resource_aperture; - -struct hwpm_resource_aperture *find_hwpm_aperture(struct tegra_soc_hwpm *hwpm, - u64 phys_addr, - bool use_absolute_base, - bool check_reservation, - u64 *updated_pa); -u32 hwpm_readl(struct tegra_soc_hwpm *hwpm, - u32 dt_aperture, - u32 reg_offset); -void hwpm_writel(struct tegra_soc_hwpm *hwpm, - u32 dt_aperture, - u32 reg_offset, u32 val); -u32 ip_readl(struct tegra_soc_hwpm *hwpm, u64 phys_addr); -void ip_writel(struct tegra_soc_hwpm *hwpm, u64 phys_addr, u32 reg_val); -u32 ioctl_readl(struct tegra_soc_hwpm *hwpm, - struct hwpm_resource_aperture *aperture, - u64 addr); -void ioctl_writel(struct tegra_soc_hwpm *hwpm, - struct hwpm_resource_aperture *aperture, - u64 addr, - u32 val); -int reg_rmw(struct tegra_soc_hwpm *hwpm, - struct hwpm_resource_aperture *aperture, - u32 dt_aperture, - u64 addr, - u32 field_mask, - u32 field_val, - bool is_ioctl, - bool is_ip); - -#endif /* TEGRA_SOC_HWPM_IO_H */ diff --git a/tegra-soc-hwpm-ip.c b/tegra-soc-hwpm-ip.c deleted file mode 100644 index 8507723..0000000 --- a/tegra-soc-hwpm-ip.c +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * tegra-soc-hwpm-ip.c: - * This file contains functions for SOC HWPM <-> IPC communication. 
- */ - -#include -#include -#include -#include "tegra-soc-hwpm-log.h" -#include "tegra-soc-hwpm.h" - -struct platform_device *tegra_soc_hwpm_pdev; - -void tegra_soc_hwpm_ip_register(struct tegra_soc_hwpm_ip_ops *hwpm_ip_ops) -{ - struct tegra_soc_hwpm *hwpm = NULL; - u32 dt_aperture; - - tegra_soc_hwpm_dbg("HWPM Registered IP 0x%llx", - hwpm_ip_ops->ip_base_address); - - if (tegra_soc_hwpm_pdev == NULL) { - tegra_soc_hwpm_dbg( - "IP register before SOC HWPM 0x%llx", - hwpm_ip_ops->ip_base_address); - } else { - if (hwpm_ip_ops->ip_dev == NULL) { - tegra_soc_hwpm_err("IP dev is NULL"); - return; - } - hwpm = platform_get_drvdata(tegra_soc_hwpm_pdev); - dt_aperture = tegra_soc_hwpm_get_ip_aperture(hwpm, - hwpm_ip_ops->ip_base_address, NULL); - if (dt_aperture != TEGRA_SOC_HWPM_DT_APERTURE_INVALID) { - memcpy(&hwpm->ip_info[dt_aperture], hwpm_ip_ops, - sizeof(struct tegra_soc_hwpm_ip_ops)); - } else { - tegra_soc_hwpm_err( - "SOC HWPM has no support for 0x%llx", - hwpm_ip_ops->ip_base_address); - } - } - -} - -void tegra_soc_hwpm_ip_unregister(struct tegra_soc_hwpm_ip_ops *hwpm_ip_ops) -{ - struct tegra_soc_hwpm *hwpm = NULL; - u32 dt_aperture; - - if (tegra_soc_hwpm_pdev == NULL) { - tegra_soc_hwpm_dbg("IP unregister before SOC HWPM 0x%llx", - hwpm_ip_ops->ip_base_address); - } else { - if (hwpm_ip_ops->ip_dev == NULL) { - tegra_soc_hwpm_err("IP dev is NULL"); - return; - } - hwpm = platform_get_drvdata(tegra_soc_hwpm_pdev); - dt_aperture = tegra_soc_hwpm_get_ip_aperture(hwpm, - hwpm_ip_ops->ip_base_address, NULL); - if (dt_aperture != TEGRA_SOC_HWPM_DT_APERTURE_INVALID) { - memset(&hwpm->ip_info[dt_aperture], 0, - sizeof(struct tegra_soc_hwpm_ip_ops)); - } - } -} diff --git a/tegra-soc-hwpm-log.h b/tegra-soc-hwpm-log.h deleted file mode 100644 index deb1659..0000000 --- a/tegra-soc-hwpm-log.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * tegra-soc-hwpm-log.h: - * This is the logging API header for the Tegra SOC HWPM driver. - * - * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#ifndef TEGRA_SOC_HWPM_LOG_H -#define TEGRA_SOC_HWPM_LOG_H - -#define TEGRA_SOC_HWPM_MODULE_NAME "tegra-soc-hwpm" - -enum tegra_soc_hwpm_log_type { - tegra_soc_hwpm_log_err, /* Error prints */ - tegra_soc_hwpm_log_dbg, /* Debug prints */ -}; - -#define tegra_soc_hwpm_err(fmt, arg...) \ - tegra_soc_hwpm_log(__func__, __LINE__, tegra_soc_hwpm_log_err, \ - fmt, ##arg) -#define tegra_soc_hwpm_dbg(fmt, arg...) \ - tegra_soc_hwpm_log(__func__, __LINE__, tegra_soc_hwpm_log_dbg, \ - fmt, ##arg) - -void tegra_soc_hwpm_log(const char *func, int line, int type, const char *fmt, ...); - -#endif /* TEGRA_SOC_HWPM_LOG_H */
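
Usage sketch (IP-side): the new os/linux/tegra_hwpm_ip.c exports tegra_soc_hwpm_ip_register()/tegra_soc_hwpm_ip_unregister() so an IP driver can route HWPM perfmux accesses through its own register-op callback instead of the ioremap() fallback in ip_readl()/ip_writel(). A minimal client could look roughly like the snippet below; the header path, the hwpm_ip_reg_op() prototype and the TEGRA_SOC_HWPM_IP_OFA index are inferred from the call sites in this patch rather than taken from the uapi header, so treat them as assumptions.

/*
 * Sketch of an IP driver hooking into tegra_soc_hwpm_ip_register().
 * Header path and the exact hwpm_ip_reg_op() prototype are assumptions
 * inferred from the call sites in os/linux/tegra_hwpm_io.c.
 */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>
#include <uapi/linux/tegra-soc-hwpm.h>	/* assumed header path */

struct my_ip_device {
	void __iomem *regs;	/* ioremapped perfmux registers */
	u64 perfmux_base_pa;	/* absolute PA, matches ip_base_address */
};

/* Called by the HWPM driver instead of a raw ioremap() of the perfmux. */
static int my_ip_hwpm_reg_op(void *ip_dev, enum tegra_soc_hwpm_ip_reg_op reg_op,
			     u64 reg_offset, u32 *reg_data)
{
	struct my_ip_device *ip = ip_dev;

	switch (reg_op) {
	case TEGRA_SOC_HWPM_IP_REG_OP_READ:
		*reg_data = readl(ip->regs + reg_offset);
		return 0;
	case TEGRA_SOC_HWPM_IP_REG_OP_WRITE:
		writel(*reg_data, ip->regs + reg_offset);
		return 0;
	default:
		return -EINVAL;
	}
}

static void my_ip_hwpm_register(struct my_ip_device *ip)
{
	struct tegra_soc_hwpm_ip_ops ops = {
		.ip_dev          = ip,
		.ip_base_address = ip->perfmux_base_pa,
		.ip_index        = TEGRA_SOC_HWPM_IP_OFA,	/* assumed enum value */
		.hwpm_ip_reg_op  = my_ip_hwpm_reg_op,
	};

	/*
	 * If the HWPM device has not probed yet (tegra_soc_hwpm_pdev == NULL),
	 * the call is only logged; otherwise extract_ip_ops() records the ops.
	 */
	tegra_soc_hwpm_ip_register(&ops);
}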
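
Usage sketch (allowlist query): query_allowlist_ioctl() now implements a two-pass protocol: a call with allowlist == NULL only reports allowlist_size, and a second call with a user buffer lets tegra_soc_hwpm_update_allowlist() pin the pages and concatenate the per-resource allowlists into it. Roughly, from userspace (the /dev node name, the u64 entry size and the field types are assumptions based on TEGRA_SOC_HWPM_MODULE_NAME and the alist code above; the real ioctl request code comes from the uapi header and is passed in here):

/* Two-pass QUERY_ALLOWLIST sketch; assumes the BIND IOCTL has already run. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/tegra-soc-hwpm.h>	/* assumed installed uapi header */

int query_allowlist(unsigned long query_allowlist_cmd)
{
	struct tegra_soc_hwpm_query_allowlist args = { 0 };
	uint64_t *buf = NULL;
	int fd = open("/dev/tegra-soc-hwpm", O_RDWR);	/* node name assumed */

	if (fd < 0)
		return -1;

	/* Pass 1: allowlist == NULL, the driver only fills allowlist_size. */
	if (ioctl(fd, query_allowlist_cmd, &args) < 0)
		goto fail;

	/*
	 * Pass 2: hand over a buffer big enough for allowlist_size entries;
	 * the driver pins these pages and concatenates the allowlists into it.
	 */
	buf = calloc(args.allowlist_size, sizeof(*buf));
	if (buf == NULL)
		goto fail;
	args.allowlist = buf;	/* a cast may be needed if the field is __u64 */
	if (ioctl(fd, query_allowlist_cmd, &args) < 0)
		goto fail;

	printf("allowlist: %llu entries\n",
	       (unsigned long long)args.allowlist_size);
	free(buf);
	close(fd);
	return 0;

fail:
	free(buf);
	close(fd);
	return -1;
}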
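
Usage sketch (regops): exec_reg_ops_ioctl() now delegates to tegra_soc_hwpm_exec_regops(), but the userspace contract is unchanged: fill struct tegra_soc_hwpm_exec_reg_ops after BIND and check the per-op status. A rough RD32 example follows, with the request code and target register supplied by the caller since both are uapi/platform specifics not shown in this patch:

/* Single RD32 via EXEC_REG_OPS; fd must already have completed BIND. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/tegra-soc-hwpm.h>	/* assumed installed uapi header */

int read_one_register(int fd, unsigned long exec_reg_ops_cmd, uint64_t phys_addr)
{
	struct tegra_soc_hwpm_exec_reg_ops regops;

	memset(&regops, 0, sizeof(regops));
	regops.mode = TEGRA_SOC_HWPM_REG_OP_MODE_CONT_ON_ERR;
	regops.op_count = 1;
	regops.ops[0].phys_addr = phys_addr;	/* must be an allowlisted register */
	regops.ops[0].cmd = TEGRA_SOC_HWPM_REG_OP_CMD_RD32;

	if (ioctl(fd, exec_reg_ops_cmd, &regops) < 0)
		return -1;

	if (regops.ops[0].status != TEGRA_SOC_HWPM_REG_OP_STATUS_SUCCESS)
		return -1;

	printf("0x%llx = 0x%x\n", (unsigned long long)phys_addr,
	       regops.ops[0].reg_val_lo);
	return 0;
}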