tegra: hwpm: add HALs to support multiple chips

Add the HALs below to make the code chip agnostic. This will allow
chips after t234 to provide their own chip-specific HAL implementations.
- get_pma_int_idx: get PMA's internal index corresponding to active chip
- get_rtr_int_idx: get RTR's internal index corresponding to active chip
- get_ip_max_idx: get MAX IP index corresponding to active chip

Move chip agnostic code to common files.

Jira THWPM-41

Change-Id: I5518469b1473fe7f66b6517cee729cf46520bbac
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2675515
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: Vasuki Shankar <vasukis@nvidia.com>
Reviewed-by: Seema Khowala <seemaj@nvidia.com>
GVS: Gerrit_Virtual_Submit
This commit is contained in:
Vedashree Vidwans
2022-03-07 14:08:00 -08:00
committed by mobile promotions
parent 53f8d0799c
commit ea5e4e406b
18 changed files with 1908 additions and 1723 deletions

View File

@@ -37,6 +37,8 @@ obj-y += os/linux/tegra_hwpm_ioctl.o
obj-y += os/linux/tegra_hwpm_log.o
obj-y += common/tegra_hwpm_alist_utils.o
obj-y += common/tegra_hwpm_aperture_utils.o
obj-y += common/tegra_hwpm_ip_utils.o
obj-y += common/tegra_hwpm_mem_buf_utils.o
obj-y += common/tegra_hwpm_regops_utils.o
obj-y += common/tegra_hwpm_resource_utils.o

View File

@@ -23,11 +23,98 @@
#include <uapi/linux/tegra-soc-hwpm-uapi.h>
#include <tegra_hwpm_log.h>
#include <tegra_hwpm.h>
#include <tegra_hwpm_log.h>
#include <tegra_hwpm_common.h>
#include <tegra_hwpm_static_analysis.h>
/*
 * tegra_hwpm_get_alist_size - compute the combined allowlist size.
 *
 * Walks every IP of the active chip (count taken from the get_ip_max_idx
 * HAL) and, for each hardware instance present in the IP's floorsweep
 * mask, accumulates the allowlist sizes of all matching perfmux and
 * perfmon apertures into hwpm->full_alist_size. Accumulation uses the
 * overflow-checked tegra_hwpm_safe_add_u64() helper.
 *
 * A populated aperture with a NULL alist is logged as an error but does
 * not abort the scan. Always returns 0.
 *
 * NOTE(review): hwpm->full_alist_size is not reset here; the caller is
 * expected to have zeroed it beforehand — confirm at call sites.
 */
static int tegra_hwpm_get_alist_size(struct tegra_soc_hwpm *hwpm)
{
	struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
	u32 ip_idx;
	u32 perfmux_idx, perfmon_idx;
	unsigned long inst_idx = 0UL;
	unsigned long floorsweep_info = 0UL;
	struct hwpm_ip *chip_ip = NULL;
	hwpm_ip_perfmux *perfmux = NULL;
	hwpm_ip_perfmon *perfmon = NULL;

	tegra_hwpm_fn(hwpm, " ");

	for (ip_idx = 0U; ip_idx < active_chip->get_ip_max_idx(hwpm);
		ip_idx++) {
		chip_ip = active_chip->chip_ips[ip_idx];

		/* Skip unavailable IPs */
		if (!chip_ip->reserved) {
			continue;
		}

		if (chip_ip->fs_mask == 0U) {
			/* No IP instance is available */
			continue;
		}

		floorsweep_info = (unsigned long)chip_ip->fs_mask;

		/* fs_mask is at most 32 bits; visit each present instance */
		for_each_set_bit(inst_idx, &floorsweep_info, 32U) {
			/* Add perfmux alist size to full alist size */
			for (perfmux_idx = 0U;
				perfmux_idx < chip_ip->num_perfmux_slots;
				perfmux_idx++) {
				perfmux = chip_ip->ip_perfmux[perfmux_idx];

				/* Sparse slot array: NULL means no aperture */
				if (perfmux == NULL) {
					continue;
				}

				/* Only apertures of this HW instance */
				if (perfmux->hw_inst_mask != BIT(inst_idx)) {
					continue;
				}

				if (perfmux->alist) {
					hwpm->full_alist_size =
						tegra_hwpm_safe_add_u64(
						hwpm->full_alist_size,
						perfmux->alist_size);
				} else {
					tegra_hwpm_err(hwpm, "IP %d"
						" perfmux %d NULL alist",
						ip_idx, perfmux_idx);
				}
			}

			/* Add perfmon alist size to full alist size */
			for (perfmon_idx = 0U;
				perfmon_idx < chip_ip->num_perfmon_slots;
				perfmon_idx++) {
				perfmon = chip_ip->ip_perfmon[perfmon_idx];

				/* Sparse slot array: NULL means no aperture */
				if (perfmon == NULL) {
					continue;
				}

				/* Only apertures of this HW instance */
				if (perfmon->hw_inst_mask != BIT(inst_idx)) {
					continue;
				}

				if (perfmon->alist) {
					hwpm->full_alist_size =
						tegra_hwpm_safe_add_u64(
						hwpm->full_alist_size,
						perfmon->alist_size);
				} else {
					tegra_hwpm_err(hwpm, "IP %d"
						" perfmon %d NULL alist",
						ip_idx, perfmon_idx);
				}
			}
		}
	}

	return 0;
}
int tegra_hwpm_get_allowlist_size(struct tegra_soc_hwpm *hwpm)
{
int ret = 0;
@@ -36,11 +123,7 @@ int tegra_hwpm_get_allowlist_size(struct tegra_soc_hwpm *hwpm)
tegra_hwpm_fn(hwpm, " ");
if (hwpm->active_chip->get_alist_size == NULL) {
tegra_hwpm_err(hwpm, "get_alist_size uninitialized");
return -ENODEV;
}
ret = hwpm->active_chip->get_alist_size(hwpm);
ret = tegra_hwpm_get_alist_size(hwpm);
if (ret != 0) {
tegra_hwpm_err(hwpm, "get_alist_size failed");
return ret;
@@ -49,6 +132,105 @@ int tegra_hwpm_get_allowlist_size(struct tegra_soc_hwpm *hwpm)
return 0;
}
/*
 * tegra_hwpm_combine_alist - gather per-aperture allowlists into one array.
 *
 * Walks all reserved, non-floorswept IPs of the active chip and copies the
 * allowlist of every matching perfmux/perfmon aperture into @alist using
 * the chip's copy_alist HAL. @full_alist_idx tracks the write position and
 * is validated at the end against hwpm->full_alist_size (computed earlier
 * by tegra_hwpm_get_alist_size()).
 *
 * Returns 0 on success, -ENODEV if the copy_alist HAL is not installed,
 * -EINVAL on a final size mismatch, or the HAL's error code when a copy
 * fails.
 */
static int tegra_hwpm_combine_alist(struct tegra_soc_hwpm *hwpm, u64 *alist)
{
	struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
	u32 ip_idx;
	u32 perfmux_idx, perfmon_idx;
	unsigned long inst_idx = 0UL;
	unsigned long floorsweep_info = 0UL;
	struct hwpm_ip *chip_ip = NULL;
	hwpm_ip_perfmux *perfmux = NULL;
	hwpm_ip_perfmon *perfmon = NULL;
	u64 full_alist_idx = 0;
	int err = 0;

	tegra_hwpm_fn(hwpm, " ");

	/*
	 * The HAL pointer cannot change per-IP: validate it once up front
	 * instead of re-checking on every iteration of the IP loop.
	 */
	if (hwpm->active_chip->copy_alist == NULL) {
		tegra_hwpm_err(hwpm, "copy_alist uninitialized");
		return -ENODEV;
	}

	for (ip_idx = 0U; ip_idx < active_chip->get_ip_max_idx(hwpm);
		ip_idx++) {
		chip_ip = active_chip->chip_ips[ip_idx];

		/* Skip unavailable IPs */
		if (!chip_ip->reserved) {
			continue;
		}

		if (chip_ip->fs_mask == 0U) {
			/* No IP instance is available */
			continue;
		}

		floorsweep_info = (unsigned long)chip_ip->fs_mask;

		for_each_set_bit(inst_idx, &floorsweep_info, 32U) {
			/* Copy perfmux alist to full alist array */
			for (perfmux_idx = 0U;
				perfmux_idx < chip_ip->num_perfmux_slots;
				perfmux_idx++) {
				perfmux = chip_ip->ip_perfmux[perfmux_idx];

				/* Sparse slot array: NULL means no aperture */
				if (perfmux == NULL) {
					continue;
				}

				/* Only apertures of this HW instance */
				if (perfmux->hw_inst_mask != BIT(inst_idx)) {
					continue;
				}

				err = hwpm->active_chip->copy_alist(hwpm,
					perfmux, alist, &full_alist_idx);
				if (err != 0) {
					tegra_hwpm_err(hwpm, "IP %d"
						" perfmux %d alist copy failed",
						ip_idx, perfmux_idx);
					goto fail;
				}
			}

			/* Copy perfmon alist to full alist array */
			for (perfmon_idx = 0U;
				perfmon_idx < chip_ip->num_perfmon_slots;
				perfmon_idx++) {
				perfmon = chip_ip->ip_perfmon[perfmon_idx];

				/* Sparse slot array: NULL means no aperture */
				if (perfmon == NULL) {
					continue;
				}

				/* Only apertures of this HW instance */
				if (perfmon->hw_inst_mask != BIT(inst_idx)) {
					continue;
				}

				err = hwpm->active_chip->copy_alist(hwpm,
					perfmon, alist, &full_alist_idx);
				if (err != 0) {
					tegra_hwpm_err(hwpm, "IP %d"
						" perfmon %d alist copy failed",
						ip_idx, perfmon_idx);
					goto fail;
				}
			}
		}
	}

	/* Check size of full alist with hwpm->full_alist_size*/
	if (full_alist_idx != hwpm->full_alist_size) {
		tegra_hwpm_err(hwpm, "full_alist_size 0x%llx doesn't match "
			"max full_alist_idx 0x%llx",
			hwpm->full_alist_size, full_alist_idx);
		err = -EINVAL;
	}

fail:
	return err;
}
int tegra_hwpm_update_allowlist(struct tegra_soc_hwpm *hwpm,
void *ioctl_struct)
{
@@ -112,11 +294,7 @@ int tegra_hwpm_update_allowlist(struct tegra_soc_hwpm *hwpm,
}
full_alist_u64 = (u64 *)(full_alist + offset);
if (hwpm->active_chip->combine_alist == NULL) {
tegra_hwpm_err(hwpm, "combine_alist uninitialized");
return -ENODEV;
}
err = hwpm->active_chip->combine_alist(hwpm, full_alist_u64);
err = tegra_hwpm_combine_alist(hwpm, full_alist_u64);
if (err != 0) {
goto alist_unmap;
}

View File

@@ -0,0 +1,426 @@
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/slab.h>
#include <linux/of_address.h>
#include <tegra_hwpm_static_analysis.h>
#include <tegra_hwpm_log.h>
#include <tegra_hwpm_io.h>
#include <tegra_hwpm.h>
/*
 * tegra_hwpm_perfmon_reserve - map a perfmon aperture from its DT resource.
 *
 * Looks up the named MEM resource on the platform device, ioremaps it via
 * devm_ioremap(), records the physical range, and (when fake registers are
 * enabled) allocates a zeroed shadow-register array covering the aperture.
 *
 * Returns 0 on success, -ENOMEM on resource lookup, mapping, or allocation
 * failure.
 */
int tegra_hwpm_perfmon_reserve(struct tegra_soc_hwpm *hwpm,
	hwpm_ip_perfmon *perfmon)
{
	struct resource *res = NULL;

	tegra_hwpm_fn(hwpm, " ");

	/* Reserve */
	res = platform_get_resource_byname(hwpm->pdev,
					IORESOURCE_MEM, perfmon->name);
	if ((!res) || (res->start == 0) || (res->end == 0)) {
		tegra_hwpm_err(hwpm, "Failed to get perfmon %s", perfmon->name);
		return -ENOMEM;
	}

	perfmon->dt_mmio = devm_ioremap(hwpm->dev, res->start,
					resource_size(res));
	/*
	 * devm_ioremap() returns NULL on failure, not an ERR_PTR value,
	 * so it must be NULL-checked; the previous IS_ERR() test could
	 * never detect a mapping failure and allowed a NULL dt_mmio to
	 * escape.
	 */
	if (perfmon->dt_mmio == NULL) {
		tegra_hwpm_err(hwpm, "Couldn't map perfmon %s", perfmon->name);
		return -ENOMEM;
	}

	perfmon->start_pa = res->start;
	perfmon->end_pa = res->end;

	if (hwpm->fake_registers_enabled) {
		u64 address_range = tegra_hwpm_safe_add_u64(
			tegra_hwpm_safe_sub_u64(res->end, res->start), 1ULL);
		u64 num_regs = address_range / sizeof(u32);

		/* kcalloc() checks num_regs * sizeof(u32) for overflow */
		perfmon->fake_registers = kcalloc(num_regs, sizeof(u32),
						GFP_KERNEL);
		if (perfmon->fake_registers == NULL) {
			tegra_hwpm_err(hwpm, "Perfmon (0x%llx - 0x%llx) "
				"Couldn't allocate memory for fake regs",
				perfmon->start_abs_pa, perfmon->end_abs_pa);
			return -ENOMEM;
		}
	}

	return 0;
}
/*
 * tegra_hwpm_perfmon_release - unmap a reserved perfmon aperture.
 *
 * Undoes tegra_hwpm_perfmon_reserve(): unmaps the MMIO region, clears the
 * recorded physical range, and frees the fake-register shadow array.
 *
 * Returns 0 on success, -EINVAL if the aperture was never mapped.
 */
int tegra_hwpm_perfmon_release(struct tegra_soc_hwpm *hwpm,
	hwpm_ip_perfmon *perfmon)
{
	tegra_hwpm_fn(hwpm, " ");

	if (!perfmon->dt_mmio) {
		tegra_hwpm_err(hwpm, "Perfmon was not mapped");
		return -EINVAL;
	}

	devm_iounmap(hwpm->dev, perfmon->dt_mmio);
	perfmon->dt_mmio = NULL;
	perfmon->start_pa = 0ULL;
	perfmon->end_pa = 0ULL;

	/* kfree(NULL) is a no-op, so no guard is needed */
	kfree(perfmon->fake_registers);
	perfmon->fake_registers = NULL;

	return 0;
}
/*
 * tegra_hwpm_perfmux_reserve - prepare a perfmux aperture for use.
 *
 * Perfmuxes live in the owning IP's register space, so no mapping is done
 * here. Instead, IP power management is disabled through the registered
 * ip_ops (a PM-disable failure is logged but not fatal), the absolute
 * physical range is recorded, and a zeroed fake-register shadow array is
 * allocated when fake registers are enabled.
 *
 * Returns 0 on success, -ENOMEM if the shadow allocation fails.
 */
int tegra_hwpm_perfmux_reserve(struct tegra_soc_hwpm *hwpm,
	hwpm_ip_perfmux *perfmux)
{
	int err = 0;
	int ret = 0;

	tegra_hwpm_fn(hwpm, " ");

	/*
	 * Indicate that HWPM driver is initializing monitoring.
	 * Since perfmux is controlled by IP, indicate monitoring enabled
	 * by disabling IP power management.
	 */
	/* Make sure that ip_ops are initialized */
	if ((perfmux->ip_ops.ip_dev != NULL) &&
		(perfmux->ip_ops.hwpm_ip_pm != NULL)) {
		err = (*perfmux->ip_ops.hwpm_ip_pm)(
			perfmux->ip_ops.ip_dev, true);
		if (err != 0) {
			/* Best-effort: log and continue reserving */
			tegra_hwpm_err(hwpm, "Runtime PM disable failed");
		}
	} else {
		tegra_hwpm_dbg(hwpm, hwpm_verbose, "Runtime PM not configured");
	}

	perfmux->start_pa = perfmux->start_abs_pa;
	perfmux->end_pa = perfmux->end_abs_pa;

	/* Allocate fake registers */
	if (hwpm->fake_registers_enabled) {
		u64 address_range = tegra_hwpm_safe_add_u64(
			tegra_hwpm_safe_sub_u64(
				perfmux->end_pa, perfmux->start_pa), 1ULL);
		u64 num_regs = address_range / sizeof(u32);

		/* kcalloc() checks num_regs * sizeof(u32) for overflow */
		perfmux->fake_registers = kcalloc(num_regs, sizeof(u32),
						GFP_KERNEL);
		if (perfmux->fake_registers == NULL) {
			tegra_hwpm_err(hwpm, "Aperture(0x%llx - 0x%llx):"
				" Couldn't allocate memory for fake registers",
				perfmux->start_pa, perfmux->end_pa);
			ret = -ENOMEM;
			goto fail;
		}
	}

fail:
	return ret;
}
/*
 * tegra_hwpm_perfmux_release - release a perfmux aperture.
 *
 * Perfmuxes are not mapped by this driver, so the only cleanup required
 * is freeing the fake-register shadow array. Always returns 0.
 */
int tegra_hwpm_perfmux_release(struct tegra_soc_hwpm *hwpm,
	hwpm_ip_perfmux *perfmux)
{
	tegra_hwpm_fn(hwpm, " ");

	/* kfree(NULL) is a no-op, so no guard is needed */
	kfree(perfmux->fake_registers);
	perfmux->fake_registers = NULL;

	return 0;
}
int tegra_hwpm_reserve_pma(struct tegra_soc_hwpm *hwpm)
{
u32 perfmux_idx = 0U, perfmon_idx;
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
struct hwpm_ip *chip_ip_pma = NULL;
hwpm_ip_perfmux *pma_perfmux = NULL;
hwpm_ip_perfmon *pma_perfmon = NULL;
int ret = 0, err = 0;
tegra_hwpm_fn(hwpm, " ");
chip_ip_pma = active_chip->chip_ips[active_chip->get_pma_int_idx(hwpm)];
/* Make sure that PMA is not reserved */
if (chip_ip_pma->reserved == true) {
tegra_hwpm_err(hwpm, "PMA already reserved, ignoring");
return 0;
}
/* Reserve PMA perfmux */
for (perfmux_idx = 0U; perfmux_idx < chip_ip_pma->num_perfmux_slots;
perfmux_idx++) {
pma_perfmux = chip_ip_pma->ip_perfmux[perfmux_idx];
if (pma_perfmux == NULL) {
continue;
}
/* Since PMA is hwpm component, use perfmon reserve function */
ret = tegra_hwpm_perfmon_reserve(hwpm, pma_perfmux);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"PMA perfmux %d reserve failed", perfmux_idx);
return ret;
}
chip_ip_pma->fs_mask |= pma_perfmux->hw_inst_mask;
}
/* Reserve PMA perfmons */
for (perfmon_idx = 0U; perfmon_idx < chip_ip_pma->num_perfmon_slots;
perfmon_idx++) {
pma_perfmon = chip_ip_pma->ip_perfmon[perfmon_idx];
if (pma_perfmon == NULL) {
continue;
}
ret = tegra_hwpm_perfmon_reserve(hwpm, pma_perfmon);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"PMA perfmon %d reserve failed", perfmon_idx);
goto fail;
}
}
chip_ip_pma->reserved = true;
return 0;
fail:
for (perfmux_idx = 0U; perfmux_idx < chip_ip_pma->num_perfmux_slots;
perfmux_idx++) {
pma_perfmux = chip_ip_pma->ip_perfmux[perfmux_idx];
if (pma_perfmux == NULL) {
continue;
}
/* Since PMA is hwpm component, use perfmon release function */
err = tegra_hwpm_perfmon_release(hwpm, pma_perfmux);
if (err != 0) {
tegra_hwpm_err(hwpm,
"PMA perfmux %d release failed", perfmux_idx);
}
chip_ip_pma->fs_mask &= ~(pma_perfmux->hw_inst_mask);
}
return ret;
}
/*
 * tegra_hwpm_release_pma - release all PMA perfmux and perfmon apertures.
 *
 * Reverse of tegra_hwpm_reserve_pma(): unmaps every populated PMA perfmux
 * (clearing its fs_mask bit) and perfmon via tegra_hwpm_perfmon_release(),
 * then marks PMA as no longer reserved. A no-op if PMA was never reserved.
 *
 * Returns 0 on success or the first release error.
 *
 * NOTE(review): a mid-loop release failure returns immediately, leaving
 * PMA partially released with reserved still true — confirm callers treat
 * this as fatal.
 */
int tegra_hwpm_release_pma(struct tegra_soc_hwpm *hwpm)
{
	int ret = 0;
	u32 perfmux_idx, perfmon_idx;
	struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
	struct hwpm_ip *chip_ip_pma = NULL;
	hwpm_ip_perfmux *pma_perfmux = NULL;
	hwpm_ip_perfmon *pma_perfmon = NULL;

	tegra_hwpm_fn(hwpm, " ");

	/* PMA's internal IP index is chip specific: ask the HAL */
	chip_ip_pma = active_chip->chip_ips[active_chip->get_pma_int_idx(hwpm)];

	if (!chip_ip_pma->reserved) {
		tegra_hwpm_dbg(hwpm, hwpm_info, "PMA wasn't mapped, ignoring.");
		return 0;
	}

	/* Release PMA perfmux */
	for (perfmux_idx = 0U; perfmux_idx < chip_ip_pma->num_perfmux_slots;
		perfmux_idx++) {
		pma_perfmux = chip_ip_pma->ip_perfmux[perfmux_idx];

		/* Sparse slot array: NULL means no aperture */
		if (pma_perfmux == NULL) {
			continue;
		}

		/* Since PMA is hwpm component, use perfmon release function */
		ret = tegra_hwpm_perfmon_release(hwpm, pma_perfmux);
		if (ret != 0) {
			tegra_hwpm_err(hwpm,
				"PMA perfmux %d release failed", perfmux_idx);
			return ret;
		}
		chip_ip_pma->fs_mask &= ~(pma_perfmux->hw_inst_mask);
	}

	/* Release PMA perfmons */
	for (perfmon_idx = 0U; perfmon_idx < chip_ip_pma->num_perfmon_slots;
		perfmon_idx++) {
		pma_perfmon = chip_ip_pma->ip_perfmon[perfmon_idx];

		/* Sparse slot array: NULL means no aperture */
		if (pma_perfmon == NULL) {
			continue;
		}

		ret = tegra_hwpm_perfmon_release(hwpm, pma_perfmon);
		if (ret != 0) {
			tegra_hwpm_err(hwpm,
				"PMA perfmon %d release failed", perfmon_idx);
			return ret;
		}
	}

	chip_ip_pma->reserved = false;

	return 0;
}
/*
 * tegra_hwpm_reserve_rtr - reserve all RTR (router) perfmux apertures.
 *
 * RTR is an HWPM-owned component and must be reserved after PMA, because
 * one RTR perfmux aliases the PMA aperture: when an RTR perfmux has the
 * same absolute start address as PMA's (single) perfmux, its mapping and
 * fake registers are borrowed from PMA instead of being mapped again.
 * All other RTR perfmuxes are mapped with tegra_hwpm_perfmon_reserve().
 * RTR has no perfmons.
 *
 * Returns 0 on success (or if RTR was already reserved), -EINVAL if PMA
 * is not reserved yet, or the first reservation error.
 *
 * NOTE(review): a mid-loop reserve failure returns without unwinding
 * earlier RTR perfmuxes — confirm callers release on failure.
 */
int tegra_hwpm_reserve_rtr(struct tegra_soc_hwpm *hwpm)
{
	int ret = 0;
	u32 perfmux_idx = 0U, perfmon_idx;
	struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
	struct hwpm_ip *chip_ip_rtr = NULL;
	struct hwpm_ip *chip_ip_pma = NULL;
	hwpm_ip_perfmux *pma_perfmux = NULL;
	hwpm_ip_perfmux *rtr_perfmux = NULL;

	tegra_hwpm_fn(hwpm, " ");

	/* Internal PMA/RTR IP indices are chip specific: ask the HALs */
	chip_ip_pma = active_chip->chip_ips[active_chip->get_pma_int_idx(hwpm)];
	chip_ip_rtr = active_chip->chip_ips[active_chip->get_rtr_int_idx(hwpm)];

	/* Currently, PMA has only one perfmux */
	pma_perfmux = &chip_ip_pma->perfmux_static_array[0U];

	/* Verify that PMA is reserved before RTR */
	if (chip_ip_pma->reserved == false) {
		tegra_hwpm_err(hwpm, "PMA should be reserved before RTR");
		return -EINVAL;
	}

	/* Make sure that RTR is not reserved */
	if (chip_ip_rtr->reserved == true) {
		tegra_hwpm_err(hwpm, "RTR already reserved, ignoring");
		return 0;
	}

	/* Reserve RTR perfmuxes */
	for (perfmux_idx = 0U; perfmux_idx < chip_ip_rtr->num_perfmux_slots;
		perfmux_idx++) {
		rtr_perfmux = chip_ip_rtr->ip_perfmux[perfmux_idx];

		/* Sparse slot array: NULL means no aperture */
		if (rtr_perfmux == NULL) {
			continue;
		}

		if (rtr_perfmux->start_abs_pa == pma_perfmux->start_abs_pa) {
			/* This is PMA perfmux wrt RTR aperture */
			/* Share PMA's existing mapping; do not map twice */
			rtr_perfmux->start_pa = pma_perfmux->start_pa;
			rtr_perfmux->end_pa = pma_perfmux->end_pa;
			rtr_perfmux->dt_mmio = pma_perfmux->dt_mmio;
			if (hwpm->fake_registers_enabled) {
				rtr_perfmux->fake_registers =
					pma_perfmux->fake_registers;
			}
		} else {
			/* Since RTR is hwpm component,
			 * use perfmon reserve function */
			ret = tegra_hwpm_perfmon_reserve(hwpm, rtr_perfmux);
			if (ret != 0) {
				tegra_hwpm_err(hwpm,
					"RTR perfmux %d reserve failed",
					perfmux_idx);
				return ret;
			}
		}
		chip_ip_rtr->fs_mask |= rtr_perfmux->hw_inst_mask;
	}

	/* Reserve RTR perfmons */
	for (perfmon_idx = 0U; perfmon_idx < chip_ip_rtr->num_perfmon_slots;
		perfmon_idx++) {
		/* No perfmons in RTR */
		/* Intentionally empty: kept as a placeholder for symmetry */
	}

	chip_ip_rtr->reserved = true;

	return ret;
}
/*
 * tegra_hwpm_release_rtr - release all RTR (router) perfmux apertures.
 *
 * Reverse of tegra_hwpm_reserve_rtr(). RTR must be released while PMA is
 * still reserved: the RTR perfmux that aliases the PMA aperture does not
 * own its mapping, so it is only cleared here (PMA unmaps it later); all
 * other RTR perfmuxes are unmapped via tegra_hwpm_perfmon_release().
 * RTR has no perfmons.
 *
 * Returns 0 on success (or if RTR was never reserved), -EINVAL if PMA is
 * already released, or the first release error.
 */
int tegra_hwpm_release_rtr(struct tegra_soc_hwpm *hwpm)
{
	int ret = 0;
	u32 perfmux_idx, perfmon_idx;
	struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
	struct hwpm_ip *chip_ip_rtr = NULL;
	struct hwpm_ip *chip_ip_pma = NULL;
	hwpm_ip_perfmux *pma_perfmux = NULL;
	hwpm_ip_perfmux *rtr_perfmux = NULL;

	tegra_hwpm_fn(hwpm, " ");

	/* Internal PMA/RTR IP indices are chip specific: ask the HALs */
	chip_ip_pma = active_chip->chip_ips[active_chip->get_pma_int_idx(hwpm)];
	chip_ip_rtr = active_chip->chip_ips[active_chip->get_rtr_int_idx(hwpm)];

	/* Currently, PMA has only one perfmux */
	pma_perfmux = &chip_ip_pma->perfmux_static_array[0U];

	/* Verify that PMA isn't released before RTR */
	if (chip_ip_pma->reserved == false) {
		tegra_hwpm_err(hwpm, "PMA shouldn't be released before RTR");
		return -EINVAL;
	}

	if (!chip_ip_rtr->reserved) {
		tegra_hwpm_dbg(hwpm, hwpm_info, "RTR wasn't mapped, ignoring.");
		return 0;
	}

	/* Release RTR perfmux */
	for (perfmux_idx = 0U; perfmux_idx < chip_ip_rtr->num_perfmux_slots;
		perfmux_idx++) {
		rtr_perfmux = chip_ip_rtr->ip_perfmux[perfmux_idx];

		/* Sparse slot array: NULL means no aperture */
		if (rtr_perfmux == NULL) {
			continue;
		}

		if (rtr_perfmux->start_abs_pa == pma_perfmux->start_abs_pa) {
			/* This is PMA perfmux wrt RTR aperture */
			/* Mapping is owned by PMA: just drop the borrowed
			 * references, PMA release will unmap/free them */
			rtr_perfmux->start_pa = 0ULL;
			rtr_perfmux->end_pa = 0ULL;
			rtr_perfmux->dt_mmio = NULL;
			if (hwpm->fake_registers_enabled) {
				rtr_perfmux->fake_registers = NULL;
			}
		} else {
			/* RTR is hwpm component, use perfmon release func */
			ret = tegra_hwpm_perfmon_release(hwpm, rtr_perfmux);
			if (ret != 0) {
				tegra_hwpm_err(hwpm,
					"RTR perfmux %d release failed",
					perfmux_idx);
				return ret;
			}
		}
		chip_ip_rtr->fs_mask &= ~(rtr_perfmux->hw_inst_mask);
	}

	/* Release RTR perfmon */
	for (perfmon_idx = 0U; perfmon_idx < chip_ip_rtr->num_perfmon_slots;
		perfmon_idx++) {
		/* No RTR perfmons */
		/* Intentionally empty: kept as a placeholder for symmetry */
	}

	chip_ip_rtr->reserved = false;

	return 0;
}

View File

@@ -29,9 +29,10 @@
#include <tegra_hwpm_io.h>
#include <tegra_hwpm.h>
#include <tegra_hwpm_common.h>
#include <hal/t234/t234_hwpm_init.h>
int tegra_hwpm_init_chip_info(struct tegra_soc_hwpm *hwpm)
static int tegra_hwpm_init_chip_info(struct tegra_soc_hwpm *hwpm)
{
int err = -EINVAL;
@@ -70,17 +71,60 @@ int tegra_hwpm_init_chip_info(struct tegra_soc_hwpm *hwpm)
return err;
}
int tegra_hwpm_init_sw_components(struct tegra_soc_hwpm *hwpm)
{
int err = 0;
err = tegra_hwpm_init_chip_info(hwpm);
if (err != 0) {
tegra_hwpm_err(hwpm, "Failed to initialize current chip info.");
return err;
}
if (hwpm->active_chip->init_chip_ip_structures == NULL) {
tegra_hwpm_err(hwpm, "init_chip_ip_structures uninitialized");
}
err = hwpm->active_chip->init_chip_ip_structures(hwpm);
if (err != 0) {
tegra_hwpm_err(hwpm, "IP structure init failed");
return err;
}
return 0;
}
/*
 * tegra_hwpm_release_sw_components - tear down driver SW state.
 *
 * Invokes the chip's release_sw_setup HAL (if installed), frees the list
 * of IPs that registered before probe, the chip IP pointer array, and the
 * hwpm structure itself, then clears the module-wide platform-device
 * pointer.
 *
 * NOTE(review): hwpm must not be referenced after this call — it is freed.
 */
void tegra_hwpm_release_sw_components(struct tegra_soc_hwpm *hwpm)
{
	struct hwpm_ip_register_list *node = ip_register_list_head;
	struct hwpm_ip_register_list *tmp_node = NULL;

	tegra_hwpm_fn(hwpm, " ");

	/* Missing HAL is logged but teardown continues regardless */
	if (hwpm->active_chip->release_sw_setup == NULL) {
		tegra_hwpm_err(hwpm, "release_sw_setup uninitialized");
	} else {
		hwpm->active_chip->release_sw_setup(hwpm);
	}

	/* Free the pre-probe IP registration list */
	while (node != NULL) {
		tmp_node = node;
		node = tmp_node->next;
		kfree(tmp_node);
	}

	kfree(hwpm->active_chip->chip_ips);
	kfree(hwpm);
	tegra_soc_hwpm_pdev = NULL;
}
int tegra_hwpm_setup_sw(struct tegra_soc_hwpm *hwpm)
{
int ret = 0;
tegra_hwpm_fn(hwpm, " ");
if (hwpm->active_chip->finalize_chip_info == NULL) {
tegra_hwpm_err(hwpm, "finalize_chip_info uninitialized");
goto enodev;
}
ret = hwpm->active_chip->finalize_chip_info(hwpm);
ret = tegra_hwpm_finalize_chip_info(hwpm);
if (ret < 0) {
tegra_hwpm_err(hwpm, "Unable to initialize chip fs_info");
goto fail;
@@ -91,8 +135,7 @@ int tegra_hwpm_setup_sw(struct tegra_soc_hwpm *hwpm)
hwpm->full_alist_size = 0;
return 0;
enodev:
ret = -ENODEV;
fail:
return ret;
}
@@ -219,26 +262,25 @@ fail:
return ret;
}
void tegra_hwpm_release_sw_components(struct tegra_soc_hwpm *hwpm)
void tegra_hwpm_release_sw_setup(struct tegra_soc_hwpm *hwpm)
{
struct hwpm_ip_register_list *node = ip_register_list_head;
struct hwpm_ip_register_list *tmp_node = NULL;
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
struct hwpm_ip *chip_ip = NULL;
u32 ip_idx;
tegra_hwpm_fn(hwpm, " ");
for (ip_idx = 0U; ip_idx < active_chip->get_ip_max_idx(hwpm);
ip_idx++) {
chip_ip = active_chip->chip_ips[ip_idx];
if (hwpm->active_chip->release_sw_setup == NULL) {
tegra_hwpm_err(hwpm, "release_sw_setup uninitialized");
} else {
hwpm->active_chip->release_sw_setup(hwpm);
/* Release perfmux array */
if (chip_ip->num_perfmux_per_inst != 0U) {
kfree(chip_ip->ip_perfmux);
}
while (node != NULL) {
tmp_node = node;
node = tmp_node->next;
kfree(tmp_node);
/* Release perfmon array */
if (chip_ip->num_perfmon_per_inst != 0U) {
kfree(chip_ip->ip_perfmon);
}
kfree(hwpm->active_chip->chip_ips);
kfree(hwpm);
tegra_soc_hwpm_pdev = NULL;
}
return;
}

View File

@@ -0,0 +1,521 @@
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/slab.h>
#include <uapi/linux/tegra-soc-hwpm-uapi.h>
#include <tegra_hwpm.h>
#include <tegra_hwpm_log.h>
#include <tegra_hwpm_common.h>
#include <tegra_hwpm_static_analysis.h>
static int tegra_hwpm_init_ip_perfmux_apertures(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip *chip_ip)
{
u32 idx = 0U, perfmux_idx = 0U, max_perfmux = 0U;
u64 perfmux_address_range = 0ULL, perfmux_offset = 0ULL;
hwpm_ip_perfmux *perfmux = NULL;
/* Initialize perfmux array */
if (chip_ip->num_perfmux_per_inst == 0U) {
/* no perfmux in this IP */
return 0;
}
perfmux_address_range = tegra_hwpm_safe_add_u64(
tegra_hwpm_safe_sub_u64(chip_ip->perfmux_range_end,
chip_ip->perfmux_range_start), 1ULL);
chip_ip->num_perfmux_slots = tegra_hwpm_safe_cast_u64_to_u32(
perfmux_address_range / chip_ip->inst_perfmux_stride);
chip_ip->ip_perfmux = kzalloc(
sizeof(hwpm_ip_perfmux *) * chip_ip->num_perfmux_slots,
GFP_KERNEL);
if (chip_ip->ip_perfmux == NULL) {
tegra_hwpm_err(hwpm, "Perfmux pointer array allocation failed");
return -ENOMEM;
}
/* Set all perfmux slot pointers to NULL */
for (idx = 0U; idx < chip_ip->num_perfmux_slots; idx++) {
chip_ip->ip_perfmux[idx] = NULL;
}
/* Assign valid perfmuxes to corresponding slot pointers */
max_perfmux = chip_ip->num_instances * chip_ip->num_perfmux_per_inst;
for (perfmux_idx = 0U; perfmux_idx < max_perfmux; perfmux_idx++) {
perfmux = &chip_ip->perfmux_static_array[perfmux_idx];
/* Compute perfmux offset from perfmux range start */
perfmux_offset = tegra_hwpm_safe_sub_u64(
perfmux->start_abs_pa, chip_ip->perfmux_range_start);
/* Compute perfmux slot index */
idx = tegra_hwpm_safe_cast_u64_to_u32(
perfmux_offset / chip_ip->inst_perfmux_stride);
/* Set perfmux slot pointer */
chip_ip->ip_perfmux[idx] = perfmux;
}
return 0;
}
static int tegra_hwpm_init_ip_perfmon_apertures(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip *chip_ip)
{
u32 idx = 0U, perfmon_idx = 0U, max_perfmon = 0U;
u64 perfmon_address_range = 0ULL, perfmon_offset = 0ULL;
hwpm_ip_perfmon *perfmon = NULL;
/* Initialize perfmon array */
if (chip_ip->num_perfmon_per_inst == 0U) {
/* no perfmons in this IP */
return 0;
}
perfmon_address_range = tegra_hwpm_safe_add_u64(
tegra_hwpm_safe_sub_u64(chip_ip->perfmon_range_end,
chip_ip->perfmon_range_start), 1ULL);
chip_ip->num_perfmon_slots = tegra_hwpm_safe_cast_u64_to_u32(
perfmon_address_range / chip_ip->inst_perfmon_stride);
chip_ip->ip_perfmon = kzalloc(
sizeof(hwpm_ip_perfmon *) * chip_ip->num_perfmon_slots,
GFP_KERNEL);
if (chip_ip->ip_perfmon == NULL) {
tegra_hwpm_err(hwpm, "Perfmon pointer array allocation failed");
return -ENOMEM;
}
/* Set all perfmon slot pointers to NULL */
for (idx = 0U; idx < chip_ip->num_perfmon_slots; idx++) {
chip_ip->ip_perfmon[idx] = NULL;
}
/* Assign valid perfmuxes to corresponding slot pointers */
max_perfmon = chip_ip->num_instances * chip_ip->num_perfmon_per_inst;
for (perfmon_idx = 0U; perfmon_idx < max_perfmon; perfmon_idx++) {
perfmon = &chip_ip->perfmon_static_array[perfmon_idx];
/* Compute perfmon offset from perfmon range start */
perfmon_offset = tegra_hwpm_safe_sub_u64(
perfmon->start_abs_pa, chip_ip->perfmon_range_start);
/* Compute perfmon slot index */
idx = tegra_hwpm_safe_cast_u64_to_u32(
perfmon_offset / chip_ip->inst_perfmon_stride);
/* Set perfmon slot pointer */
chip_ip->ip_perfmon[idx] = perfmon;
}
return 0;
}
/*
 * tegra_hwpm_init_chip_ip_structures - build aperture arrays for all IPs.
 *
 * For every IP of the active chip (count from the get_ip_max_idx HAL),
 * allocates and populates the perfmon and perfmux slot arrays. Stops at
 * the first failure and propagates its error code; returns 0 on success.
 */
int tegra_hwpm_init_chip_ip_structures(struct tegra_soc_hwpm *hwpm)
{
	struct tegra_soc_hwpm_chip *chip = hwpm->active_chip;
	u32 idx;
	int err;

	for (idx = 0U; idx < chip->get_ip_max_idx(hwpm); idx++) {
		struct hwpm_ip *ip = chip->chip_ips[idx];

		err = tegra_hwpm_init_ip_perfmon_apertures(hwpm, ip);
		if (err != 0) {
			tegra_hwpm_err(hwpm, "IP %d perfmon alloc failed",
				idx);
			return err;
		}

		err = tegra_hwpm_init_ip_perfmux_apertures(hwpm, ip);
		if (err != 0) {
			tegra_hwpm_err(hwpm, "IP %d perfmux alloc failed",
				idx);
			return err;
		}
	}

	return 0;
}
/*
 * tegra_hwpm_find_ip_perfmon_index - locate a perfmon slot by base address.
 *
 * Perfmon apertures belong to the IP domain and carry the instance info
 * for a given base address. Because all instances of an IP occupy
 * consecutive address space, the slot index is simply
 * (base - perfmon_range_start) / inst_perfmon_stride.
 *
 * On success writes the slot index to @ip_perfmon_idx and returns 0.
 * Returns -EINVAL for a NULL output pointer, an out-of-bounds index, or
 * an address that is not the exact aperture start; -ENODEV when the
 * address lies outside the IP's range or the slot is unpopulated (a
 * legitimate case — e.g. not all MSS base addresses are shared between
 * MSS IPs).
 */
static int tegra_hwpm_find_ip_perfmon_index(struct tegra_soc_hwpm *hwpm,
	u64 base_addr, u32 ip_index, u32 *ip_perfmon_idx)
{
	struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
	struct hwpm_ip *chip_ip = active_chip->chip_ips[ip_index];
	u32 perfmon_idx;
	u64 addr_offset = 0ULL;
	hwpm_ip_perfmon *perfmon = NULL;

	tegra_hwpm_fn(hwpm, " ");

	if (ip_perfmon_idx == NULL) {
		tegra_hwpm_err(hwpm, "pointer for ip_perfmon_idx is NULL");
		return -EINVAL;
	}

	/* Validate phys_addr falls in IP address range */
	if ((base_addr < chip_ip->perfmon_range_start) ||
		(base_addr > chip_ip->perfmon_range_end)) {
		tegra_hwpm_dbg(hwpm, hwpm_info,
			"phys address 0x%llx not in IP %d",
			base_addr, ip_index);
		return -ENODEV;
	}

	/* Find IP instance for given phys_address */
	/*
	 * Since all IP instances are configured to be in consecutive memory,
	 * instance index can be found using instance physical address stride.
	 */
	addr_offset = tegra_hwpm_safe_sub_u64(
		base_addr, chip_ip->perfmon_range_start);
	perfmon_idx = tegra_hwpm_safe_cast_u64_to_u32(
		addr_offset / chip_ip->inst_perfmon_stride);

	/* Make sure instance index is valid */
	if (perfmon_idx >= chip_ip->num_perfmon_slots) {
		tegra_hwpm_err(hwpm,
			"IP:%d -> base addr 0x%llx is out of bounds",
			ip_index, base_addr);
		return -EINVAL;
	}

	/* Validate IP instance perfmon start address = given phys addr */
	perfmon = chip_ip->ip_perfmon[perfmon_idx];
	if (perfmon == NULL) {
		/*
		 * This is a valid case. For example, not all MSS base
		 * addresses are shared between MSS IPs.
		 */
		tegra_hwpm_dbg(hwpm, hwpm_info,
			"For addr 0x%llx IP %d perfmon_idx %d not populated",
			base_addr, ip_index, perfmon_idx);
		return -ENODEV;
	}

	if (base_addr != perfmon->start_abs_pa) {
		tegra_hwpm_dbg(hwpm, hwpm_info,
			"base addr 0x%llx != perfmon abs addr", base_addr);
		return -EINVAL;
	}

	*ip_perfmon_idx = perfmon_idx;

	return 0;
}
/*
 * tegra_hwpm_find_ip_perfmux_index - locate a perfmux slot by base address.
 *
 * Perfmux apertures belong to the IP domain and carry the instance info
 * for a given base address. Because all instances of an IP occupy
 * consecutive address space, the slot index is simply
 * (base - perfmux_range_start) / inst_perfmux_stride.
 *
 * On success writes the slot index to @ip_perfmux_idx and returns 0.
 * Returns -EINVAL for a NULL output pointer, an out-of-bounds index, or
 * an address that is not the exact aperture start; -ENODEV when the
 * address lies outside the IP's range or the slot is unpopulated (a
 * legitimate case — e.g. not all MSS base addresses are shared between
 * MSS IPs).
 */
static int tegra_hwpm_find_ip_perfmux_index(struct tegra_soc_hwpm *hwpm,
	u64 base_addr, u32 ip_index, u32 *ip_perfmux_idx)
{
	struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
	struct hwpm_ip *chip_ip = active_chip->chip_ips[ip_index];
	u32 perfmux_idx;
	u64 addr_offset = 0ULL;
	hwpm_ip_perfmux *perfmux = NULL;

	tegra_hwpm_fn(hwpm, " ");

	if (ip_perfmux_idx == NULL) {
		tegra_hwpm_err(hwpm, "pointer for ip_perfmux_idx is NULL");
		return -EINVAL;
	}

	/* Validate phys_addr falls in IP address range */
	if ((base_addr < chip_ip->perfmux_range_start) ||
		(base_addr > chip_ip->perfmux_range_end)) {
		tegra_hwpm_dbg(hwpm, hwpm_info,
			"phys address 0x%llx not in IP %d",
			base_addr, ip_index);
		return -ENODEV;
	}

	/* Find IP instance for given phys_address */
	/*
	 * Since all IP instances are configured to be in consecutive memory,
	 * instance index can be found using instance physical address stride.
	 */
	addr_offset = tegra_hwpm_safe_sub_u64(
		base_addr, chip_ip->perfmux_range_start);
	perfmux_idx = tegra_hwpm_safe_cast_u64_to_u32(
		addr_offset / chip_ip->inst_perfmux_stride);

	/* Make sure instance index is valid */
	if (perfmux_idx >= chip_ip->num_perfmux_slots) {
		tegra_hwpm_err(hwpm,
			"IP:%d -> base addr 0x%llx is out of bounds",
			ip_index, base_addr);
		return -EINVAL;
	}

	/* Validate IP instance perfmux start address = given phys addr */
	perfmux = chip_ip->ip_perfmux[perfmux_idx];
	if (perfmux == NULL) {
		/*
		 * This is a valid case. For example, not all MSS base
		 * addresses are shared between MSS IPs.
		 */
		tegra_hwpm_dbg(hwpm, hwpm_info,
			"For addr 0x%llx IP %d perfmux_idx %d not populated",
			base_addr, ip_index, perfmux_idx);
		return -ENODEV;
	}

	if (base_addr != perfmux->start_abs_pa) {
		tegra_hwpm_dbg(hwpm, hwpm_info,
			"base addr 0x%llx != perfmux abs addr", base_addr);
		return -EINVAL;
	}

	*ip_perfmux_idx = perfmux_idx;

	return 0;
}
/*
 * Mark a hardware instance as present (@available == true) or absent in
 * the IP's floorsweep mask. Always returns 0.
 */
static int tegra_hwpm_update_ip_floorsweep_mask(struct tegra_soc_hwpm *hwpm,
	u32 ip_idx, u32 hw_inst_mask, bool available)
{
	struct hwpm_ip *ip = hwpm->active_chip->chip_ips[ip_idx];

	tegra_hwpm_fn(hwpm, " ");

	/* Update floorsweep info */
	if (available)
		ip->fs_mask |= hw_inst_mask;
	else
		ip->fs_mask &= ~hw_inst_mask;

	return 0;
}
/*
 * Set or clear the ip_ops callbacks on every perfmux belonging to the
 * same hardware instance as the perfmux at @ip_perfmux_idx. When
 * @available is false the callbacks are reset to NULL. Always returns 0.
 */
static int tegra_hwpm_update_ip_ops_info(struct tegra_soc_hwpm *hwpm,
	struct tegra_soc_hwpm_ip_ops *hwpm_ip_ops,
	u32 ip_idx, u32 ip_perfmux_idx, bool available)
{
	struct hwpm_ip *ip = hwpm->active_chip->chip_ips[ip_idx];
	hwpm_ip_perfmux *target = ip->ip_perfmux[ip_perfmux_idx];
	u32 idx, total_perfmux;

	tegra_hwpm_fn(hwpm, " ");

	/* Update IP ops info for all perfmuxes in the instance */
	total_perfmux = tegra_hwpm_safe_mult_u32(
		ip->num_instances, ip->num_perfmux_per_inst);

	for (idx = 0U; idx < total_perfmux; idx++) {
		hwpm_ip_perfmux *cur = &ip->perfmux_static_array[idx];
		struct tegra_hwpm_ip_ops *ops;

		/* Skip perfmuxes of other hardware instances */
		if (cur->hw_inst_mask != target->hw_inst_mask)
			continue;

		ops = &cur->ip_ops;
		if (available) {
			ops->ip_dev = hwpm_ip_ops->ip_dev;
			ops->hwpm_ip_pm = hwpm_ip_ops->hwpm_ip_pm;
			ops->hwpm_ip_reg_op = hwpm_ip_ops->hwpm_ip_reg_op;
		} else {
			ops->ip_dev = NULL;
			ops->hwpm_ip_pm = NULL;
			ops->hwpm_ip_reg_op = NULL;
		}
	}

	return 0;
}
/*
 * tegra_hwpm_set_fs_info_ip_ops - register/unregister an IP instance.
 *
 * Finds the hardware instance matching @base_address (via the IP's
 * perfmux slots, or perfmon slots for IPs without perfmuxes), updates
 * the IP floorsweep mask, and — when @hwpm_ip_ops is non-NULL on the
 * perfmux path — installs or clears the instance's ip_ops callbacks.
 * @available selects register (true) vs unregister (false).
 *
 * Returns 0 on success (including override-enabled IPs, which are
 * silently skipped), -EINVAL for an inactive IP index, -ENODEV for a
 * missing chip/IP structure, or the lookup/update error code.
 */
int tegra_hwpm_set_fs_info_ip_ops(struct tegra_soc_hwpm *hwpm,
	struct tegra_soc_hwpm_ip_ops *hwpm_ip_ops,
	u64 base_address, u32 ip_idx, bool available)
{
	int ret = 0;
	u32 perfmux_idx = 0U, perfmon_idx = 0U;
	struct tegra_soc_hwpm_chip *active_chip = NULL;
	struct hwpm_ip *chip_ip = NULL;
	hwpm_ip_perfmux *perfmux = NULL;
	hwpm_ip_perfmon *perfmon = NULL;

	tegra_hwpm_fn(hwpm, " ");

	if (hwpm->active_chip == NULL) {
		tegra_hwpm_err(hwpm, "chip struct not populated");
		return -ENODEV;
	}
	active_chip = hwpm->active_chip;

	if (ip_idx == TEGRA_SOC_HWPM_IP_INACTIVE) {
		tegra_hwpm_err(hwpm, "invalid ip_idx %d", ip_idx);
		return -EINVAL;
	}

	chip_ip = active_chip->chip_ips[ip_idx];
	if (chip_ip == NULL) {
		tegra_hwpm_err(hwpm, "IP %d not populated", ip_idx);
		return -ENODEV;
	}

	if (chip_ip->override_enable) {
		/* This IP should not be configured for HWPM */
		tegra_hwpm_dbg(hwpm, hwpm_info,
			"IP %d enable override", ip_idx);
		/* TODO(review): decide whether to notify the caller */
		return 0; /* Should this be notified to caller or ignored */
	}

	if (chip_ip->num_perfmux_per_inst != 0U) {
		/* Step 1: find IP hw instance mask using perfmux */
		ret = tegra_hwpm_find_ip_perfmux_index(hwpm,
			base_address, ip_idx, &perfmux_idx);
		if (ret != 0) {
			/* Error is logged and handled by the caller */
			goto fail;
		}

		perfmux = chip_ip->ip_perfmux[perfmux_idx];

		/* Step 2: Update IP floorsweep info */
		ret = tegra_hwpm_update_ip_floorsweep_mask(
			hwpm, ip_idx, perfmux->hw_inst_mask, available);
		if (ret != 0) {
			tegra_hwpm_err(hwpm, "IP %d perfmux %d base 0x%llx: "
				"FS mask update failed",
				ip_idx, perfmux_idx, base_address);
			goto fail;
		}

		/* Step 3 (optional): install/clear instance callbacks */
		if (hwpm_ip_ops != NULL) {
			/* Update IP ops */
			ret = tegra_hwpm_update_ip_ops_info(hwpm, hwpm_ip_ops,
				ip_idx, perfmux_idx, available);
			if (ret != 0) {
				tegra_hwpm_err(hwpm,
				"IP %d perfmux %d: Failed to update ip_ops",
					ip_idx, perfmux_idx);
				goto fail;
			}
		}
	} else {
		/* Step 1: find IP hw instance mask using perfmon */
		ret = tegra_hwpm_find_ip_perfmon_index(hwpm,
			base_address, ip_idx, &perfmon_idx);
		if (ret != 0) {
			/* Error is logged and handled by the caller */
			goto fail;
		}

		perfmon = chip_ip->ip_perfmon[perfmon_idx];

		/* Step 2: Update IP floorsweep info */
		ret = tegra_hwpm_update_ip_floorsweep_mask(
			hwpm, ip_idx, perfmon->hw_inst_mask, available);
		if (ret != 0) {
			tegra_hwpm_err(hwpm, "IP %d perfmon %d base 0x%llx: "
				"FS mask update failed",
				ip_idx, perfmon_idx, base_address);
			goto fail;
		}
	}
fail:
	return ret;
}
/*
 * Drain the list of IP registration requests that were queued before
 * the HWPM driver finished probing, handing each entry's ip_ops to the
 * active chip's extract_ip_ops HAL.
 *
 * Returns 0 on success, -ENODEV if the HAL is not populated, or the
 * first non-zero error from extract_ip_ops (remaining entries are not
 * processed after a failure).
 */
static int tegra_hwpm_complete_ip_register(struct tegra_soc_hwpm *hwpm)
{
	struct hwpm_ip_register_list *entry;
	int ret = 0;

	tegra_hwpm_fn(hwpm, " ");

	if (hwpm->active_chip->extract_ip_ops == NULL) {
		tegra_hwpm_err(hwpm, "extract_ip_ops uninitialized");
		return -ENODEV;
	}

	for (entry = ip_register_list_head; entry != NULL;
			entry = entry->next) {
		tegra_hwpm_dbg(hwpm, hwpm_info, "IP ext idx %d info",
			entry->ip_ops.ip_index);
		ret = hwpm->active_chip->extract_ip_ops(
			hwpm, &entry->ip_ops, true);
		if (ret != 0) {
			tegra_hwpm_err(hwpm, "Failed to extract IP ops");
			return ret;
		}
	}
	return ret;
}
/*
 * There are 3 ways to learn which IPs are available:
 * 1. An IP registers with the HWPM driver after the driver is probed
 * 2. An IP registers with HWPM before the HWPM driver is probed
 * 3. IPs that are force-enabled by the driver
 *
 * This function handles cases 2 and 3.
 */
/*
 * Finalize the active chip's IP availability information.
 *
 * First processes IP registration requests that arrived before the
 * HWPM driver was probed, then invokes the chip's force_enable_ips HAL
 * to enable any force-enabled IPs.
 *
 * Returns 0 on success, -ENODEV if force_enable_ips is not populated,
 * or the error from the failing step.
 */
int tegra_hwpm_finalize_chip_info(struct tegra_soc_hwpm *hwpm)
{
	int err;

	tegra_hwpm_fn(hwpm, " ");

	/*
	 * Go through IP registration requests received before HWPM
	 * driver was probed.
	 */
	err = tegra_hwpm_complete_ip_register(hwpm);
	if (err != 0) {
		tegra_hwpm_err(hwpm, "Failed register IPs");
		return err;
	}

	if (hwpm->active_chip->force_enable_ips == NULL) {
		tegra_hwpm_err(hwpm, "force_enable_ips uninitialized");
		return -ENODEV;
	}

	err = hwpm->active_chip->force_enable_ips(hwpm);
	if (err != 0) {
		tegra_hwpm_err(hwpm, "Failed to force enable IPs");
		return err;
	}

	return 0;
}

View File

@@ -19,6 +19,154 @@
#include <tegra_hwpm_common.h>
#include <tegra_hwpm_static_analysis.h>
/* ip_idx indicates internal active ip index */
/*
 * Reserve every perfmon and perfmux aperture of the IP identified by
 * ip_idx, iterating over the IP's floorswept (present) hardware
 * instances. PMA and RTR are skipped because they are reserved
 * separately before any other resource.
 *
 * On a reservation failure, instances that were fully reserved
 * (tracked in reserved_insts) are disabled and released again.
 * NOTE(review): apertures of the partially-reserved failing instance
 * are not rolled back here — confirm this is intentional.
 *
 * Returns 0 on success, -ENODEV if a disable HAL needed for rollback
 * is missing, or the first reservation error otherwise.
 */
static int tegra_hwpm_reserve_given_resource(struct tegra_soc_hwpm *hwpm,
	u32 ip_idx)
{
	int err = 0, ret = 0;
	u32 perfmux_idx, perfmon_idx;
	unsigned long inst_idx = 0UL;
	/* Bitmask of present instances / of instances reserved so far */
	unsigned long floorsweep_info = 0UL, reserved_insts = 0UL;
	struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
	struct hwpm_ip *chip_ip = active_chip->chip_ips[ip_idx];
	hwpm_ip_perfmon *perfmon = NULL;
	hwpm_ip_perfmux *perfmux = NULL;

	floorsweep_info = (unsigned long)chip_ip->fs_mask;

	tegra_hwpm_fn(hwpm, " ");

	tegra_hwpm_dbg(hwpm, hwpm_info, "Reserve IP %d, fs_mask 0x%x",
		ip_idx, chip_ip->fs_mask);

	/* PMA and RTR are already reserved */
	if ((ip_idx == active_chip->get_pma_int_idx(hwpm)) ||
		(ip_idx == active_chip->get_rtr_int_idx(hwpm))) {
		return 0;
	}

	for_each_set_bit(inst_idx, &floorsweep_info, 32U) {
		/* Reserve all perfmon belonging to this instance */
		for (perfmon_idx = 0U; perfmon_idx < chip_ip->num_perfmon_slots;
			perfmon_idx++) {
			perfmon = chip_ip->ip_perfmon[perfmon_idx];
			/* Slot array is sparse; skip empty slots */
			if (perfmon == NULL) {
				continue;
			}
			/* Only apertures of the current instance */
			if (perfmon->hw_inst_mask != BIT(inst_idx)) {
				continue;
			}
			err = tegra_hwpm_perfmon_reserve(hwpm, perfmon);
			if (err != 0) {
				tegra_hwpm_err(hwpm,
					"IP %d perfmon %d reserve failed",
					ip_idx, perfmon_idx);
				goto fail;
			}
		}
		/* Reserve all perfmux belonging to this instance */
		for (perfmux_idx = 0U; perfmux_idx < chip_ip->num_perfmux_slots;
			perfmux_idx++) {
			perfmux = chip_ip->ip_perfmux[perfmux_idx];
			if (perfmux == NULL) {
				continue;
			}
			if (perfmux->hw_inst_mask != BIT(inst_idx)) {
				continue;
			}
			err = tegra_hwpm_perfmux_reserve(hwpm, perfmux);
			if (err != 0) {
				tegra_hwpm_err(hwpm,
					"IP %d perfmux %d reserve failed",
					ip_idx, perfmux_idx);
				goto fail;
			}
		}
		/* Instance fully reserved; remember it for rollback */
		reserved_insts |= BIT(inst_idx);
	}
	chip_ip->reserved = true;

	return 0;
fail:
	/* Rollback needs the disable HALs; bail out if missing */
	if (hwpm->active_chip->perfmon_disable == NULL) {
		tegra_hwpm_err(hwpm, "perfmon_disable HAL uninitialized");
		return -ENODEV;
	}
	if (hwpm->active_chip->perfmux_disable == NULL) {
		tegra_hwpm_err(hwpm, "perfmux_disable HAL uninitialized");
		return -ENODEV;
	}

	/* release reserved instances */
	for_each_set_bit(inst_idx, &reserved_insts, 32U) {
		/* Release all perfmon belonging to this instance */
		for (perfmon_idx = 0U; perfmon_idx < chip_ip->num_perfmon_slots;
			perfmon_idx++) {
			perfmon = chip_ip->ip_perfmon[perfmon_idx];
			if (perfmon == NULL) {
				continue;
			}
			if (perfmon->hw_inst_mask != BIT(inst_idx)) {
				continue;
			}
			/* Errors are logged but rollback continues */
			ret = hwpm->active_chip->perfmon_disable(hwpm, perfmon);
			if (ret != 0) {
				tegra_hwpm_err(hwpm,
					"IP %d perfmon %d disable failed",
					ip_idx, perfmon_idx);
			}
			ret = tegra_hwpm_perfmon_release(hwpm, perfmon);
			if (ret != 0) {
				tegra_hwpm_err(hwpm,
					"IP %d perfmon %d release failed",
					ip_idx, perfmon_idx);
			}
		}
		/* Release all perfmux belonging to this instance */
		for (perfmux_idx = 0U; perfmux_idx < chip_ip->num_perfmux_slots;
			perfmux_idx++) {
			perfmux = chip_ip->ip_perfmux[perfmux_idx];
			if (perfmux == NULL) {
				continue;
			}
			if (perfmux->hw_inst_mask != BIT(inst_idx)) {
				continue;
			}
			ret = hwpm->active_chip->perfmux_disable(hwpm, perfmux);
			if (ret != 0) {
				tegra_hwpm_err(hwpm,
					"IP %d perfmux %d disable failed",
					ip_idx, perfmux_idx);
			}
			ret = tegra_hwpm_perfmux_release(hwpm, perfmux);
			if (ret != 0) {
				tegra_hwpm_err(hwpm,
					"IP %d perfmux %d release failed",
					ip_idx, perfmux_idx);
			}
		}
	}
	/* Propagate the original reservation error, not rollback errors */
	return err;
}
int tegra_hwpm_reserve_resource(struct tegra_soc_hwpm *hwpm, u32 resource)
{
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
@@ -61,12 +209,7 @@ int tegra_hwpm_reserve_resource(struct tegra_soc_hwpm *hwpm, u32 resource)
return 0;
}
if (active_chip->reserve_given_resource == NULL) {
tegra_hwpm_err(hwpm,
"reserve_given_resource HAL uninitialized");
return -ENODEV;
}
ret = active_chip->reserve_given_resource(hwpm, ip_idx);
ret = tegra_hwpm_reserve_given_resource(hwpm, ip_idx);
if (ret != 0) {
tegra_hwpm_err(hwpm, "Failed to reserve resource %d", resource);
return ret;
@@ -75,17 +218,250 @@ int tegra_hwpm_reserve_resource(struct tegra_soc_hwpm *hwpm, u32 resource)
return 0;
}
/*
 * Prepare every reserved IP for profiling: for each present hardware
 * instance, zero out the allowlist registers of its perfmux and
 * perfmon apertures (via the zero_alist_regs HAL) and enable each
 * perfmon (via the perfmon_enable HAL).
 *
 * Errors from individual apertures are logged and the walk continues;
 * the returned value is the result of the last failing HAL call (or 0
 * if none failed after the last success).
 *
 * Returns -ENODEV up front if either required HAL is not populated.
 */
static int tegra_hwpm_bind_reserved_resources(struct tegra_soc_hwpm *hwpm)
{
	struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
	struct hwpm_ip *chip_ip = NULL;
	u32 ip_idx;
	u32 perfmux_idx, perfmon_idx;
	unsigned long inst_idx = 0UL;
	unsigned long floorsweep_info = 0UL;
	int err = 0;
	hwpm_ip_perfmon *perfmon = NULL;
	hwpm_ip_perfmux *perfmux = NULL;

	tegra_hwpm_fn(hwpm, " ");

	if (hwpm->active_chip->zero_alist_regs == NULL) {
		tegra_hwpm_err(hwpm,
			"zero_alist_regs HAL uninitialized");
		return -ENODEV;
	}
	if (hwpm->active_chip->perfmon_enable == NULL) {
		tegra_hwpm_err(hwpm,
			"perfmon_enable HAL uninitialized");
		return -ENODEV;
	}

	for (ip_idx = 0U; ip_idx < active_chip->get_ip_max_idx(hwpm);
		ip_idx++) {
		chip_ip = active_chip->chip_ips[ip_idx];
		/* Skip unavailable IPs */
		if (!chip_ip->reserved) {
			continue;
		}
		if (chip_ip->fs_mask == 0U) {
			/* No IP instance is available */
			continue;
		}
		floorsweep_info = (unsigned long)chip_ip->fs_mask;
		for_each_set_bit(inst_idx, &floorsweep_info, 32U) {
			/* Zero out necessary perfmux registers */
			for (perfmux_idx = 0U;
				perfmux_idx < chip_ip->num_perfmux_slots;
				perfmux_idx++) {
				perfmux = chip_ip->ip_perfmux[perfmux_idx];
				/* Slot array is sparse; skip empty slots */
				if (perfmux == NULL) {
					continue;
				}
				/* Only apertures of the current instance */
				if (perfmux->hw_inst_mask != BIT(inst_idx)) {
					continue;
				}
				err = hwpm->active_chip->zero_alist_regs(
					hwpm, perfmux);
				if (err != 0) {
					tegra_hwpm_err(hwpm, "IP %d"
						" perfmux %d zero regs failed",
						ip_idx, perfmux_idx);
				}
			}
			/* Zero out necessary perfmon registers */
			/* And enable reporting of PERFMON status */
			for (perfmon_idx = 0U;
				perfmon_idx < chip_ip->num_perfmon_slots;
				perfmon_idx++) {
				perfmon = chip_ip->ip_perfmon[perfmon_idx];
				if (perfmon == NULL) {
					continue;
				}
				if (perfmon->hw_inst_mask != BIT(inst_idx)) {
					continue;
				}
				err = hwpm->active_chip->zero_alist_regs(
					hwpm, perfmon);
				if (err != 0) {
					tegra_hwpm_err(hwpm, "IP %d"
						" perfmon %d zero regs failed",
						ip_idx, perfmon_idx);
				}
				err = hwpm->active_chip->perfmon_enable(
					hwpm, perfmon);
				if (err != 0) {
					tegra_hwpm_err(hwpm, "IP %d"
						" perfmon %d enable failed",
						ip_idx, perfmon_idx);
				}
			}
		}
	}
	return err;
}
/*
 * Bind all reserved resources for profiling.
 *
 * Thin wrapper around tegra_hwpm_bind_reserved_resources(); returns
 * its result, logging on failure.
 */
int tegra_hwpm_bind_resources(struct tegra_soc_hwpm *hwpm)
{
	int err;

	tegra_hwpm_fn(hwpm, " ");

	err = tegra_hwpm_bind_reserved_resources(hwpm);
	if (err != 0) {
		tegra_hwpm_err(hwpm, "failed to bind resources");
	}

	return err;
}
/*
 * Disable and release every reserved IP except PMA and RTR (those are
 * released later by their dedicated release paths). For each present
 * instance of a reserved IP, perfmons are disabled and released first,
 * then perfmuxes; the IP is finally marked not reserved.
 *
 * Per-aperture errors are logged and the walk continues. Returns 0 on
 * completion, or -ENODEV if a disable HAL is found missing (note: that
 * check happens inside the IP loop, so earlier IPs may already have
 * been released by then).
 * NOTE(review): per-aperture errors collected in 'err' are not
 * propagated to the caller — confirm this is intentional.
 */
int tegra_hwpm_release_all_resources(struct tegra_soc_hwpm *hwpm)
{
	struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
	struct hwpm_ip *chip_ip = NULL;
	hwpm_ip_perfmon *perfmon = NULL;
	hwpm_ip_perfmux *perfmux = NULL;
	u32 ip_idx;
	u32 perfmux_idx, perfmon_idx;
	unsigned long floorsweep_info = 0UL;
	unsigned long inst_idx = 0UL;
	int err = 0;

	tegra_hwpm_fn(hwpm, " ");

	for (ip_idx = 0U; ip_idx < active_chip->get_ip_max_idx(hwpm);
		ip_idx++) {
		chip_ip = active_chip->chip_ips[ip_idx];
		/* PMA and RTR will be released later */
		if ((ip_idx == active_chip->get_pma_int_idx(hwpm)) ||
			(ip_idx == active_chip->get_rtr_int_idx(hwpm))) {
			continue;
		}
		/* Disable only available IPs */
		if (chip_ip->override_enable) {
			/* IP not available */
			continue;
		}
		/* Disable and release only reserved IPs */
		if (!chip_ip->reserved) {
			continue;
		}
		if (chip_ip->fs_mask == 0U) {
			/* No IP instance is available */
			continue;
		}
		if (hwpm->active_chip->perfmon_disable == NULL) {
			tegra_hwpm_err(hwpm,
				"perfmon_disable HAL uninitialized");
			return -ENODEV;
		}
		if (hwpm->active_chip->perfmux_disable == NULL) {
			tegra_hwpm_err(hwpm,
				"perfmux_disable HAL uninitialized");
			return -ENODEV;
		}
		floorsweep_info = (unsigned long)chip_ip->fs_mask;
		for_each_set_bit(inst_idx, &floorsweep_info, 32U) {
			/* Release all perfmon associated with inst_idx */
			for (perfmon_idx = 0U;
				perfmon_idx < chip_ip->num_perfmon_slots;
				perfmon_idx++) {
				perfmon = chip_ip->ip_perfmon[perfmon_idx];
				/* Slot array is sparse; skip empty slots */
				if (perfmon == NULL) {
					continue;
				}
				/* Only apertures of the current instance */
				if (perfmon->hw_inst_mask != BIT(inst_idx)) {
					continue;
				}
				err = hwpm->active_chip->perfmon_disable(
					hwpm, perfmon);
				if (err != 0) {
					tegra_hwpm_err(hwpm, "IP %d"
						" perfmon %d disable failed",
						ip_idx, perfmon_idx);
				}
				err = tegra_hwpm_perfmon_release(hwpm, perfmon);
				if (err != 0) {
					tegra_hwpm_err(hwpm, "IP %d"
						" perfmon %d release failed",
						ip_idx, perfmon_idx);
				}
			}
			/* Release all perfmux associated with inst_idx */
			for (perfmux_idx = 0U;
				perfmux_idx < chip_ip->num_perfmux_slots;
				perfmux_idx++) {
				perfmux = chip_ip->ip_perfmux[perfmux_idx];
				if (perfmux == NULL) {
					continue;
				}
				if (perfmux->hw_inst_mask != BIT(inst_idx)) {
					continue;
				}
				err = hwpm->active_chip->perfmux_disable(
					hwpm, perfmux);
				if (err != 0) {
					tegra_hwpm_err(hwpm, "IP %d"
						" perfmux %d disable failed",
						ip_idx, perfmux_idx);
				}
				err = tegra_hwpm_perfmux_release(hwpm, perfmux);
				if (err != 0) {
					tegra_hwpm_err(hwpm, "IP %d"
						" perfmux %d release failed",
						ip_idx, perfmux_idx);
				}
			}
		}
		chip_ip->reserved = false;
	}
	return 0;
}
int tegra_hwpm_release_resources(struct tegra_soc_hwpm *hwpm)
{
int ret = 0;
tegra_hwpm_fn(hwpm, " ");
if (hwpm->active_chip->release_all_resources == NULL) {
tegra_hwpm_err(hwpm, "release_resources HAL uninitialized");
return -ENODEV;
}
ret = hwpm->active_chip->release_all_resources(hwpm);
ret = tegra_hwpm_release_all_resources(hwpm);
if (ret != 0) {
tegra_hwpm_err(hwpm, "failed to release resources");
return ret;
@@ -93,23 +469,3 @@ int tegra_hwpm_release_resources(struct tegra_soc_hwpm *hwpm)
return 0;
}
/*
 * Bind all reserved resources by delegating to the active chip's
 * bind_reserved_resources HAL.
 *
 * Returns 0 on success, -ENODEV if the HAL is not populated, or the
 * HAL's error code.
 */
int tegra_hwpm_bind_resources(struct tegra_soc_hwpm *hwpm)
{
	int ret = 0;

	tegra_hwpm_fn(hwpm, " ");

	if (hwpm->active_chip->bind_reserved_resources == NULL) {
		tegra_hwpm_err(hwpm,
			"bind_reserved_resources HAL uninitialized");
		return -ENODEV;
	}

	ret = hwpm->active_chip->bind_reserved_resources(hwpm);
	if (ret != 0) {
		tegra_hwpm_err(hwpm, "failed to bind resources");
		return ret;
	}
	return 0;
}

View File

@@ -44,93 +44,7 @@ int t234_hwpm_zero_alist_regs(struct tegra_soc_hwpm *hwpm,
return 0;
}
/*
 * Accumulate the total allowlist size of all reserved T234 IPs into
 * hwpm->full_alist_size, summing the alist_size of every perfmux and
 * perfmon aperture of every present instance (overflow-checked via
 * tegra_hwpm_safe_add_u64).
 *
 * Apertures with a NULL alist are logged and skipped. Always returns 0.
 * NOTE(review): assumes hwpm->full_alist_size was reset by the caller
 * before this accumulation — confirm against the call site.
 */
int t234_hwpm_get_alist_size(struct tegra_soc_hwpm *hwpm)
{
	struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
	u32 ip_idx;
	u32 perfmux_idx, perfmon_idx;
	unsigned long inst_idx = 0UL;
	unsigned long floorsweep_info = 0UL;
	struct hwpm_ip *chip_ip = NULL;
	hwpm_ip_perfmux *perfmux = NULL;
	hwpm_ip_perfmon *perfmon = NULL;

	tegra_hwpm_fn(hwpm, " ");

	for (ip_idx = 0U; ip_idx < T234_HWPM_IP_MAX; ip_idx++) {
		chip_ip = active_chip->chip_ips[ip_idx];
		/* Skip unavailable IPs */
		if (!chip_ip->reserved) {
			continue;
		}
		if (chip_ip->fs_mask == 0U) {
			/* No IP instance is available */
			continue;
		}
		floorsweep_info = (unsigned long)chip_ip->fs_mask;
		for_each_set_bit(inst_idx, &floorsweep_info, 32U) {
			/* Add perfmux alist size to full alist size */
			for (perfmux_idx = 0U;
				perfmux_idx < chip_ip->num_perfmux_slots;
				perfmux_idx++) {
				perfmux = chip_ip->ip_perfmux[perfmux_idx];
				/* Slot array is sparse; skip empty slots */
				if (perfmux == NULL) {
					continue;
				}
				/* Only apertures of the current instance */
				if (perfmux->hw_inst_mask != BIT(inst_idx)) {
					continue;
				}
				if (perfmux->alist) {
					hwpm->full_alist_size =
						tegra_hwpm_safe_add_u64(
						hwpm->full_alist_size,
						perfmux->alist_size);
				} else {
					tegra_hwpm_err(hwpm, "IP %d"
						" perfmux %d NULL alist",
						ip_idx, perfmux_idx);
				}
			}
			/* Add perfmon alist size to full alist size */
			for (perfmon_idx = 0U;
				perfmon_idx < chip_ip->num_perfmon_slots;
				perfmon_idx++) {
				perfmon = chip_ip->ip_perfmon[perfmon_idx];
				if (perfmon == NULL) {
					continue;
				}
				if (perfmon->hw_inst_mask != BIT(inst_idx)) {
					continue;
				}
				if (perfmon->alist) {
					hwpm->full_alist_size =
						tegra_hwpm_safe_add_u64(
						hwpm->full_alist_size,
						perfmon->alist_size);
				} else {
					tegra_hwpm_err(hwpm, "IP %d"
						" perfmon %d NULL alist",
						ip_idx, perfmon_idx);
				}
			}
		}
	}
	return 0;
}
static int t234_hwpm_copy_alist(struct tegra_soc_hwpm *hwpm,
int t234_hwpm_copy_alist(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip_aperture *aperture, u64 *full_alist,
u64 *full_alist_idx)
{
@@ -161,99 +75,6 @@ static int t234_hwpm_copy_alist(struct tegra_soc_hwpm *hwpm,
return 0;
}
/*
 * Copy the allowlists of every reserved IP's perfmux and perfmon
 * apertures into the single combined array 'alist', advancing
 * full_alist_idx as entries are appended via t234_hwpm_copy_alist().
 *
 * After the walk, the number of copied entries must equal the
 * previously computed hwpm->full_alist_size; a mismatch is reported
 * and -EINVAL returned. Otherwise returns 0 or the first copy error.
 */
int t234_hwpm_combine_alist(struct tegra_soc_hwpm *hwpm, u64 *alist)
{
	struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
	u32 ip_idx;
	u32 perfmux_idx, perfmon_idx;
	unsigned long inst_idx = 0UL;
	unsigned long floorsweep_info = 0UL;
	struct hwpm_ip *chip_ip = NULL;
	hwpm_ip_perfmux *perfmux = NULL;
	hwpm_ip_perfmon *perfmon = NULL;
	/* Running write position inside 'alist' */
	u64 full_alist_idx = 0;
	int err = 0;

	tegra_hwpm_fn(hwpm, " ");

	for (ip_idx = 0U; ip_idx < T234_HWPM_IP_MAX; ip_idx++) {
		chip_ip = active_chip->chip_ips[ip_idx];
		/* Skip unavailable IPs */
		if (!chip_ip->reserved) {
			continue;
		}
		if (chip_ip->fs_mask == 0U) {
			/* No IP instance is available */
			continue;
		}
		floorsweep_info = (unsigned long)chip_ip->fs_mask;
		for_each_set_bit(inst_idx, &floorsweep_info, 32U) {
			/* Copy perfmux alist to full alist array */
			for (perfmux_idx = 0U;
				perfmux_idx < chip_ip->num_perfmux_slots;
				perfmux_idx++) {
				perfmux = chip_ip->ip_perfmux[perfmux_idx];
				/* Slot array is sparse; skip empty slots */
				if (perfmux == NULL) {
					continue;
				}
				/* Only apertures of the current instance */
				if (perfmux->hw_inst_mask != BIT(inst_idx)) {
					continue;
				}
				err = t234_hwpm_copy_alist(hwpm, perfmux,
					alist, &full_alist_idx);
				if (err != 0) {
					tegra_hwpm_err(hwpm, "IP %d"
						" perfmux %d alist copy failed",
						ip_idx, perfmux_idx);
					goto fail;
				}
			}
			/* Copy perfmon alist to full alist array */
			for (perfmon_idx = 0U;
				perfmon_idx < chip_ip->num_perfmon_slots;
				perfmon_idx++) {
				perfmon = chip_ip->ip_perfmon[perfmon_idx];
				if (perfmon == NULL) {
					continue;
				}
				if (perfmon->hw_inst_mask != BIT(inst_idx)) {
					continue;
				}
				err = t234_hwpm_copy_alist(hwpm, perfmon,
					alist, &full_alist_idx);
				if (err != 0) {
					tegra_hwpm_err(hwpm, "IP %d"
						" perfmon %d alist copy failed",
						ip_idx, perfmon_idx);
					goto fail;
				}
			}
		}
	}

	/* Check size of full alist with hwpm->full_alist_size*/
	if (full_alist_idx != hwpm->full_alist_size) {
		tegra_hwpm_err(hwpm, "full_alist_size 0x%llx doesn't match "
			"max full_alist_idx 0x%llx",
			hwpm->full_alist_size, full_alist_idx);
		err = -EINVAL;
	}

fail:
	return err;
}
bool t234_hwpm_check_alist(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip_aperture *aperture, u64 phys_addr)
{

View File

@@ -24,270 +24,6 @@
#include <hal/t234/hw/t234_pmasys_soc_hwpm.h>
#include <hal/t234/hw/t234_pmmsys_soc_hwpm.h>
/*
 * Reserve all PMA apertures. PMA is an HWPM-internal component, so its
 * perfmuxes are reserved through the perfmon reserve path rather than
 * the IP perfmux path. Each successfully reserved perfmux contributes
 * its hw_inst_mask to the PMA fs_mask.
 *
 * A perfmon reservation failure rolls back all previously reserved
 * perfmuxes (and their fs_mask bits) and returns the original error.
 * Already-reserved PMA is treated as success.
 */
int t234_hwpm_reserve_pma(struct tegra_soc_hwpm *hwpm)
{
	u32 perfmux_idx = 0U, perfmon_idx;
	struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
	struct hwpm_ip *chip_ip_pma = active_chip->chip_ips[T234_HWPM_IP_PMA];
	hwpm_ip_perfmux *pma_perfmux = NULL;
	hwpm_ip_perfmon *pma_perfmon = NULL;
	int ret = 0, err = 0;

	tegra_hwpm_fn(hwpm, " ");

	/* Make sure that PMA is not reserved */
	if (chip_ip_pma->reserved == true) {
		tegra_hwpm_err(hwpm, "PMA already reserved, ignoring");
		return 0;
	}

	/* Reserve PMA perfmux */
	for (perfmux_idx = 0U; perfmux_idx < chip_ip_pma->num_perfmux_slots;
		perfmux_idx++) {
		pma_perfmux = chip_ip_pma->ip_perfmux[perfmux_idx];
		/* Slot array is sparse; skip empty slots */
		if (pma_perfmux == NULL) {
			continue;
		}
		/* Since PMA is hwpm component, use perfmon reserve function */
		ret = t234_hwpm_perfmon_reserve(hwpm, pma_perfmux);
		if (ret != 0) {
			tegra_hwpm_err(hwpm,
				"PMA perfmux %d reserve failed", perfmux_idx);
			return ret;
		}
		chip_ip_pma->fs_mask |= pma_perfmux->hw_inst_mask;
	}

	/* Reserve PMA perfmons */
	for (perfmon_idx = 0U; perfmon_idx < chip_ip_pma->num_perfmon_slots;
		perfmon_idx++) {
		pma_perfmon = chip_ip_pma->ip_perfmon[perfmon_idx];
		if (pma_perfmon == NULL) {
			continue;
		}
		ret = t234_hwpm_perfmon_reserve(hwpm, pma_perfmon);
		if (ret != 0) {
			tegra_hwpm_err(hwpm,
				"PMA perfmon %d reserve failed", perfmon_idx);
			goto fail;
		}
	}
	chip_ip_pma->reserved = true;

	return 0;
fail:
	/* Roll back the perfmux reservations made above */
	for (perfmux_idx = 0U; perfmux_idx < chip_ip_pma->num_perfmux_slots;
		perfmux_idx++) {
		pma_perfmux = chip_ip_pma->ip_perfmux[perfmux_idx];
		if (pma_perfmux == NULL) {
			continue;
		}
		/* Since PMA is hwpm component, use perfmon release function */
		err = t234_hwpm_perfmon_release(hwpm, pma_perfmux);
		if (err != 0) {
			tegra_hwpm_err(hwpm,
				"PMA perfmux %d release failed", perfmux_idx);
		}
		chip_ip_pma->fs_mask &= ~(pma_perfmux->hw_inst_mask);
	}
	/* Propagate the reservation error, not rollback errors */
	return ret;
}
/*
 * Release all PMA apertures (perfmuxes first, then perfmons), clearing
 * each perfmux's bit from the PMA fs_mask. PMA is an HWPM-internal
 * component, so perfmuxes are released through the perfmon release
 * path. A PMA that was never reserved is treated as success.
 *
 * NOTE(review): a release failure returns early, leaving PMA marked
 * reserved with a partially cleared fs_mask — confirm callers cope.
 */
int t234_hwpm_release_pma(struct tegra_soc_hwpm *hwpm)
{
	int ret = 0;
	u32 perfmux_idx, perfmon_idx;
	struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
	struct hwpm_ip *chip_ip_pma = active_chip->chip_ips[T234_HWPM_IP_PMA];
	hwpm_ip_perfmux *pma_perfmux = NULL;
	hwpm_ip_perfmon *pma_perfmon = NULL;

	tegra_hwpm_fn(hwpm, " ");

	if (!chip_ip_pma->reserved) {
		tegra_hwpm_dbg(hwpm, hwpm_info, "PMA wasn't mapped, ignoring.");
		return 0;
	}

	/* Release PMA perfmux */
	for (perfmux_idx = 0U; perfmux_idx < chip_ip_pma->num_perfmux_slots;
		perfmux_idx++) {
		pma_perfmux = chip_ip_pma->ip_perfmux[perfmux_idx];
		/* Slot array is sparse; skip empty slots */
		if (pma_perfmux == NULL) {
			continue;
		}
		/* Since PMA is hwpm component, use perfmon release function */
		ret = t234_hwpm_perfmon_release(hwpm, pma_perfmux);
		if (ret != 0) {
			tegra_hwpm_err(hwpm,
				"PMA perfmux %d release failed", perfmux_idx);
			return ret;
		}
		chip_ip_pma->fs_mask &= ~(pma_perfmux->hw_inst_mask);
	}

	/* Release PMA perfmons */
	for (perfmon_idx = 0U; perfmon_idx < chip_ip_pma->num_perfmon_slots;
		perfmon_idx++) {
		pma_perfmon = chip_ip_pma->ip_perfmon[perfmon_idx];
		if (pma_perfmon == NULL) {
			continue;
		}
		ret = t234_hwpm_perfmon_release(hwpm, pma_perfmon);
		if (ret != 0) {
			tegra_hwpm_err(hwpm,
				"PMA perfmon %d release failed", perfmon_idx);
			return ret;
		}
	}
	chip_ip_pma->reserved = false;

	return 0;
}
/*
 * Reserve all RTR (router) apertures. PMA must already be reserved:
 * the RTR aperture range contains a perfmux that is actually the PMA
 * perfmux, which is detected by matching start_abs_pa and aliased to
 * PMA's existing mapping instead of being mapped again. All other RTR
 * perfmuxes are reserved via the perfmon reserve path (RTR is an
 * HWPM-internal component). RTR has no perfmons.
 *
 * Returns 0 on success (including already-reserved RTR), -EINVAL if
 * PMA is not yet reserved, or the first reservation error.
 */
int t234_hwpm_reserve_rtr(struct tegra_soc_hwpm *hwpm)
{
	int ret = 0;
	u32 perfmux_idx = 0U, perfmon_idx;
	struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
	struct hwpm_ip *chip_ip_rtr = active_chip->chip_ips[T234_HWPM_IP_RTR];
	struct hwpm_ip *chip_ip_pma = active_chip->chip_ips[T234_HWPM_IP_PMA];
	hwpm_ip_perfmux *pma_perfmux = chip_ip_pma->ip_perfmux[0U];
	hwpm_ip_perfmux *rtr_perfmux = NULL;

	tegra_hwpm_fn(hwpm, " ");

	/* Verify that PMA is reserved before RTR */
	if (chip_ip_pma->reserved == false) {
		tegra_hwpm_err(hwpm, "PMA should be reserved before RTR");
		return -EINVAL;
	}

	/* Make sure that RTR is not reserved */
	if (chip_ip_rtr->reserved == true) {
		tegra_hwpm_err(hwpm, "RTR already reserved, ignoring");
		return 0;
	}

	/* Reserve RTR perfmuxes */
	for (perfmux_idx = 0U; perfmux_idx < chip_ip_rtr->num_perfmux_slots;
		perfmux_idx++) {
		rtr_perfmux = chip_ip_rtr->ip_perfmux[perfmux_idx];
		/* Slot array is sparse; skip empty slots */
		if (rtr_perfmux == NULL) {
			continue;
		}
		if (rtr_perfmux->start_abs_pa == pma_perfmux->start_abs_pa) {
			/* This is PMA perfmux wrt RTR aperture */
			/* Alias PMA's existing mapping; do not map twice */
			rtr_perfmux->start_pa = pma_perfmux->start_pa;
			rtr_perfmux->end_pa = pma_perfmux->end_pa;
			rtr_perfmux->dt_mmio = pma_perfmux->dt_mmio;
			if (hwpm->fake_registers_enabled) {
				rtr_perfmux->fake_registers =
					pma_perfmux->fake_registers;
			}
		} else {
			/* Since RTR is hwpm component,
			 * use perfmon reserve function */
			ret = t234_hwpm_perfmon_reserve(hwpm, rtr_perfmux);
			if (ret != 0) {
				tegra_hwpm_err(hwpm,
					"RTR perfmux %d reserve failed",
					perfmux_idx);
				return ret;
			}
		}
		chip_ip_rtr->fs_mask |= rtr_perfmux->hw_inst_mask;
	}

	/* Reserve RTR perfmons */
	for (perfmon_idx = 0U; perfmon_idx < chip_ip_rtr->num_perfmon_slots;
		perfmon_idx++) {
		/* No perfmons in RTR */
	}
	chip_ip_rtr->reserved = true;

	return ret;
}
/*
 * Release all RTR (router) apertures. PMA must still be reserved while
 * RTR is released: the RTR perfmux that aliases the PMA mapping
 * (matched by start_abs_pa) is only un-aliased (pointers zeroed) —
 * the underlying mapping stays with PMA. Other RTR perfmuxes are
 * released via the perfmon release path (RTR is an HWPM-internal
 * component). RTR has no perfmons.
 *
 * Returns 0 on success (including not-reserved RTR), -EINVAL if PMA
 * was already released, or the first release error.
 */
int t234_hwpm_release_rtr(struct tegra_soc_hwpm *hwpm)
{
	int ret = 0;
	u32 perfmux_idx, perfmon_idx;
	struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
	struct hwpm_ip *chip_ip_rtr = active_chip->chip_ips[T234_HWPM_IP_RTR];
	struct hwpm_ip *chip_ip_pma = active_chip->chip_ips[T234_HWPM_IP_PMA];
	hwpm_ip_perfmux *pma_perfmux = chip_ip_pma->ip_perfmux[0U];
	hwpm_ip_perfmux *rtr_perfmux = NULL;

	tegra_hwpm_fn(hwpm, " ");

	/* Verify that PMA isn't released before RTR */
	if (chip_ip_pma->reserved == false) {
		tegra_hwpm_err(hwpm, "PMA shouldn't be released before RTR");
		return -EINVAL;
	}

	if (!chip_ip_rtr->reserved) {
		tegra_hwpm_dbg(hwpm, hwpm_info, "RTR wasn't mapped, ignoring.");
		return 0;
	}

	/* Release RTR perfmux */
	for (perfmux_idx = 0U; perfmux_idx < chip_ip_rtr->num_perfmux_slots;
		perfmux_idx++) {
		rtr_perfmux = chip_ip_rtr->ip_perfmux[perfmux_idx];
		/* Slot array is sparse; skip empty slots */
		if (rtr_perfmux == NULL) {
			continue;
		}
		if (rtr_perfmux->start_abs_pa == pma_perfmux->start_abs_pa) {
			/* This is PMA perfmux wrt RTR aperture */
			/* Drop the alias; PMA still owns the mapping */
			rtr_perfmux->start_pa = 0ULL;
			rtr_perfmux->end_pa = 0ULL;
			rtr_perfmux->dt_mmio = NULL;
			if (hwpm->fake_registers_enabled) {
				rtr_perfmux->fake_registers = NULL;
			}
		} else {
			/* RTR is hwpm component, use perfmon release func */
			ret = t234_hwpm_perfmon_release(hwpm, rtr_perfmux);
			if (ret != 0) {
				tegra_hwpm_err(hwpm,
					"RTR perfmux %d release failed",
					perfmux_idx);
				return ret;
			}
		}
		chip_ip_rtr->fs_mask &= ~(rtr_perfmux->hw_inst_mask);
	}

	/* Release RTR perfmon */
	for (perfmon_idx = 0U; perfmon_idx < chip_ip_rtr->num_perfmon_slots;
		perfmon_idx++) {
		/* No RTR perfmons */
	}
	chip_ip_rtr->reserved = false;

	return 0;
}
int t234_hwpm_disable_triggers(struct tegra_soc_hwpm *hwpm)
{
int ret = 0;
@@ -296,15 +32,18 @@ int t234_hwpm_disable_triggers(struct tegra_soc_hwpm *hwpm)
u32 field_mask = 0U;
u32 field_val = 0U;
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
/* Currently, PMA has only one perfmux */
hwpm_ip_perfmux *pma_perfmux =
active_chip->chip_ips[T234_HWPM_IP_PMA]->ip_perfmux[0U];
/* Currently, RTR specific perfmux is added at index 0 */
hwpm_ip_perfmux *rtr_perfmux = &active_chip->chip_ips[
T234_HWPM_IP_RTR]->perfmux_static_array[0U];
hwpm_ip_perfmux *pma_perfmux = NULL;
hwpm_ip_perfmux *rtr_perfmux = NULL;
tegra_hwpm_fn(hwpm, " ");
/* Currently, PMA has only one perfmux */
pma_perfmux = &active_chip->chip_ips[
active_chip->get_pma_int_idx(hwpm)]->perfmux_static_array[0U];
/* Currently, RTR specific perfmux is added at index 0 */
rtr_perfmux = &active_chip->chip_ips[
active_chip->get_rtr_int_idx(hwpm)]->perfmux_static_array[0U];
/* Disable PMA triggers */
reg_val = tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_trigger_config_user_r(0));
@@ -359,8 +98,8 @@ int t234_hwpm_init_prod_values(struct tegra_soc_hwpm *hwpm)
u32 reg_val = 0U;
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
/* Currently, PMA has only one perfmux */
hwpm_ip_perfmux *pma_perfmux =
active_chip->chip_ips[T234_HWPM_IP_PMA]->ip_perfmux[0U];
hwpm_ip_perfmux *pma_perfmux = &active_chip->chip_ips[
active_chip->get_pma_int_idx(hwpm)]->perfmux_static_array[0U];
tegra_hwpm_fn(hwpm, " ");
@@ -397,8 +136,8 @@ int t234_hwpm_disable_slcg(struct tegra_soc_hwpm *hwpm)
return -ENODEV;
}
pma_ip = active_chip->chip_ips[T234_HWPM_IP_PMA];
rtr_ip = active_chip->chip_ips[T234_HWPM_IP_RTR];
pma_ip = active_chip->chip_ips[active_chip->get_pma_int_idx(hwpm)];
rtr_ip = active_chip->chip_ips[active_chip->get_rtr_int_idx(hwpm)];
if ((pma_ip == NULL) || !(pma_ip->reserved)) {
tegra_hwpm_err(hwpm, "PMA uninitialized");
@@ -411,7 +150,7 @@ int t234_hwpm_disable_slcg(struct tegra_soc_hwpm *hwpm)
}
/* Currently, PMA has only one perfmux */
pma_perfmux = pma_ip->ip_perfmux[0U];
pma_perfmux = &pma_ip->perfmux_static_array[0U];
/* Currently, RTR specific perfmux is added at index 0 */
rtr_perfmux = &rtr_ip->perfmux_static_array[0U];
@@ -451,8 +190,8 @@ int t234_hwpm_enable_slcg(struct tegra_soc_hwpm *hwpm)
return -ENODEV;
}
pma_ip = active_chip->chip_ips[T234_HWPM_IP_PMA];
rtr_ip = active_chip->chip_ips[T234_HWPM_IP_RTR];
pma_ip = active_chip->chip_ips[active_chip->get_pma_int_idx(hwpm)];
rtr_ip = active_chip->chip_ips[active_chip->get_rtr_int_idx(hwpm)];
if ((pma_ip == NULL) || !(pma_ip->reserved)) {
tegra_hwpm_err(hwpm, "PMA uninitialized");
@@ -465,7 +204,7 @@ int t234_hwpm_enable_slcg(struct tegra_soc_hwpm *hwpm)
}
/* Currently, PMA has only one perfmux */
pma_perfmux = pma_ip->ip_perfmux[0U];
pma_perfmux = &pma_ip->perfmux_static_array[0U];
/* Currently, RTR specific perfmux is added at index 0 */
rtr_perfmux = &rtr_ip->perfmux_static_array[0U];

View File

@@ -15,6 +15,7 @@
#include <uapi/linux/tegra-soc-hwpm-uapi.h>
#include <tegra_hwpm_log.h>
#include <tegra_hwpm_common.h>
#include <tegra_hwpm.h>
#include <tegra_hwpm_static_analysis.h>
#include <hal/t234/t234_hwpm_init.h>
@@ -27,22 +28,28 @@ struct tegra_soc_hwpm_chip t234_chip_info = {
.is_ip_active = t234_hwpm_is_ip_active,
.is_resource_active = t234_hwpm_is_resource_active,
.get_pma_int_idx = t234_get_pma_int_idx,
.get_rtr_int_idx = t234_get_rtr_int_idx,
.get_ip_max_idx = t234_get_ip_max_idx,
.init_chip_ip_structures = tegra_hwpm_init_chip_ip_structures,
.extract_ip_ops = t234_hwpm_extract_ip_ops,
.finalize_chip_info = t234_hwpm_finalize_chip_info,
.force_enable_ips = t234_hwpm_force_enable_ips,
.get_fs_info = t234_hwpm_get_fs_info,
.init_prod_values = t234_hwpm_init_prod_values,
.disable_slcg = t234_hwpm_disable_slcg,
.enable_slcg = t234_hwpm_enable_slcg,
.reserve_pma = t234_hwpm_reserve_pma,
.reserve_rtr = t234_hwpm_reserve_rtr,
.release_pma = t234_hwpm_release_pma,
.release_rtr = t234_hwpm_release_rtr,
.reserve_pma = tegra_hwpm_reserve_pma,
.reserve_rtr = tegra_hwpm_reserve_rtr,
.release_pma = tegra_hwpm_release_pma,
.release_rtr = tegra_hwpm_release_rtr,
.reserve_given_resource = t234_hwpm_reserve_given_resource,
.bind_reserved_resources = t234_hwpm_bind_reserved_resources,
.release_all_resources = t234_hwpm_release_all_resources,
.perfmon_enable = t234_hwpm_perfmon_enable,
.perfmon_disable = t234_hwpm_perfmon_disable,
.perfmux_disable = t234_hwpm_perfmux_disable,
.disable_triggers = t234_hwpm_disable_triggers,
.disable_mem_mgmt = t234_hwpm_disable_mem_mgmt,
@@ -56,13 +63,12 @@ struct tegra_soc_hwpm_chip t234_chip_info = {
.get_alist_buf_size = t234_hwpm_get_alist_buf_size,
.zero_alist_regs = t234_hwpm_zero_alist_regs,
.get_alist_size = t234_hwpm_get_alist_size,
.combine_alist = t234_hwpm_combine_alist,
.copy_alist = t234_hwpm_copy_alist,
.check_alist = t234_hwpm_check_alist,
.exec_reg_ops = t234_hwpm_exec_reg_ops,
.release_sw_setup = t234_hwpm_release_sw_setup,
.release_sw_setup = tegra_hwpm_release_sw_setup,
};
bool t234_hwpm_is_ip_active(struct tegra_soc_hwpm *hwpm,
@@ -263,142 +269,24 @@ bool t234_hwpm_is_resource_active(struct tegra_soc_hwpm *hwpm,
return (config_ip != TEGRA_SOC_HWPM_IP_INACTIVE);
}
static int t234_hwpm_init_ip_perfmux_apertures(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip *chip_ip)
u32 t234_get_pma_int_idx(struct tegra_soc_hwpm *hwpm)
{
u32 idx = 0U, perfmux_idx = 0U, max_perfmux = 0U;
u64 perfmux_address_range = 0ULL, perfmux_offset = 0ULL;
hwpm_ip_perfmux *perfmux = NULL;
/* Initialize perfmux array */
if (chip_ip->num_perfmux_per_inst == 0U) {
/* no perfmux in this IP */
return 0;
return T234_HWPM_IP_PMA;
}
perfmux_address_range = tegra_hwpm_safe_add_u64(
tegra_hwpm_safe_sub_u64(chip_ip->perfmux_range_end,
chip_ip->perfmux_range_start), 1ULL);
chip_ip->num_perfmux_slots = tegra_hwpm_safe_cast_u64_to_u32(
perfmux_address_range / chip_ip->inst_perfmux_stride);
chip_ip->ip_perfmux = kzalloc(
sizeof(hwpm_ip_perfmux *) * chip_ip->num_perfmux_slots,
GFP_KERNEL);
if (chip_ip->ip_perfmux == NULL) {
tegra_hwpm_err(hwpm, "Perfmux pointer array allocation failed");
return -ENOMEM;
}
/* Set all perfmux slot pointers to NULL */
for (idx = 0U; idx < chip_ip->num_perfmux_slots; idx++) {
chip_ip->ip_perfmux[idx] = NULL;
}
/* Assign valid perfmuxes to corresponding slot pointers */
max_perfmux = chip_ip->num_instances * chip_ip->num_perfmux_per_inst;
for (perfmux_idx = 0U; perfmux_idx < max_perfmux; perfmux_idx++) {
perfmux = &chip_ip->perfmux_static_array[perfmux_idx];
/* Compute perfmux offset from perfmux range start */
perfmux_offset = tegra_hwpm_safe_sub_u64(
perfmux->start_abs_pa, chip_ip->perfmux_range_start);
/* Compute perfmux slot index */
idx = tegra_hwpm_safe_cast_u64_to_u32(
perfmux_offset / chip_ip->inst_perfmux_stride);
/* Set perfmux slot pointer */
chip_ip->ip_perfmux[idx] = perfmux;
}
return 0;
}
static int t234_hwpm_init_ip_perfmon_apertures(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip *chip_ip)
u32 t234_get_rtr_int_idx(struct tegra_soc_hwpm *hwpm)
{
u32 idx = 0U, perfmon_idx = 0U, max_perfmon = 0U;
u64 perfmon_address_range = 0ULL, perfmon_offset = 0ULL;
hwpm_ip_perfmon *perfmon = NULL;
/* Initialize perfmon array */
if (chip_ip->num_perfmon_per_inst == 0U) {
/* no perfmons in this IP */
return 0;
return T234_HWPM_IP_RTR;
}
perfmon_address_range = tegra_hwpm_safe_add_u64(
tegra_hwpm_safe_sub_u64(chip_ip->perfmon_range_end,
chip_ip->perfmon_range_start), 1ULL);
chip_ip->num_perfmon_slots = tegra_hwpm_safe_cast_u64_to_u32(
perfmon_address_range / chip_ip->inst_perfmon_stride);
chip_ip->ip_perfmon = kzalloc(
sizeof(hwpm_ip_perfmon *) * chip_ip->num_perfmon_slots,
GFP_KERNEL);
if (chip_ip->ip_perfmon == NULL) {
tegra_hwpm_err(hwpm, "Perfmon pointer array allocation failed");
return -ENOMEM;
}
/* Set all perfmon slot pointers to NULL */
for (idx = 0U; idx < chip_ip->num_perfmon_slots; idx++) {
chip_ip->ip_perfmon[idx] = NULL;
}
/* Assign valid perfmuxes to corresponding slot pointers */
max_perfmon = chip_ip->num_instances * chip_ip->num_perfmon_per_inst;
for (perfmon_idx = 0U; perfmon_idx < max_perfmon; perfmon_idx++) {
perfmon = &chip_ip->perfmon_static_array[perfmon_idx];
/* Compute perfmon offset from perfmon range start */
perfmon_offset = tegra_hwpm_safe_sub_u64(
perfmon->start_abs_pa, chip_ip->perfmon_range_start);
/* Compute perfmon slot index */
idx = tegra_hwpm_safe_cast_u64_to_u32(
perfmon_offset / chip_ip->inst_perfmon_stride);
/* Set perfmon slot pointer */
chip_ip->ip_perfmon[idx] = perfmon;
}
return 0;
}
static int t234_hwpm_init_chip_ip_structures(struct tegra_soc_hwpm *hwpm)
u32 t234_get_ip_max_idx(struct tegra_soc_hwpm *hwpm)
{
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
struct hwpm_ip *chip_ip = NULL;
u32 ip_idx;
int ret = 0;
for (ip_idx = 0U; ip_idx < T234_HWPM_IP_MAX; ip_idx++) {
chip_ip = active_chip->chip_ips[ip_idx];
ret = t234_hwpm_init_ip_perfmon_apertures(hwpm, chip_ip);
if (ret != 0) {
tegra_hwpm_err(hwpm, "IP %d perfmon alloc failed",
ip_idx);
return ret;
}
ret = t234_hwpm_init_ip_perfmux_apertures(hwpm, chip_ip);
if (ret != 0) {
tegra_hwpm_err(hwpm, "IP %d perfmux alloc failed",
ip_idx);
return ret;
}
}
return 0;
return T234_HWPM_IP_MAX;
}
int t234_hwpm_init_chip_info(struct tegra_soc_hwpm *hwpm)
{
struct hwpm_ip **t234_active_ip_info;
int ret = 0;
/* Allocate array of pointers to hold active IP structures */
t234_chip_info.chip_ips =
@@ -465,33 +353,6 @@ int t234_hwpm_init_chip_info(struct tegra_soc_hwpm *hwpm)
#if defined(CONFIG_SOC_HWPM_IP_VIC)
t234_active_ip_info[T234_HWPM_IP_VIC] = &t234_hwpm_ip_vic;
#endif
ret = t234_hwpm_init_chip_ip_structures(hwpm);
if (ret != 0) {
tegra_hwpm_err(hwpm, "IP structure init failed");
return ret;
}
return 0;
}
/*
 * Free the per-IP perfmux/perfmon slot pointer arrays that were
 * allocated when the chip IP structures were initialized.
 */
void t234_hwpm_release_sw_setup(struct tegra_soc_hwpm *hwpm)
{
	u32 idx;

	for (idx = 0U; idx < T234_HWPM_IP_MAX; idx++) {
		struct hwpm_ip *ip = hwpm->active_chip->chip_ips[idx];

		/* Arrays exist only for IPs that have apertures of
		 * the corresponding type */
		if (ip->num_perfmux_per_inst != 0U) {
			kfree(ip->ip_perfmux);
		}
		if (ip->num_perfmon_per_inst != 0U) {
			kfree(ip->ip_perfmon);
		}
	}
}

View File

@@ -73,9 +73,13 @@ bool t234_hwpm_is_ip_active(struct tegra_soc_hwpm *hwpm,
bool t234_hwpm_is_resource_active(struct tegra_soc_hwpm *hwpm,
u32 res_index, u32 *config_ip_index);
u32 t234_get_pma_int_idx(struct tegra_soc_hwpm *hwpm);
u32 t234_get_rtr_int_idx(struct tegra_soc_hwpm *hwpm);
u32 t234_get_ip_max_idx(struct tegra_soc_hwpm *hwpm);
int t234_hwpm_extract_ip_ops(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_ip_ops *hwpm_ip_ops, bool available);
int t234_hwpm_finalize_chip_info(struct tegra_soc_hwpm *hwpm);
int t234_hwpm_force_enable_ips(struct tegra_soc_hwpm *hwpm);
int t234_hwpm_get_fs_info(struct tegra_soc_hwpm *hwpm,
u32 ip_index, u64 *fs_mask, u8 *ip_status);
@@ -83,19 +87,13 @@ int t234_hwpm_init_prod_values(struct tegra_soc_hwpm *hwpm);
int t234_hwpm_disable_slcg(struct tegra_soc_hwpm *hwpm);
int t234_hwpm_enable_slcg(struct tegra_soc_hwpm *hwpm);
int t234_hwpm_reserve_pma(struct tegra_soc_hwpm *hwpm);
int t234_hwpm_reserve_rtr(struct tegra_soc_hwpm *hwpm);
int t234_hwpm_release_pma(struct tegra_soc_hwpm *hwpm);
int t234_hwpm_release_rtr(struct tegra_soc_hwpm *hwpm);
int t234_hwpm_perfmon_reserve(struct tegra_soc_hwpm *hwpm,
hwpm_ip_perfmon *perfmon);
int t234_hwpm_perfmon_release(struct tegra_soc_hwpm *hwpm,
hwpm_ip_perfmon *perfmon);
int t234_hwpm_reserve_given_resource(struct tegra_soc_hwpm *hwpm, u32 ip_idx);
int t234_hwpm_bind_reserved_resources(struct tegra_soc_hwpm *hwpm);
int t234_hwpm_disable_triggers(struct tegra_soc_hwpm *hwpm);
int t234_hwpm_release_all_resources(struct tegra_soc_hwpm *hwpm);
int t234_hwpm_perfmon_enable(struct tegra_soc_hwpm *hwpm,
hwpm_ip_perfmon *perfmon);
int t234_hwpm_perfmux_disable(struct tegra_soc_hwpm *hwpm,
hwpm_ip_perfmux *perfmux);
int t234_hwpm_perfmon_disable(struct tegra_soc_hwpm *hwpm,
hwpm_ip_perfmon *perfmon);
int t234_hwpm_disable_mem_mgmt(struct tegra_soc_hwpm *hwpm);
int t234_hwpm_enable_mem_mgmt(struct tegra_soc_hwpm *hwpm,
@@ -111,15 +109,13 @@ bool t234_hwpm_membuf_overflow_status(struct tegra_soc_hwpm *hwpm);
size_t t234_hwpm_get_alist_buf_size(struct tegra_soc_hwpm *hwpm);
int t234_hwpm_zero_alist_regs(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip_aperture *aperture);
int t234_hwpm_get_alist_size(struct tegra_soc_hwpm *hwpm);
int t234_hwpm_combine_alist(struct tegra_soc_hwpm *hwpm, u64 *alist);
int t234_hwpm_copy_alist(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip_aperture *aperture, u64 *full_alist,
u64 *full_alist_idx);
bool t234_hwpm_check_alist(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip_aperture *aperture, u64 phys_addr);
int t234_hwpm_exec_reg_ops(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_reg_op *reg_op);
void t234_hwpm_release_sw_setup(struct tegra_soc_hwpm *hwpm);
#endif /* T234_HWPM_INTERNAL_H */

View File

@@ -15,265 +15,16 @@
#include <uapi/linux/tegra-soc-hwpm-uapi.h>
#include <tegra_hwpm_log.h>
#include <tegra_hwpm_common.h>
#include <tegra_hwpm.h>
#include <tegra_hwpm_static_analysis.h>
#include <hal/t234/t234_hwpm_internal.h>
#include <hal/t234/hw/t234_addr_map_soc_hwpm.h>
/*
* Currently, all IPs do not self register to the hwpm driver
* This function is used to force set floorsweep mask for IPs which
* contain perfmon only (eg. SCF)
*/
static int t234_hwpm_update_floorsweep_mask_using_perfmon(
struct tegra_soc_hwpm *hwpm,
u32 ip_idx, u32 ip_perfmon_idx, bool available)
{
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
struct hwpm_ip *chip_ip = active_chip->chip_ips[ip_idx];
hwpm_ip_perfmon *perfmon = NULL;
tegra_hwpm_fn(hwpm, " ");
if (chip_ip->override_enable) {
/* This IP shouldn't be configured, ignore this request */
return 0;
}
perfmon = chip_ip->ip_perfmon[ip_perfmon_idx];
if (perfmon == NULL) {
tegra_hwpm_err(hwpm,
"IP %d perfmon_idx %d not populated as expected",
ip_idx, ip_perfmon_idx);
return -EINVAL;
}
/* Update floorsweep info */
if (available) {
chip_ip->fs_mask |= perfmon->hw_inst_mask;
} else {
chip_ip->fs_mask &= ~(perfmon->hw_inst_mask);
}
return 0;
}
static int t234_hwpm_update_floorsweep_mask(struct tegra_soc_hwpm *hwpm,
u32 ip_idx, u32 ip_perfmux_idx, bool available)
{
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
struct hwpm_ip *chip_ip = active_chip->chip_ips[ip_idx];
hwpm_ip_perfmux *perfmux = NULL;
tegra_hwpm_fn(hwpm, " ");
if (chip_ip->override_enable) {
/* This IP shouldn't be configured, ignore this request */
return 0;
}
perfmux = chip_ip->ip_perfmux[ip_perfmux_idx];
if (perfmux == NULL) {
tegra_hwpm_err(hwpm,
"IP %d perfmux_idx %d not populated as expected",
ip_idx, ip_perfmux_idx);
return -EINVAL;
}
/* Update floorsweep info */
if (available) {
chip_ip->fs_mask |= perfmux->hw_inst_mask;
} else {
chip_ip->fs_mask &= ~(perfmux->hw_inst_mask);
}
return 0;
}
static int t234_hwpm_update_ip_ops_info(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_ip_ops *hwpm_ip_ops,
u32 ip_idx, u32 ip_perfmux_idx, bool available)
{
u32 perfmux_idx, max_num_perfmux = 0U;
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
struct hwpm_ip *chip_ip = active_chip->chip_ips[ip_idx];
struct tegra_hwpm_ip_ops *ip_ops;
hwpm_ip_perfmux *given_perfmux = chip_ip->ip_perfmux[ip_perfmux_idx];
hwpm_ip_perfmux *perfmux = NULL;
tegra_hwpm_fn(hwpm, " ");
if (chip_ip->override_enable) {
/* This IP shouldn't be configured, ignore this request */
return 0;
}
if (given_perfmux == NULL) {
tegra_hwpm_err(hwpm,
"IP %d given_perfmux idx %d not populated as expected",
ip_idx, ip_perfmux_idx);
return -EINVAL;
}
/* Update IP ops info for all perfmux in the instance */
max_num_perfmux = tegra_hwpm_safe_mult_u32(
chip_ip->num_instances, chip_ip->num_perfmux_per_inst);
for (perfmux_idx = 0U; perfmux_idx < max_num_perfmux; perfmux_idx++) {
perfmux = &chip_ip->perfmux_static_array[perfmux_idx];
if (perfmux->hw_inst_mask != given_perfmux->hw_inst_mask) {
continue;
}
ip_ops = &perfmux->ip_ops;
if (available) {
ip_ops->ip_dev = hwpm_ip_ops->ip_dev;
ip_ops->hwpm_ip_pm = hwpm_ip_ops->hwpm_ip_pm;
ip_ops->hwpm_ip_reg_op = hwpm_ip_ops->hwpm_ip_reg_op;
} else {
ip_ops->ip_dev = NULL;
ip_ops->hwpm_ip_pm = NULL;
ip_ops->hwpm_ip_reg_op = NULL;
}
}
return 0;
}
static int t234_hwpm_fs_and_ip_ops(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_ip_ops *hwpm_ip_ops,
u32 ip_idx, u32 perfmux_idx, bool available)
{
int ret = -EINVAL;
tegra_hwpm_fn(hwpm, " ");
ret = t234_hwpm_update_floorsweep_mask(
hwpm, ip_idx, perfmux_idx, available);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"IP %d perfmux %d: Failed to update FS mask",
ip_idx, perfmux_idx);
goto fail;
}
ret = t234_hwpm_update_ip_ops_info(hwpm, hwpm_ip_ops,
ip_idx, perfmux_idx, available);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"IP %d perfmux %d: Failed to update ip_ops",
ip_idx, perfmux_idx);
goto fail;
}
fail:
return ret;
}
/*
* This function finds the IP perfmux index corresponding to given base address.
* Perfmux aperture belongs to IP domain and contains IP instance info
* wrt base address.
* Return instance index
*/
static int t234_hwpm_find_ip_perfmux_index(struct tegra_soc_hwpm *hwpm,
u64 base_addr, u32 ip_index, u32 *ip_perfmux_idx)
{
struct tegra_soc_hwpm_chip *active_chip = NULL;
struct hwpm_ip *chip_ip = NULL;
u32 perfmux_idx;
u64 addr_offset = 0ULL;
hwpm_ip_perfmux *perfmux = NULL;
tegra_hwpm_fn(hwpm, " ");
if (ip_perfmux_idx == NULL) {
tegra_hwpm_err(hwpm, "pointer for ip_perfmux_idx is NULL");
return -EINVAL;
}
if (hwpm->active_chip == NULL) {
tegra_hwpm_err(hwpm, "chip struct not populated");
return -ENODEV;
}
active_chip = hwpm->active_chip;
if (ip_index == TEGRA_SOC_HWPM_IP_INACTIVE) {
tegra_hwpm_err(hwpm, "invalid ip_index %d", ip_index);
return -EINVAL;
}
chip_ip = active_chip->chip_ips[ip_index];
if (chip_ip == NULL) {
tegra_hwpm_err(hwpm, "IP %d not populated", ip_index);
return -ENODEV;
}
if (chip_ip->override_enable) {
/* This IP should not be configured for HWPM */
tegra_hwpm_dbg(hwpm, hwpm_info,
"IP %d enable override", ip_index);
return 0; /* Should this be notified to caller or ignored */
}
/* Validate phys_addr falls in IP address range */
if ((base_addr < chip_ip->perfmux_range_start) ||
(base_addr > chip_ip->perfmux_range_end)) {
tegra_hwpm_dbg(hwpm, hwpm_info,
"phys address 0x%llx not in IP %d",
base_addr, ip_index);
return -ENODEV;
}
/* Find IP instance for given phys_address */
/*
* Since all IP instances are configured to be in consecutive memory,
* instance index can be found using instance physical address stride.
*/
addr_offset = tegra_hwpm_safe_sub_u64(
base_addr, chip_ip->perfmux_range_start);
perfmux_idx = tegra_hwpm_safe_cast_u64_to_u32(
addr_offset / chip_ip->inst_perfmux_stride);
/* Make sure instance index is valid */
if (perfmux_idx >= chip_ip->num_perfmux_slots) {
tegra_hwpm_err(hwpm,
"IP:%d -> base addr 0x%llx is out of bounds",
ip_index, base_addr);
return -EINVAL;
}
/* Validate IP instance perfmux start address = given phys addr */
perfmux = chip_ip->ip_perfmux[perfmux_idx];
if (perfmux == NULL) {
/*
* This a valid case as not all MSS base addresses are shared
* between MSS IPs.
*/
tegra_hwpm_dbg(hwpm, hwpm_info,
"For addr 0x%llx IP %d perfmux_idx %d not populated",
base_addr, ip_index, perfmux_idx);
return -ENODEV;
}
if (base_addr != perfmux->start_abs_pa) {
tegra_hwpm_dbg(hwpm, hwpm_info,
"base addr 0x%llx != perfmux abs addr", base_addr);
return -EINVAL;
}
*ip_perfmux_idx = perfmux_idx;
return 0;
}
int t234_hwpm_extract_ip_ops(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_ip_ops *hwpm_ip_ops, bool available)
{
int ret = 0;
u32 perfmux_idx = 0U;
u32 ip_idx = 0U;
tegra_hwpm_fn(hwpm, " ");
@@ -301,111 +52,85 @@ int t234_hwpm_extract_ip_ops(struct tegra_soc_hwpm *hwpm,
case T234_HWPM_IP_PCIE:
case T234_HWPM_IP_DISPLAY:
case T234_HWPM_IP_MSS_GPU_HUB:
/* Get IP info */
ret = t234_hwpm_find_ip_perfmux_index(hwpm,
hwpm_ip_ops->ip_base_address, ip_idx, &perfmux_idx);
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, hwpm_ip_ops,
hwpm_ip_ops->ip_base_address, ip_idx, available);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"IP %d base 0x%llx no perfmux match",
ip_idx, hwpm_ip_ops->ip_base_address);
goto fail;
}
ret = t234_hwpm_fs_and_ip_ops(hwpm, hwpm_ip_ops,
ip_idx, perfmux_idx, available);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"Failed to %s fs/ops for IP %d perfmux %d",
"Failed to %s fs/ops for IP %d (base 0x%llx)",
available == true ? "set" : "reset",
ip_idx, perfmux_idx);
ip_idx, hwpm_ip_ops->ip_base_address);
goto fail;
}
break;
case T234_HWPM_IP_MSS_CHANNEL:
case T234_HWPM_IP_MSS_ISO_NISO_HUBS:
case T234_HWPM_IP_MSS_MCF:
/* MSS channel, ISO NISO hubs and MCF share MC channels */
/* Check base address in T234_HWPM_IP_MSS_CHANNEL */
ip_idx = T234_HWPM_IP_MSS_CHANNEL;
ret = t234_hwpm_find_ip_perfmux_index(hwpm,
hwpm_ip_ops->ip_base_address, ip_idx, &perfmux_idx);
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, hwpm_ip_ops,
hwpm_ip_ops->ip_base_address, ip_idx, available);
if (ret != 0) {
/*
* Return value of ENODEV will indicate that the base
* address doesn't belong to this IP.
* This case is valid, as not all base addresses are
* shared between MSS IPs.
* Hence, reset return value to 0.
* In this case, reset return value to 0.
*/
if (ret != -ENODEV) {
goto fail;
}
ret = 0;
} else {
ret = t234_hwpm_fs_and_ip_ops(hwpm, hwpm_ip_ops,
ip_idx, perfmux_idx, available);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"IP %d perfmux %d: fs/ops %s failed",
ip_idx, perfmux_idx,
"IP %d base 0x%llx:Failed to %s fs/ops",
ip_idx, hwpm_ip_ops->ip_base_address,
available == true ? "set" : "reset");
goto fail;
}
ret = 0;
}
/* Check base address in T234_HWPM_IP_MSS_ISO_NISO_HUBS */
ip_idx = T234_HWPM_IP_MSS_ISO_NISO_HUBS;
ret = t234_hwpm_find_ip_perfmux_index(hwpm,
hwpm_ip_ops->ip_base_address, ip_idx, &perfmux_idx);
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, hwpm_ip_ops,
hwpm_ip_ops->ip_base_address, ip_idx, available);
if (ret != 0) {
/*
* Return value of ENODEV will indicate that the base
* address doesn't belong to this IP.
* This case is valid, as not all base addresses are
* shared between MSS IPs.
* Hence, reset return value to 0.
* In this case, reset return value to 0.
*/
if (ret != -ENODEV) {
goto fail;
}
ret = 0;
} else {
ret = t234_hwpm_fs_and_ip_ops(hwpm, hwpm_ip_ops,
ip_idx, perfmux_idx, available);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"IP %d perfmux %d: fs/ops %s failed",
ip_idx, perfmux_idx,
"IP %d base 0x%llx:Failed to %s fs/ops",
ip_idx, hwpm_ip_ops->ip_base_address,
available == true ? "set" : "reset");
goto fail;
}
ret = 0;
}
/* Check base address in T234_HWPM_IP_MSS_MCF */
ip_idx = T234_HWPM_IP_MSS_MCF;
ret = t234_hwpm_find_ip_perfmux_index(hwpm,
hwpm_ip_ops->ip_base_address, ip_idx, &perfmux_idx);
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, hwpm_ip_ops,
hwpm_ip_ops->ip_base_address, ip_idx, available);
if (ret != 0) {
/*
* Return value of ENODEV will indicate that the base
* address doesn't belong to this IP.
* This case is valid, as not all base addresses are
* shared between MSS IPs.
* Hence, reset return value to 0.
* In this case, reset return value to 0.
*/
if (ret != -ENODEV) {
goto fail;
}
ret = 0;
} else {
ret = t234_hwpm_fs_and_ip_ops(hwpm, hwpm_ip_ops,
ip_idx, perfmux_idx, available);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"IP %d perfmux %d: fs/ops %s failed",
ip_idx, perfmux_idx,
"IP %d base 0x%llx:Failed to %s fs/ops",
ip_idx, hwpm_ip_ops->ip_base_address,
available == true ? "set" : "reset");
goto fail;
}
ret = 0;
}
break;
case T234_HWPM_IP_PMA:
@@ -419,40 +144,7 @@ fail:
return ret;
}
/*
* Find IP perfmux index and set corresponding floorsweep info.
*/
int t234_hwpm_set_fs_info(struct tegra_soc_hwpm *hwpm, u64 base_address,
u32 ip_idx, bool available)
{
int ret = 0;
u32 perfmux_idx = 0U;
tegra_hwpm_fn(hwpm, " ");
ret = t234_hwpm_find_ip_perfmux_index(hwpm,
base_address, ip_idx, &perfmux_idx);
if (ret != 0) {
tegra_hwpm_err(hwpm, "IP %d base 0x%llx no perfmux match",
ip_idx, base_address);
goto fail;
}
/* TODO: Check if force enable is required */
ret = t234_hwpm_update_floorsweep_mask(
hwpm, ip_idx, perfmux_idx, available);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"IP %d perfmux %d base 0x%llx: FS mask update failed",
ip_idx, perfmux_idx, base_address);
goto fail;
}
fail:
return ret;
}
static int t234_hwpm_force_enable_ips(struct tegra_soc_hwpm *hwpm)
int t234_hwpm_force_enable_ips(struct tegra_soc_hwpm *hwpm)
{
u32 i = 0U;
int ret = 0;
@@ -462,67 +154,63 @@ static int t234_hwpm_force_enable_ips(struct tegra_soc_hwpm *hwpm)
tegra_hwpm_fn(hwpm, " ");
if (tegra_platform_is_vsp()) {
/* Modules enabled only in L4T and not hypervisor config*/
/* As HWPM support on hypervisor is pushed to mainline*/
/* The below IPs are disabled on hypervisor currently */
if (!is_tegra_hypervisor_mode()) {
/* Static IP instances as per VSP netlist */
/* MSS CHANNEL: vsp has single instance available */
ret = t234_hwpm_set_fs_info(hwpm, addr_map_mc0_base_r(),
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_mc0_base_r(),
T234_HWPM_IP_MSS_CHANNEL, true);
if (ret != 0) {
goto fail;
}
/* MSS GPU HUB */
ret = t234_hwpm_set_fs_info(hwpm,
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_mss_nvlink_1_base_r(),
T234_HWPM_IP_MSS_GPU_HUB, true);
if (ret != 0) {
goto fail;
}
}
}
if (tegra_platform_is_silicon()) {
/* Modules enabled only in L4T and not hypervisor config*/
/* As HWPM support on hypervisor is pushed to mainline*/
/* The below IPs are disabled on hypervisor currently */
if (!is_tegra_hypervisor_mode()) {
/* Static IP instances corresponding to silicon */
/* VI */
/*ret = t234_hwpm_set_fs_info(hwpm, addr_map_vi_thi_base_r(),
/*ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_vi_thi_base_r(),
T234_HWPM_IP_VI, true);
if (ret != 0) {
goto fail;
}
ret = t234_hwpm_set_fs_info(hwpm, addr_map_vi2_thi_base_r(),
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_vi2_thi_base_r(),
T234_HWPM_IP_VI, true);
if (ret != 0) {
goto fail;
}*/
/* ISP */
ret = t234_hwpm_set_fs_info(hwpm, addr_map_isp_thi_base_r(),
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_isp_thi_base_r(),
T234_HWPM_IP_ISP, true);
if (ret != 0) {
goto fail;
}
/* PVA */
ret = t234_hwpm_set_fs_info(hwpm, addr_map_pva0_pm_base_r(),
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_pva0_pm_base_r(),
T234_HWPM_IP_PVA, true);
if (ret != 0) {
goto fail;
}
/* NVDLA */
ret = t234_hwpm_set_fs_info(hwpm,
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_nvdla0_base_r(),
T234_HWPM_IP_NVDLA, true);
if (ret != 0) {
goto fail;
}
ret = t234_hwpm_set_fs_info(hwpm,
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_nvdla1_base_r(),
T234_HWPM_IP_NVDLA, true);
if (ret != 0) {
@@ -530,7 +218,7 @@ static int t234_hwpm_force_enable_ips(struct tegra_soc_hwpm *hwpm)
}
/* MGBE */
/*ret = t234_hwpm_set_fs_info(hwpm,
/*ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_mgbe0_mac_rm_base_r(),
T234_HWPM_IP_MGBE, true);
if (ret != 0) {
@@ -538,35 +226,35 @@ static int t234_hwpm_force_enable_ips(struct tegra_soc_hwpm *hwpm)
}*/
/* SCF */
ret = t234_hwpm_update_floorsweep_mask_using_perfmon(hwpm,
T234_HWPM_IP_SCF, 0U, true);
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_rpg_pm_scf_base_r(),
T234_HWPM_IP_SCF, true);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"T234_HWPM_IP_SCF: FS mask update failed");
goto fail;
}
/* NVDEC */
ret = t234_hwpm_set_fs_info(hwpm, addr_map_nvdec_base_r(),
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_nvdec_base_r(),
T234_HWPM_IP_NVDEC, true);
if (ret != 0) {
goto fail;
}
/* PCIE */
/*ret = t234_hwpm_set_fs_info(hwpm,
/*ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_pcie_c1_ctl_base_r(),
T234_HWPM_IP_PCIE, true);
if (ret != 0) {
goto fail;
}
ret = t234_hwpm_set_fs_info(hwpm,
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_pcie_c4_ctl_base_r(),
T234_HWPM_IP_PCIE, true);
if (ret != 0) {
goto fail;
}
ret = t234_hwpm_set_fs_info(hwpm,
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_pcie_c5_ctl_base_r(),
T234_HWPM_IP_PCIE, true);
if (ret != 0) {
@@ -574,60 +262,66 @@ static int t234_hwpm_force_enable_ips(struct tegra_soc_hwpm *hwpm)
}*/
/* DISPLAY */
/*ret = t234_hwpm_set_fs_info(hwpm, addr_map_disp_base_r(),
/*ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_disp_base_r(),
T234_HWPM_IP_DISPLAY, true);
if (ret != 0) {
goto fail;
}*/
/* MSS CHANNEL */
ret = t234_hwpm_set_fs_info(hwpm, addr_map_mc0_base_r(),
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_mc0_base_r(),
T234_HWPM_IP_MSS_CHANNEL, true);
if (ret != 0) {
goto fail;
}
ret = t234_hwpm_set_fs_info(hwpm, addr_map_mc4_base_r(),
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_mc4_base_r(),
T234_HWPM_IP_MSS_CHANNEL, true);
if (ret != 0) {
goto fail;
}
ret = t234_hwpm_set_fs_info(hwpm, addr_map_mc8_base_r(),
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_mc8_base_r(),
T234_HWPM_IP_MSS_CHANNEL, true);
if (ret != 0) {
goto fail;
}
ret = t234_hwpm_set_fs_info(hwpm, addr_map_mc12_base_r(),
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_mc12_base_r(),
T234_HWPM_IP_MSS_CHANNEL, true);
if (ret != 0) {
goto fail;
}
/* MSS ISO NISO HUBS */
ret = t234_hwpm_set_fs_info(hwpm, addr_map_mc0_base_r(),
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_mc0_base_r(),
T234_HWPM_IP_MSS_ISO_NISO_HUBS, true);
if (ret != 0) {
goto fail;
}
/* MSS MCF */
ret = t234_hwpm_set_fs_info(hwpm, addr_map_mc0_base_r(),
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_mc0_base_r(),
T234_HWPM_IP_MSS_MCF, true);
if (ret != 0) {
goto fail;
}
/* MSS GPU HUB */
ret = t234_hwpm_set_fs_info(hwpm,
ret = tegra_hwpm_set_fs_info_ip_ops(hwpm, NULL,
addr_map_mss_nvlink_1_base_r(),
T234_HWPM_IP_MSS_GPU_HUB, true);
if (ret != 0) {
goto fail;
}
}
}
tegra_hwpm_dbg(hwpm, hwpm_verbose, "IP floorsweep info:");
for (i = 0U; i < T234_HWPM_IP_MAX; i++) {
for (i = 0U; i < active_chip->get_ip_max_idx(hwpm); i++) {
chip_ip = active_chip->chip_ips[i];
tegra_hwpm_dbg(hwpm, hwpm_verbose, "IP:%d fs_mask:0x%x",
i, chip_ip->fs_mask);
@@ -637,51 +331,6 @@ fail:
return ret;
}
static int t234_hwpm_complete_ip_register(struct tegra_soc_hwpm *hwpm)
{
int ret = 0;
struct hwpm_ip_register_list *node = ip_register_list_head;
tegra_hwpm_fn(hwpm, " ");
while (node != NULL) {
tegra_hwpm_dbg(hwpm, hwpm_info, "IP ext idx %d info",
node->ip_ops.ip_index);
ret = t234_hwpm_extract_ip_ops(hwpm, &node->ip_ops, true);
if (ret != 0) {
tegra_hwpm_err(hwpm, "Failed to extract IP ops");
return ret;
}
node = node->next;
}
return ret;
}
/*
* Some IPs don't register with HWPM driver at the moment. Force set available
* instances of such IPs.
*/
int t234_hwpm_finalize_chip_info(struct tegra_soc_hwpm *hwpm)
{
int ret = 0;
tegra_hwpm_fn(hwpm, " ");
ret = t234_hwpm_complete_ip_register(hwpm);
if (ret != 0) {
tegra_hwpm_err(hwpm, "Failed register IPs");
return ret;
}
ret = t234_hwpm_force_enable_ips(hwpm);
if (ret != 0) {
tegra_hwpm_err(hwpm, "Failed to force enable IPs");
return ret;
}
return ret;
}
int t234_hwpm_get_fs_info(struct tegra_soc_hwpm *hwpm,
u32 ip_index, u64 *fs_mask, u8 *ip_status)
{

View File

@@ -27,8 +27,8 @@ int t234_hwpm_disable_mem_mgmt(struct tegra_soc_hwpm *hwpm)
{
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
/* Currently, PMA has only one perfmux */
hwpm_ip_perfmux *pma_perfmux =
active_chip->chip_ips[T234_HWPM_IP_PMA]->ip_perfmux[0U];
hwpm_ip_perfmux *pma_perfmux = &active_chip->chip_ips[
active_chip->get_pma_int_idx(hwpm)]->perfmux_static_array[0U];
tegra_hwpm_fn(hwpm, " ");
@@ -51,8 +51,8 @@ int t234_hwpm_enable_mem_mgmt(struct tegra_soc_hwpm *hwpm,
u32 mem_bytes_addr = 0;
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
/* Currently, PMA has only one perfmux */
hwpm_ip_perfmux *pma_perfmux =
active_chip->chip_ips[T234_HWPM_IP_PMA]->ip_perfmux[0U];
hwpm_ip_perfmux *pma_perfmux = &active_chip->chip_ips[
active_chip->get_pma_int_idx(hwpm)]->perfmux_static_array[0U];
tegra_hwpm_fn(hwpm, " ");
@@ -92,8 +92,8 @@ int t234_hwpm_invalidate_mem_config(struct tegra_soc_hwpm *hwpm)
{
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
/* Currently, PMA has only one perfmux */
hwpm_ip_perfmux *pma_perfmux =
active_chip->chip_ips[T234_HWPM_IP_PMA]->ip_perfmux[0U];
hwpm_ip_perfmux *pma_perfmux = &active_chip->chip_ips[
active_chip->get_pma_int_idx(hwpm)]->perfmux_static_array[0U];
tegra_hwpm_fn(hwpm, " ");
@@ -110,8 +110,8 @@ int t234_hwpm_stream_mem_bytes(struct tegra_soc_hwpm *hwpm)
u32 *mem_bytes_kernel_u32 = (u32 *)(hwpm->mem_bytes_kernel);
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
/* Currently, PMA has only one perfmux */
hwpm_ip_perfmux *pma_perfmux =
active_chip->chip_ips[T234_HWPM_IP_PMA]->ip_perfmux[0U];
hwpm_ip_perfmux *pma_perfmux = &active_chip->chip_ips[
active_chip->get_pma_int_idx(hwpm)]->perfmux_static_array[0U];
tegra_hwpm_fn(hwpm, " ");
@@ -133,8 +133,8 @@ int t234_hwpm_disable_pma_streaming(struct tegra_soc_hwpm *hwpm)
u32 reg_val = 0U;
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
/* Currently, PMA has only one perfmux */
hwpm_ip_perfmux *pma_perfmux =
active_chip->chip_ips[T234_HWPM_IP_PMA]->ip_perfmux[0U];
hwpm_ip_perfmux *pma_perfmux = &active_chip->chip_ips[
active_chip->get_pma_int_idx(hwpm)]->perfmux_static_array[0U];
tegra_hwpm_fn(hwpm, " ");
@@ -163,8 +163,8 @@ int t234_hwpm_update_mem_bytes_get_ptr(struct tegra_soc_hwpm *hwpm,
{
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
/* Currently, PMA has only one perfmux */
hwpm_ip_perfmux *pma_perfmux =
active_chip->chip_ips[T234_HWPM_IP_PMA]->ip_perfmux[0U];
hwpm_ip_perfmux *pma_perfmux = &active_chip->chip_ips[
active_chip->get_pma_int_idx(hwpm)]->perfmux_static_array[0U];
tegra_hwpm_fn(hwpm, " ");
@@ -183,8 +183,8 @@ u64 t234_hwpm_get_mem_bytes_put_ptr(struct tegra_soc_hwpm *hwpm)
{
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
/* Currently, PMA has only one perfmux */
hwpm_ip_perfmux *pma_perfmux =
active_chip->chip_ips[T234_HWPM_IP_PMA]->ip_perfmux[0U];
hwpm_ip_perfmux *pma_perfmux = &active_chip->chip_ips[
active_chip->get_pma_int_idx(hwpm)]->perfmux_static_array[0U];
tegra_hwpm_fn(hwpm, " ");
@@ -197,8 +197,8 @@ bool t234_hwpm_membuf_overflow_status(struct tegra_soc_hwpm *hwpm)
u32 reg_val, field_val;
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
/* Currently, PMA has only one perfmux */
hwpm_ip_perfmux *pma_perfmux =
active_chip->chip_ips[T234_HWPM_IP_PMA]->ip_perfmux[0U];
hwpm_ip_perfmux *pma_perfmux = &active_chip->chip_ips[
active_chip->get_pma_int_idx(hwpm)]->perfmux_static_array[0U];
tegra_hwpm_fn(hwpm, " ");

View File

@@ -187,7 +187,8 @@ static int t234_hwpm_find_aperture(struct tegra_soc_hwpm *hwpm,
active_chip = hwpm->active_chip;
/* Find IP index */
for (ip_idx = 0U; ip_idx < T234_HWPM_IP_MAX; ip_idx++) {
for (ip_idx = 0U; ip_idx < active_chip->get_ip_max_idx(hwpm);
ip_idx++) {
chip_ip = active_chip->chip_ips[ip_idx];
if (chip_ip == NULL) {
tegra_hwpm_err(hwpm, "IP %d not populated as expected",

View File

@@ -16,7 +16,6 @@
#include <linux/of_address.h>
#include <uapi/linux/tegra-soc-hwpm-uapi.h>
#include <tegra_hwpm_static_analysis.h>
#include <tegra_hwpm_log.h>
#include <tegra_hwpm_io.h>
#include <tegra_hwpm.h>
@@ -26,7 +25,7 @@
#include <hal/t234/hw/t234_pmasys_soc_hwpm.h>
#include <hal/t234/hw/t234_pmmsys_soc_hwpm.h>
static int t234_hwpm_perfmon_enable(struct tegra_soc_hwpm *hwpm,
int t234_hwpm_perfmon_enable(struct tegra_soc_hwpm *hwpm,
hwpm_ip_perfmon *perfmon)
{
u32 reg_val;
@@ -47,57 +46,7 @@ static int t234_hwpm_perfmon_enable(struct tegra_soc_hwpm *hwpm,
return 0;
}
static int t234_hwpm_perfmux_reserve(struct tegra_soc_hwpm *hwpm,
hwpm_ip_perfmux *perfmux)
{
int err = 0;
int ret = 0;
tegra_hwpm_fn(hwpm, " ");
/*
* Indicate that HWPM driver is initializing monitoring.
* Since perfmux is controlled by IP, indicate monitoring enabled
* by disabling IP power management.
*/
/* Make sure that ip_ops are initialized */
if ((perfmux->ip_ops.ip_dev != NULL) &&
(perfmux->ip_ops.hwpm_ip_pm != NULL)) {
err = (*perfmux->ip_ops.hwpm_ip_pm)(
perfmux->ip_ops.ip_dev, true);
if (err != 0) {
tegra_hwpm_err(hwpm, "Runtime PM disable failed");
}
} else {
tegra_hwpm_dbg(hwpm, hwpm_verbose, "Runtime PM not configured");
}
perfmux->start_pa = perfmux->start_abs_pa;
perfmux->end_pa = perfmux->end_abs_pa;
/* Allocate fake registers */
if (hwpm->fake_registers_enabled) {
u64 address_range = tegra_hwpm_safe_add_u64(
tegra_hwpm_safe_sub_u64(
perfmux->end_pa, perfmux->start_pa), 1ULL);
u64 num_regs = address_range / sizeof(u32);
u32 **fake_regs = &perfmux->fake_registers;
*fake_regs = (u32 *)kzalloc(sizeof(u32) * num_regs, GFP_KERNEL);
if (!(*fake_regs)) {
tegra_hwpm_err(hwpm, "Aperture(0x%llx - 0x%llx):"
" Couldn't allocate memory for fake registers",
perfmux->start_pa, perfmux->end_pa);
ret = -ENOMEM;
goto fail;
}
}
fail:
return ret;
}
static int t234_hwpm_perfmux_disable(struct tegra_soc_hwpm *hwpm,
int t234_hwpm_perfmux_disable(struct tegra_soc_hwpm *hwpm,
hwpm_ip_perfmux *perfmux)
{
int err = 0;
@@ -124,65 +73,7 @@ static int t234_hwpm_perfmux_disable(struct tegra_soc_hwpm *hwpm,
return 0;
}
static int t234_hwpm_perfmux_release(struct tegra_soc_hwpm *hwpm,
hwpm_ip_perfmux *perfmux)
{
tegra_hwpm_fn(hwpm, " ");
/*
* Release
* This is only required for for fake registers
*/
if (perfmux->fake_registers) {
kfree(perfmux->fake_registers);
perfmux->fake_registers = NULL;
}
return 0;
}
int t234_hwpm_perfmon_reserve(struct tegra_soc_hwpm *hwpm,
hwpm_ip_perfmon *perfmon)
{
struct resource *res = NULL;
tegra_hwpm_fn(hwpm, " ");
/* Reserve */
res = platform_get_resource_byname(hwpm->pdev,
IORESOURCE_MEM, perfmon->name);
if ((!res) || (res->start == 0) || (res->end == 0)) {
tegra_hwpm_err(hwpm, "Failed to get perfmon %s", perfmon->name);
return -ENOMEM;
}
perfmon->dt_mmio = devm_ioremap(hwpm->dev, res->start,
resource_size(res));
if (IS_ERR(perfmon->dt_mmio)) {
tegra_hwpm_err(hwpm, "Couldn't map perfmon %s", perfmon->name);
return PTR_ERR(perfmon->dt_mmio);
}
perfmon->start_pa = res->start;
perfmon->end_pa = res->end;
if (hwpm->fake_registers_enabled) {
u64 address_range = tegra_hwpm_safe_add_u64(
tegra_hwpm_safe_sub_u64(res->end, res->start), 1ULL);
u64 num_regs = address_range / sizeof(u32);
perfmon->fake_registers = (u32 *)kzalloc(sizeof(u32) * num_regs,
GFP_KERNEL);
if (perfmon->fake_registers == NULL) {
tegra_hwpm_err(hwpm, "Perfmon (0x%llx - 0x%llx) "
"Couldn't allocate memory for fake regs",
perfmon->start_abs_pa, perfmon->end_abs_pa);
return -ENOMEM;
}
}
return 0;
}
static int t234_hwpm_perfmon_disable(struct tegra_soc_hwpm *hwpm,
int t234_hwpm_perfmon_disable(struct tegra_soc_hwpm *hwpm,
hwpm_ip_perfmon *perfmon)
{
u32 reg_val;
@@ -200,353 +91,3 @@ static int t234_hwpm_perfmon_disable(struct tegra_soc_hwpm *hwpm,
return 0;
}
int t234_hwpm_perfmon_release(struct tegra_soc_hwpm *hwpm,
hwpm_ip_perfmon *perfmon)
{
tegra_hwpm_fn(hwpm, " ");
if (perfmon->dt_mmio == NULL) {
tegra_hwpm_err(hwpm, "Perfmon was not mapped");
return -EINVAL;
}
devm_iounmap(hwpm->dev, perfmon->dt_mmio);
perfmon->dt_mmio = NULL;
perfmon->start_pa = 0ULL;
perfmon->end_pa = 0ULL;
if (perfmon->fake_registers) {
kfree(perfmon->fake_registers);
perfmon->fake_registers = NULL;
}
return 0;
}
int t234_hwpm_release_all_resources(struct tegra_soc_hwpm *hwpm)
{
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
struct hwpm_ip *chip_ip = NULL;
hwpm_ip_perfmon *perfmon = NULL;
hwpm_ip_perfmux *perfmux = NULL;
u32 ip_idx;
u32 perfmux_idx, perfmon_idx;
unsigned long floorsweep_info = 0UL;
unsigned long inst_idx = 0UL;
int err = 0;
tegra_hwpm_fn(hwpm, " ");
for (ip_idx = 0U; ip_idx < T234_HWPM_IP_MAX; ip_idx++) {
chip_ip = active_chip->chip_ips[ip_idx];
/* PMA and RTR will be released later */
if ((ip_idx == T234_HWPM_IP_PMA) ||
(ip_idx == T234_HWPM_IP_RTR)) {
continue;
}
/* Disable only available IPs */
if (chip_ip->override_enable) {
/* IP not available */
continue;
}
/* Disable and release only reserved IPs */
if (!chip_ip->reserved) {
continue;
}
if (chip_ip->fs_mask == 0U) {
/* No IP instance is available */
continue;
}
floorsweep_info = (unsigned long)chip_ip->fs_mask;
for_each_set_bit(inst_idx, &floorsweep_info, 32U) {
/* Release all perfmon associated with inst_idx */
for (perfmon_idx = 0U;
perfmon_idx < chip_ip->num_perfmon_slots;
perfmon_idx++) {
perfmon = chip_ip->ip_perfmon[perfmon_idx];
if (perfmon == NULL) {
continue;
}
if (perfmon->hw_inst_mask != BIT(inst_idx)) {
continue;
}
err = t234_hwpm_perfmon_disable(hwpm, perfmon);
if (err != 0) {
tegra_hwpm_err(hwpm, "IP %d"
" perfmon %d disable failed",
ip_idx, perfmon_idx);
}
err = t234_hwpm_perfmon_release(hwpm, perfmon);
if (err != 0) {
tegra_hwpm_err(hwpm, "IP %d"
" perfmon %d release failed",
ip_idx, perfmon_idx);
}
}
/* Release all perfmux associated with inst_idx */
for (perfmux_idx = 0U;
perfmux_idx < chip_ip->num_perfmux_slots;
perfmux_idx++) {
perfmux = chip_ip->ip_perfmux[perfmux_idx];
if (perfmux == NULL) {
continue;
}
if (perfmux->hw_inst_mask != BIT(inst_idx)) {
continue;
}
err = t234_hwpm_perfmux_disable(hwpm, perfmux);
if (err != 0) {
tegra_hwpm_err(hwpm, "IP %d"
" perfmux %d disable failed",
ip_idx, perfmux_idx);
}
err = t234_hwpm_perfmux_release(hwpm, perfmux);
if (err != 0) {
tegra_hwpm_err(hwpm, "IP %d"
" perfmux %d release failed",
ip_idx, perfmux_idx);
}
}
}
chip_ip->reserved = false;
}
return 0;
}
/*
 * Best-effort cleanup: disable and release all perfmons that belong to
 * hw instance inst_idx and occupy a slot index below max_slots.
 * Errors are logged but intentionally ignored (we are already unwinding).
 */
static void t234_hwpm_release_inst_perfmons(struct tegra_soc_hwpm *hwpm,
	struct hwpm_ip *chip_ip, u32 ip_idx, unsigned long inst_idx,
	u32 max_slots)
{
	hwpm_ip_perfmon *perfmon = NULL;
	u32 perfmon_idx;
	int ret = 0;

	for (perfmon_idx = 0U; perfmon_idx < max_slots; perfmon_idx++) {
		perfmon = chip_ip->ip_perfmon[perfmon_idx];
		if (perfmon == NULL) {
			continue;
		}
		if (perfmon->hw_inst_mask != BIT(inst_idx)) {
			continue;
		}
		ret = t234_hwpm_perfmon_disable(hwpm, perfmon);
		if (ret != 0) {
			tegra_hwpm_err(hwpm,
				"IP %d perfmon %d disable failed",
				ip_idx, perfmon_idx);
		}
		ret = t234_hwpm_perfmon_release(hwpm, perfmon);
		if (ret != 0) {
			tegra_hwpm_err(hwpm,
				"IP %d perfmon %d release failed",
				ip_idx, perfmon_idx);
		}
	}
}

/*
 * Best-effort cleanup: disable and release all perfmuxes that belong to
 * hw instance inst_idx and occupy a slot index below max_slots.
 * Errors are logged but intentionally ignored (we are already unwinding).
 */
static void t234_hwpm_release_inst_perfmuxes(struct tegra_soc_hwpm *hwpm,
	struct hwpm_ip *chip_ip, u32 ip_idx, unsigned long inst_idx,
	u32 max_slots)
{
	hwpm_ip_perfmux *perfmux = NULL;
	u32 perfmux_idx;
	int ret = 0;

	for (perfmux_idx = 0U; perfmux_idx < max_slots; perfmux_idx++) {
		perfmux = chip_ip->ip_perfmux[perfmux_idx];
		if (perfmux == NULL) {
			continue;
		}
		if (perfmux->hw_inst_mask != BIT(inst_idx)) {
			continue;
		}
		ret = t234_hwpm_perfmux_disable(hwpm, perfmux);
		if (ret != 0) {
			tegra_hwpm_err(hwpm,
				"IP %d perfmux %d disable failed",
				ip_idx, perfmux_idx);
		}
		ret = t234_hwpm_perfmux_release(hwpm, perfmux);
		if (ret != 0) {
			tegra_hwpm_err(hwpm,
				"IP %d perfmux %d release failed",
				ip_idx, perfmux_idx);
		}
	}
}

/* ip_idx is wrt enum t234_hwpm_active_ips */
/*
 * Reserve all perfmon and perfmux apertures of every non-floorswept
 * instance of the given IP. On any reservation failure, every aperture
 * reserved so far — including those of the partially reserved instance —
 * is disabled and released before returning the original error.
 *
 * Returns 0 on success (or for PMA/RTR, which are reserved separately),
 * else the first reservation error.
 */
int t234_hwpm_reserve_given_resource(struct tegra_soc_hwpm *hwpm, u32 ip_idx)
{
	int err = 0;
	u32 perfmux_idx, perfmon_idx;
	unsigned long inst_idx = 0UL;
	unsigned long floorsweep_info = 0UL, reserved_insts = 0UL;
	struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
	struct hwpm_ip *chip_ip = active_chip->chip_ips[ip_idx];
	hwpm_ip_perfmon *perfmon = NULL;
	hwpm_ip_perfmux *perfmux = NULL;

	floorsweep_info = (unsigned long)chip_ip->fs_mask;

	tegra_hwpm_fn(hwpm, " ");

	tegra_hwpm_dbg(hwpm, hwpm_info, "Reserve IP %d, fs_mask 0x%x",
		ip_idx, chip_ip->fs_mask);

	/* PMA and RTR are already reserved */
	if ((ip_idx == T234_HWPM_IP_PMA) || (ip_idx == T234_HWPM_IP_RTR)) {
		return 0;
	}

	for_each_set_bit(inst_idx, &floorsweep_info, 32U) {
		/* Reserve all perfmon belonging to this instance */
		for (perfmon_idx = 0U; perfmon_idx < chip_ip->num_perfmon_slots;
			perfmon_idx++) {
			perfmon = chip_ip->ip_perfmon[perfmon_idx];
			if (perfmon == NULL) {
				continue;
			}
			if (perfmon->hw_inst_mask != BIT(inst_idx)) {
				continue;
			}
			err = t234_hwpm_perfmon_reserve(hwpm, perfmon);
			if (err != 0) {
				tegra_hwpm_err(hwpm,
					"IP %d perfmon %d reserve failed",
					ip_idx, perfmon_idx);
				/*
				 * Roll back perfmons already reserved for
				 * this (partial) instance; fully reserved
				 * instances are handled at "fail".
				 */
				t234_hwpm_release_inst_perfmons(hwpm, chip_ip,
					ip_idx, inst_idx, perfmon_idx);
				goto fail;
			}
		}
		/* Reserve all perfmux belonging to this instance */
		for (perfmux_idx = 0U; perfmux_idx < chip_ip->num_perfmux_slots;
			perfmux_idx++) {
			perfmux = chip_ip->ip_perfmux[perfmux_idx];
			if (perfmux == NULL) {
				continue;
			}
			if (perfmux->hw_inst_mask != BIT(inst_idx)) {
				continue;
			}
			err = t234_hwpm_perfmux_reserve(hwpm, perfmux);
			if (err != 0) {
				tegra_hwpm_err(hwpm,
					"IP %d perfmux %d reserve failed",
					ip_idx, perfmux_idx);
				/*
				 * This instance's perfmons were all reserved
				 * above; release them plus the perfmuxes
				 * reserved so far, then unwind the rest.
				 */
				t234_hwpm_release_inst_perfmons(hwpm, chip_ip,
					ip_idx, inst_idx,
					chip_ip->num_perfmon_slots);
				t234_hwpm_release_inst_perfmuxes(hwpm, chip_ip,
					ip_idx, inst_idx, perfmux_idx);
				goto fail;
			}
		}
		reserved_insts |= BIT(inst_idx);
	}
	chip_ip->reserved = true;

	return 0;
fail:
	/* release fully reserved instances */
	for_each_set_bit(inst_idx, &reserved_insts, 32U) {
		t234_hwpm_release_inst_perfmons(hwpm, chip_ip, ip_idx,
			inst_idx, chip_ip->num_perfmon_slots);
		t234_hwpm_release_inst_perfmuxes(hwpm, chip_ip, ip_idx,
			inst_idx, chip_ip->num_perfmux_slots);
	}
	return err;
}
/*
 * Bind all previously reserved IP resources: zero out allowlist registers
 * of every reserved perfmux and perfmon, then enable status reporting on
 * each perfmon.
 *
 * All apertures are visited even after a failure (binding is best-effort
 * per aperture), but the first error encountered is latched and returned
 * so callers are not told 0 when something failed.
 */
int t234_hwpm_bind_reserved_resources(struct tegra_soc_hwpm *hwpm)
{
	struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
	struct hwpm_ip *chip_ip = NULL;
	u32 ip_idx;
	u32 perfmux_idx, perfmon_idx;
	unsigned long inst_idx = 0UL;
	unsigned long floorsweep_info = 0UL;
	int err = 0, ret = 0;
	hwpm_ip_perfmon *perfmon = NULL;
	hwpm_ip_perfmux *perfmux = NULL;

	tegra_hwpm_fn(hwpm, " ");

	for (ip_idx = 0U; ip_idx < T234_HWPM_IP_MAX; ip_idx++) {
		chip_ip = active_chip->chip_ips[ip_idx];

		/* Skip unavailable IPs */
		if (!chip_ip->reserved) {
			continue;
		}

		if (chip_ip->fs_mask == 0U) {
			/* No IP instance is available */
			continue;
		}

		floorsweep_info = (unsigned long)chip_ip->fs_mask;

		for_each_set_bit(inst_idx, &floorsweep_info, 32U) {
			/* Zero out necessary perfmux registers */
			for (perfmux_idx = 0U;
				perfmux_idx < chip_ip->num_perfmux_slots;
				perfmux_idx++) {
				perfmux = chip_ip->ip_perfmux[perfmux_idx];
				if (perfmux == NULL) {
					continue;
				}
				if (perfmux->hw_inst_mask != BIT(inst_idx)) {
					continue;
				}
				ret = active_chip->zero_alist_regs(
					hwpm, perfmux);
				if (ret != 0) {
					tegra_hwpm_err(hwpm, "IP %d"
						" perfmux %d zero regs failed",
						ip_idx, perfmux_idx);
					/* Latch first error only */
					if (err == 0) {
						err = ret;
					}
				}
			}
			/* Zero out necessary perfmon registers */
			/* And enable reporting of PERFMON status */
			for (perfmon_idx = 0U;
				perfmon_idx < chip_ip->num_perfmon_slots;
				perfmon_idx++) {
				perfmon = chip_ip->ip_perfmon[perfmon_idx];
				if (perfmon == NULL) {
					continue;
				}
				if (perfmon->hw_inst_mask != BIT(inst_idx)) {
					continue;
				}
				ret = active_chip->zero_alist_regs(
					hwpm, perfmon);
				if (ret != 0) {
					tegra_hwpm_err(hwpm, "IP %d"
						" perfmon %d zero regs failed",
						ip_idx, perfmon_idx);
					/* Latch first error only */
					if (err == 0) {
						err = ret;
					}
				}
				ret = t234_hwpm_perfmon_enable(hwpm, perfmon);
				if (ret != 0) {
					tegra_hwpm_err(hwpm, "IP %d"
						" perfmon %d enable failed",
						ip_idx, perfmon_idx);
					/* Latch first error only */
					if (err == 0) {
						err = ret;
					}
				}
			}
		}
	}
	return err;
}

View File

@@ -213,9 +213,15 @@ struct tegra_soc_hwpm_chip {
bool (*is_resource_active)(struct tegra_soc_hwpm *hwpm,
u32 res_index, u32 *config_ip_index);
u32 (*get_pma_int_idx)(struct tegra_soc_hwpm *hwpm);
u32 (*get_rtr_int_idx)(struct tegra_soc_hwpm *hwpm);
u32 (*get_ip_max_idx)(struct tegra_soc_hwpm *hwpm);
int (*init_chip_ip_structures)(struct tegra_soc_hwpm *hwpm);
int (*extract_ip_ops)(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_ip_ops *hwpm_ip_ops, bool available);
int (*finalize_chip_info)(struct tegra_soc_hwpm *hwpm);
int (*force_enable_ips)(struct tegra_soc_hwpm *hwpm);
int (*get_fs_info)(struct tegra_soc_hwpm *hwpm,
u32 ip_index, u64 *fs_mask, u8 *ip_status);
@@ -228,10 +234,13 @@ struct tegra_soc_hwpm_chip {
int (*release_pma)(struct tegra_soc_hwpm *hwpm);
int (*release_rtr)(struct tegra_soc_hwpm *hwpm);
int (*reserve_given_resource)(struct tegra_soc_hwpm *hwpm, u32 ip_idx);
int (*bind_reserved_resources)(struct tegra_soc_hwpm *hwpm);
int (*disable_triggers)(struct tegra_soc_hwpm *hwpm);
int (*release_all_resources)(struct tegra_soc_hwpm *hwpm);
int (*perfmon_enable)(struct tegra_soc_hwpm *hwpm,
hwpm_ip_perfmon *perfmon);
int (*perfmon_disable)(struct tegra_soc_hwpm *hwpm,
hwpm_ip_perfmon *perfmon);
int (*perfmux_disable)(struct tegra_soc_hwpm *hwpm,
hwpm_ip_perfmux *perfmux);
int (*disable_mem_mgmt)(struct tegra_soc_hwpm *hwpm);
int (*enable_mem_mgmt)(struct tegra_soc_hwpm *hwpm,
@@ -247,8 +256,10 @@ struct tegra_soc_hwpm_chip {
size_t (*get_alist_buf_size)(struct tegra_soc_hwpm *hwpm);
int (*zero_alist_regs)(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip_aperture *aperture);
int (*get_alist_size)(struct tegra_soc_hwpm *hwpm);
int (*combine_alist)(struct tegra_soc_hwpm *hwpm, u64 *alist);
int (*copy_alist)(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip_aperture *aperture,
u64 *full_alist,
u64 *full_alist_idx);
bool (*check_alist)(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip_aperture *aperture, u64 phys_addr);

View File

@@ -19,12 +19,38 @@ struct tegra_soc_hwpm_exec_reg_ops;
struct tegra_soc_hwpm_ip_floorsweep_info;
struct tegra_soc_hwpm_alloc_pma_stream;
struct tegra_soc_hwpm_update_get_put;
struct tegra_soc_hwpm_ip_ops;
struct hwpm_ip_aperture;
typedef struct hwpm_ip_aperture hwpm_ip_perfmon;
typedef struct hwpm_ip_aperture hwpm_ip_perfmux;
int tegra_hwpm_init_sw_components(struct tegra_soc_hwpm *hwpm);
void tegra_hwpm_release_sw_components(struct tegra_soc_hwpm *hwpm);
int tegra_hwpm_init_chip_info(struct tegra_soc_hwpm *hwpm);
int tegra_hwpm_init_floorsweep_info(struct tegra_soc_hwpm *hwpm);
int tegra_hwpm_reserve_resource(struct tegra_soc_hwpm *hwpm, u32 resource);
int tegra_hwpm_release_resources(struct tegra_soc_hwpm *hwpm);
int tegra_hwpm_bind_resources(struct tegra_soc_hwpm *hwpm);
int tegra_hwpm_reserve_pma(struct tegra_soc_hwpm *hwpm);
int tegra_hwpm_reserve_rtr(struct tegra_soc_hwpm *hwpm);
int tegra_hwpm_release_pma(struct tegra_soc_hwpm *hwpm);
int tegra_hwpm_release_rtr(struct tegra_soc_hwpm *hwpm);
int tegra_hwpm_perfmon_reserve(struct tegra_soc_hwpm *hwpm,
hwpm_ip_perfmon *perfmon);
int tegra_hwpm_perfmon_release(struct tegra_soc_hwpm *hwpm,
hwpm_ip_perfmon *perfmon);
int tegra_hwpm_perfmux_reserve(struct tegra_soc_hwpm *hwpm,
hwpm_ip_perfmux *perfmux);
int tegra_hwpm_perfmux_release(struct tegra_soc_hwpm *hwpm,
hwpm_ip_perfmux *perfmux);
int tegra_hwpm_init_chip_ip_structures(struct tegra_soc_hwpm *hwpm);
int tegra_hwpm_set_fs_info_ip_ops(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_ip_ops *hwpm_ip_ops,
u64 base_address, u32 ip_idx, bool available);
int tegra_hwpm_finalize_chip_info(struct tegra_soc_hwpm *hwpm);
int tegra_hwpm_get_allowlist_size(struct tegra_soc_hwpm *hwpm);
int tegra_hwpm_update_allowlist(struct tegra_soc_hwpm *hwpm,
void *ioctl_struct);
@@ -35,7 +61,7 @@ int tegra_hwpm_setup_hw(struct tegra_soc_hwpm *hwpm);
int tegra_hwpm_setup_sw(struct tegra_soc_hwpm *hwpm);
int tegra_hwpm_disable_triggers(struct tegra_soc_hwpm *hwpm);
int tegra_hwpm_release_hw(struct tegra_soc_hwpm *hwpm);
void tegra_hwpm_release_sw_components(struct tegra_soc_hwpm *hwpm);
void tegra_hwpm_release_sw_setup(struct tegra_soc_hwpm *hwpm);
int tegra_hwpm_get_floorsweep_info(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_ip_floorsweep_info *fs_info);

View File

@@ -84,11 +84,17 @@ void tegra_soc_hwpm_ip_register(struct tegra_soc_hwpm_ip_ops *hwpm_ip_ops)
struct tegra_soc_hwpm *hwpm = NULL;
int ret = 0;
if (hwpm_ip_ops == NULL) {
tegra_hwpm_err(NULL, "IP details missing");
return;
}
if (tegra_soc_hwpm_pdev == NULL) {
ret = tegra_hwpm_note_ip_register(hwpm_ip_ops);
if (ret != 0) {
tegra_hwpm_err(NULL,
"Couldn't save IP register details");
return;
}
} else {
if (hwpm_ip_ops->ip_dev == NULL) {
@@ -118,6 +124,11 @@ void tegra_soc_hwpm_ip_unregister(struct tegra_soc_hwpm_ip_ops *hwpm_ip_ops)
struct tegra_soc_hwpm *hwpm = NULL;
int ret = 0;
if (hwpm_ip_ops == NULL) {
tegra_hwpm_err(NULL, "IP details missing");
return;
}
if (tegra_soc_hwpm_pdev == NULL) {
tegra_hwpm_dbg(hwpm, hwpm_info, "HWPM device not available");
} else {

View File

@@ -126,7 +126,11 @@ static int tegra_hwpm_probe(struct platform_device *pdev)
}
tegra_hwpm_debugfs_init(hwpm);
tegra_hwpm_init_chip_info(hwpm);
ret = tegra_hwpm_init_sw_components(hwpm);
if (ret != 0) {
tegra_hwpm_err(hwpm, "Failed to init sw components");
goto init_sw_components_fail;
}
/*
* Currently VDK doesn't have a fmodel for SOC HWPM. Therefore, we
@@ -143,7 +147,7 @@ static int tegra_hwpm_probe(struct platform_device *pdev)
tegra_hwpm_dbg(hwpm, hwpm_info, "Probe successful!");
goto success;
init_sw_components_fail:
clock_reset_fail:
if (tegra_platform_is_silicon()) {
if (hwpm->la_clk)