tegra: hwpm: move linux APIs in aperture to os

Perfmux/perfmon reserve and release functions use Linux APIs to
map/unmap MMIO apertures. In an effort to make the HWPM driver
OS-agnostic, add wrappers to reserve and release apertures.

Jira THWPM-59

Change-Id: I2e8e820ae0b7c46f5656e8dfd2cf7ef370f168cc
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2738157
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: Vasuki Shankar <vasukis@nvidia.com>
Reviewed-by: Seema Khowala <seemaj@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author:    Vedashree Vidwans
Date:      2022-06-30 01:17:24 -07:00
Committer: mobile promotions
Parent:    42a33fd9d0
Commit:    ae38729467

5 changed files with 241 additions and 114 deletions
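
The commit message above describes a compile-time dispatch: common driver code keeps
calling tegra_hwpm_perfmon_reserve()/tegra_hwpm_perfmux_release() and friends, while a
macro in the new tegra_hwpm_aperture.h forwards each call to a *_impl() function supplied
by the OS layer (os/linux/aperture_utils.c on Linux). Below is a minimal, self-contained
sketch of that pattern, for illustration only: the stub types, the printf body, and the
"perfmon_sys0" name are placeholders, and only the macro-to-_impl indirection mirrors the
header added in this commit (the real Linux implementation does the platform resource
lookup and devm_ioremap shown in the diff).

#include <stdio.h>

/* Placeholder types standing in for the real driver structures. */
struct tegra_soc_hwpm { int fake_registers_enabled; };
struct hwpm_ip_inst { unsigned int element_fs_mask; };
struct hwpm_ip_aperture { const char *name; };

/* OS-specific implementation; in the driver this lives in
 * os/linux/aperture_utils.c and does the actual resource/ioremap work. */
static int tegra_hwpm_perfmon_reserve_impl(struct tegra_soc_hwpm *hwpm,
	struct hwpm_ip_inst *ip_inst, struct hwpm_ip_aperture *perfmon)
{
	(void)hwpm;
	(void)ip_inst;
	printf("OS layer reserves perfmon %s\n", perfmon->name);
	return 0;
}

/* OS-agnostic wrapper used by common code, as in tegra_hwpm_aperture.h. */
#define tegra_hwpm_perfmon_reserve(hwpm, ip_inst, perfmon) \
	tegra_hwpm_perfmon_reserve_impl(hwpm, ip_inst, perfmon)

int main(void)
{
	struct tegra_soc_hwpm hwpm = { 0 };
	struct hwpm_ip_inst ip_inst = { 0 };
	struct hwpm_ip_aperture perfmon = { .name = "perfmon_sys0" };

	/* Common code only sees the wrapper; the OS layer supplies the body. */
	return tegra_hwpm_perfmon_reserve(&hwpm, &ip_inst, &perfmon);
}

Retargeting the driver to another OS then only requires pointing the wrapper macros at a
different *_impl() back end, without touching the common reserve/release call sites.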

@@ -9,6 +9,7 @@ ccflags-y += -I$(srctree.nvidia)/drivers/platform/tegra/hwpm/include
 ccflags-y += -I$(srctree.nvidia)/include
 obj-$(CONFIG_DEBUG_FS) += os/linux/debugfs.o
+obj-y += os/linux/aperture_utils.o
 obj-y += os/linux/driver.o
 obj-y += os/linux/io_utils.o
 obj-y += os/linux/ip_utils.o

@@ -11,85 +11,14 @@
  * more details.
  */
 
-#include <linux/slab.h>
-#include <linux/of_address.h>
 #include <tegra_hwpm_static_analysis.h>
+#include <tegra_hwpm_aperture.h>
 #include <tegra_hwpm_common.h>
 #include <tegra_hwpm_kmem.h>
 #include <tegra_hwpm_log.h>
 #include <tegra_hwpm_io.h>
 #include <tegra_hwpm.h>
 
-static int tegra_hwpm_perfmon_reserve(struct tegra_soc_hwpm *hwpm,
-	struct hwpm_ip_inst *ip_inst, struct hwpm_ip_aperture *perfmon)
-{
-	struct resource *res = NULL;
-
-	tegra_hwpm_fn(hwpm, " ");
-
-	/* Reserve */
-	res = platform_get_resource_byname(hwpm->pdev,
-		IORESOURCE_MEM, perfmon->name);
-	if ((!res) || (res->start == 0) || (res->end == 0)) {
-		tegra_hwpm_err(hwpm, "Failed to get perfmon %s", perfmon->name);
-		return -ENOMEM;
-	}
-
-	perfmon->dt_mmio = devm_ioremap(hwpm->dev, res->start,
-		resource_size(res));
-	if (IS_ERR(perfmon->dt_mmio)) {
-		tegra_hwpm_err(hwpm, "Couldn't map perfmon %s", perfmon->name);
-		return PTR_ERR(perfmon->dt_mmio);
-	}
-
-	perfmon->start_pa = res->start;
-	perfmon->end_pa = res->end;
-
-	if (hwpm->fake_registers_enabled) {
-		u64 address_range = tegra_hwpm_safe_add_u64(
-			tegra_hwpm_safe_sub_u64(res->end, res->start), 1ULL);
-		u64 num_regs = address_range / sizeof(u32);
-
-		perfmon->fake_registers =
-			tegra_hwpm_kcalloc(hwpm, num_regs, sizeof(u32));
-		if (perfmon->fake_registers == NULL) {
-			tegra_hwpm_err(hwpm, "Perfmon (0x%llx - 0x%llx) "
-				"Couldn't allocate memory for fake regs",
-				perfmon->start_abs_pa, perfmon->end_abs_pa);
-			return -ENOMEM;
-		}
-	}
-
-	return 0;
-}
-
-static int tegra_hwpm_perfmux_reserve(struct tegra_soc_hwpm *hwpm,
-	struct hwpm_ip_inst *ip_inst, struct hwpm_ip_aperture *perfmux)
-{
-	tegra_hwpm_fn(hwpm, " ");
-
-	perfmux->start_pa = perfmux->start_abs_pa;
-	perfmux->end_pa = perfmux->end_abs_pa;
-
-	/* Allocate fake registers */
-	if (hwpm->fake_registers_enabled) {
-		u64 address_range = tegra_hwpm_safe_add_u64(
-			tegra_hwpm_safe_sub_u64(
-				perfmux->end_pa, perfmux->start_pa), 1ULL);
-		u64 num_regs = address_range / sizeof(u32);
-
-		perfmux->fake_registers =
-			tegra_hwpm_kcalloc(hwpm, num_regs, sizeof(u32));
-		if (perfmux->fake_registers == NULL) {
-			tegra_hwpm_err(hwpm, "Aperture(0x%llx - 0x%llx):"
-				" Couldn't allocate memory for fake registers",
-				perfmux->start_pa, perfmux->end_pa);
-			return -ENOMEM;
-		}
-	}
-
-	return 0;
-}
-
 int tegra_hwpm_element_reserve(struct tegra_soc_hwpm *hwpm,
 	struct hwpm_ip_inst *ip_inst, struct hwpm_ip_aperture *element)
 {
@@ -127,44 +56,6 @@ fail:
 	return err;
 }
 
-static int tegra_hwpm_perfmon_release(struct tegra_soc_hwpm *hwpm,
-	struct hwpm_ip_aperture *perfmon)
-{
-	tegra_hwpm_fn(hwpm, " ");
-
-	if (perfmon->dt_mmio == NULL) {
-		tegra_hwpm_err(hwpm, "Perfmon was not mapped");
-		return -EINVAL;
-	}
-
-	devm_iounmap(hwpm->dev, perfmon->dt_mmio);
-	perfmon->dt_mmio = NULL;
-	perfmon->start_pa = 0ULL;
-	perfmon->end_pa = 0ULL;
-
-	if (perfmon->fake_registers) {
-		tegra_hwpm_kfree(hwpm, perfmon->fake_registers);
-		perfmon->fake_registers = NULL;
-	}
-
-	return 0;
-}
-
-static int tegra_hwpm_perfmux_release(struct tegra_soc_hwpm *hwpm,
-	struct hwpm_ip_aperture *perfmux)
-{
-	tegra_hwpm_fn(hwpm, " ");
-
-	/*
-	 * Release
-	 * This is only required for fake registers
-	 */
-	if (perfmux->fake_registers) {
-		tegra_hwpm_kfree(hwpm, perfmux->fake_registers);
-		perfmux->fake_registers = NULL;
-	}
-
-	return 0;
-}
-
 int tegra_hwpm_element_release(struct tegra_soc_hwpm *hwpm,
 	struct hwpm_ip_aperture *element)
 {
@@ -262,8 +153,8 @@ static int tegra_hwpm_alloc_dynamic_inst_element_array(
 	inst_a_info->inst_slots = tegra_hwpm_safe_cast_u64_to_u32(
 		ip_element_range / inst_a_info->inst_stride);
 
-	inst_a_info->inst_arr = kcalloc(inst_a_info->inst_slots,
-		sizeof(struct hwpm_ip_inst *), GFP_KERNEL);
+	inst_a_info->inst_arr = tegra_hwpm_kcalloc(
+		hwpm, inst_a_info->inst_slots, sizeof(struct hwpm_ip_inst *));
 	if (inst_a_info->inst_arr == NULL) {
 		tegra_hwpm_err(hwpm, "a_type %d instance array alloc failed",
 			a_type);
@@ -521,8 +412,9 @@ static int tegra_hwpm_func_all_elements_of_type(struct tegra_soc_hwpm *hwpm,
 	e_info->element_slots = tegra_hwpm_safe_cast_u64_to_u32(
 		inst_element_range / e_info->element_stride);
 
-	e_info->element_arr = kcalloc(e_info->element_slots,
-		sizeof(struct hwpm_ip_aperture *), GFP_KERNEL);
+	e_info->element_arr = tegra_hwpm_kcalloc(
+		hwpm, e_info->element_slots,
+		sizeof(struct hwpm_ip_aperture *));
 	if (e_info->element_arr == NULL) {
 		tegra_hwpm_err(hwpm,
 			"a_type %d element array alloc failed", a_type);

@@ -0,0 +1,51 @@
/*
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#ifndef TEGRA_HWPM_APERTURE_H
#define TEGRA_HWPM_APERTURE_H

#ifdef __KERNEL__
#include <os/linux/aperture_utils.h>
#else
int tegra_hwpm_perfmon_reserve_impl(struct tegra_soc_hwpm *hwpm,
	struct hwpm_ip_inst *ip_inst, struct hwpm_ip_aperture *perfmon)
{
	return -EINVAL;
}
int tegra_hwpm_perfmux_reserve_impl(struct tegra_soc_hwpm *hwpm,
	struct hwpm_ip_inst *ip_inst, struct hwpm_ip_aperture *perfmux)
{
	return -EINVAL;
}
int tegra_hwpm_perfmon_release_impl(struct tegra_soc_hwpm *hwpm,
	struct hwpm_ip_aperture *perfmon)
{
	return -EINVAL;
}
int tegra_hwpm_perfmux_release_impl(struct tegra_soc_hwpm *hwpm,
	struct hwpm_ip_aperture *perfmux)
{
	return -EINVAL;
}
#endif

#define tegra_hwpm_perfmon_reserve(hwpm, ip_inst, perfmon) \
	tegra_hwpm_perfmon_reserve_impl(hwpm, ip_inst, perfmon)
#define tegra_hwpm_perfmux_reserve(hwpm, ip_inst, perfmux) \
	tegra_hwpm_perfmux_reserve_impl(hwpm, ip_inst, perfmux)
#define tegra_hwpm_perfmon_release(hwpm, perfmon) \
	tegra_hwpm_perfmon_release_impl(hwpm, perfmon)
#define tegra_hwpm_perfmux_release(hwpm, perfmux) \
	tegra_hwpm_perfmux_release_impl(hwpm, perfmux)

#endif /* TEGRA_HWPM_APERTURE_H */

os/linux/aperture_utils.c (new file, 153 lines)

@@ -0,0 +1,153 @@
/*
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/slab.h>
#include <linux/of_address.h>

#include <tegra_hwpm_static_analysis.h>
#include <tegra_hwpm_aperture.h>
#include <tegra_hwpm_kmem.h>
#include <tegra_hwpm_log.h>
#include <tegra_hwpm_io.h>
#include <tegra_hwpm.h>

int tegra_hwpm_perfmon_reserve_impl(struct tegra_soc_hwpm *hwpm,
	struct hwpm_ip_inst *ip_inst, struct hwpm_ip_aperture *perfmon)
{
	struct resource *res = NULL;

	tegra_hwpm_fn(hwpm, " ");

	/* Reserve */
	res = platform_get_resource_byname(hwpm->pdev,
		IORESOURCE_MEM, perfmon->name);
	if ((!res) || (res->start == 0) || (res->end == 0)) {
		tegra_hwpm_err(hwpm, "Failed to get perfmon %s", perfmon->name);
		return -ENOMEM;
	}

	perfmon->dt_mmio = devm_ioremap(hwpm->dev, res->start,
		resource_size(res));
	if (IS_ERR(perfmon->dt_mmio)) {
		tegra_hwpm_err(hwpm, "Couldn't map perfmon %s", perfmon->name);
		return PTR_ERR(perfmon->dt_mmio);
	}

	perfmon->start_pa = res->start;
	perfmon->end_pa = res->end;

	if (hwpm->fake_registers_enabled) {
		u64 address_range = tegra_hwpm_safe_add_u64(
			tegra_hwpm_safe_sub_u64(res->end, res->start), 1ULL);
		u64 num_regs = address_range / sizeof(u32);

		perfmon->fake_registers =
			tegra_hwpm_kcalloc(hwpm, num_regs, sizeof(u32));
		if (perfmon->fake_registers == NULL) {
			tegra_hwpm_err(hwpm, "Perfmon (0x%llx - 0x%llx) "
				"Couldn't allocate memory for fake regs",
				perfmon->start_abs_pa, perfmon->end_abs_pa);
			return -ENOMEM;
		}
	}

	return 0;
}

int tegra_hwpm_perfmux_reserve_impl(struct tegra_soc_hwpm *hwpm,
	struct hwpm_ip_inst *ip_inst, struct hwpm_ip_aperture *perfmux)
{
	int ret = 0;
	u32 reg_val = 0U;

	tegra_hwpm_fn(hwpm, " ");

	perfmux->start_pa = perfmux->start_abs_pa;
	perfmux->end_pa = perfmux->end_abs_pa;

	/* Allocate fake registers */
	if (hwpm->fake_registers_enabled) {
		u64 address_range = tegra_hwpm_safe_add_u64(
			tegra_hwpm_safe_sub_u64(
				perfmux->end_pa, perfmux->start_pa), 1ULL);
		u64 num_regs = address_range / sizeof(u32);

		perfmux->fake_registers =
			tegra_hwpm_kcalloc(hwpm, num_regs, sizeof(u32));
		if (perfmux->fake_registers == NULL) {
			tegra_hwpm_err(hwpm, "Aperture(0x%llx - 0x%llx):"
				" Couldn't allocate memory for fake registers",
				perfmux->start_pa, perfmux->end_pa);
			return -ENOMEM;
		}
	}

	/* Validate perfmux availability by reading 1st alist offset */
	ret = tegra_hwpm_regops_readl(hwpm, ip_inst, perfmux,
		tegra_hwpm_safe_add_u64(perfmux->start_abs_pa,
			perfmux->alist[0U].reg_offset), &reg_val);
	if (ret != 0) {
		/*
		 * If an IP element is unavailable, perfmux register
		 * read will return with failure.
		 * Mark corresponding element as unavailable.
		 * NOTE: This is possible if IP elements are floorswept.
		 * Hence, failure should not be propagated.
		 */
		tegra_hwpm_dbg(hwpm, hwpm_dbg_reserve_resource,
			"perfmux start_abs_pa 0x%llx unavailable",
			perfmux->start_abs_pa);
		ip_inst->element_fs_mask &= ~(perfmux->element_index_mask);
	}

	return 0;
}

int tegra_hwpm_perfmon_release_impl(struct tegra_soc_hwpm *hwpm,
	struct hwpm_ip_aperture *perfmon)
{
	tegra_hwpm_fn(hwpm, " ");

	if (perfmon->dt_mmio == NULL) {
		tegra_hwpm_err(hwpm, "Perfmon was not mapped");
		return -EINVAL;
	}

	devm_iounmap(hwpm->dev, perfmon->dt_mmio);
	perfmon->dt_mmio = NULL;
	perfmon->start_pa = 0ULL;
	perfmon->end_pa = 0ULL;

	if (perfmon->fake_registers) {
		tegra_hwpm_kfree(hwpm, perfmon->fake_registers);
		perfmon->fake_registers = NULL;
	}

	return 0;
}

int tegra_hwpm_perfmux_release_impl(struct tegra_soc_hwpm *hwpm,
	struct hwpm_ip_aperture *perfmux)
{
	tegra_hwpm_fn(hwpm, " ");

	/*
	 * Release
	 * This is only required for fake registers
	 */
	if (perfmux->fake_registers) {
		tegra_hwpm_kfree(hwpm, perfmux->fake_registers);
		perfmux->fake_registers = NULL;
	}

	return 0;
}

os/linux/aperture_utils.h (new file, 30 lines)

@@ -0,0 +1,30 @@
/*
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#ifndef TEGRA_HWPM_OS_LINUX_APERTURE_UTILS_H
#define TEGRA_HWPM_OS_LINUX_APERTURE_UTILS_H

struct tegra_soc_hwpm;
struct hwpm_ip_inst;
struct hwpm_ip_aperture;

int tegra_hwpm_perfmon_reserve_impl(struct tegra_soc_hwpm *hwpm,
	struct hwpm_ip_inst *ip_inst, struct hwpm_ip_aperture *perfmon);
int tegra_hwpm_perfmux_reserve_impl(struct tegra_soc_hwpm *hwpm,
	struct hwpm_ip_inst *ip_inst, struct hwpm_ip_aperture *perfmux);
int tegra_hwpm_perfmon_release_impl(struct tegra_soc_hwpm *hwpm,
	struct hwpm_ip_aperture *perfmon);
int tegra_hwpm_perfmux_release_impl(struct tegra_soc_hwpm *hwpm,
	struct hwpm_ip_aperture *perfmux);

#endif /* TEGRA_HWPM_OS_LINUX_APERTURE_UTILS_H */