Mirror of git://nv-tegra.nvidia.com/linux-hwpm.git (synced 2025-12-24 10:13:00 +03:00)
tegra: hwpm: combine common functionality
Many HWPM functions are performed on all apertures of all instances of
all IPs. Define the following resource utility functions to perform a
task on all IPs, instances and apertures:
- tegra_hwpm_func_all_IPs
- tegra_hwpm_func_single_ip
- tegra_hwpm_func_all_instance
- tegra_hwpm_func_single_instance
- tegra_hwpm_func_all_perfmuxes
- tegra_hwpm_func_all_perfmons
- tegra_hwpm_func_single_aperture

Modify the following functions to use the above utility functions:
- get allowlist size
- combine allowlist
- reserve resources
- bind resources
- release resources

This makes the code more legible and maintainable.

This patch also defines a new function that validates all HAL
initializations for the chip.

Jira THWPM-41

Change-Id: Icaeba4d94187b97022c0a6626584e7d61ab6d0e4
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2705524
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: Vasuki Shankar <vasukis@nvidia.com>
Reviewed-by: Seema Khowala <seemaj@nvidia.com>
GVS: Gerrit_Virtual_Submit
Committed by: mobile promotions
Parent: c5b3d09518
Commit: 25f0737897
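For orientation, the pattern the commit message describes can be modelled in a small, self-contained sketch: one walker descends IP -> instance -> aperture and a per-aperture switch dispatches on the requested operation. Everything below is a simplified stand-in written for illustration (the all-IPs loop is collapsed to a single IP for brevity); only the overall shape and the GET_ALIST_SIZE / COMBINE_ALIST split mirror the tegra_hwpm_func_* helpers and struct tegra_hwpm_func_args referenced in the diff, and none of the types, names or values are the real driver's.

/* Toy model only: simplified stand-in types, not the tegra_soc_hwpm driver. */
#include <stdint.h>
#include <stdio.h>

enum hwpm_func_op { GET_ALIST_SIZE, COMBINE_ALIST };

struct func_args {
	uint64_t *alist;          /* destination buffer (COMBINE_ALIST) */
	uint64_t full_alist_idx;  /* running write index into alist */
	uint64_t full_alist_size; /* accumulated by GET_ALIST_SIZE */
};

struct aperture {                 /* stands in for a perfmux/perfmon */
	const uint64_t *alist;
	uint64_t alist_size;
};

struct ip_instance {
	struct aperture apertures[2];
	int num_apertures;
};

struct ip {
	uint32_t fs_mask;         /* bit i set => instance i is present */
	struct ip_instance inst[2];
};

static int func_single_aperture(struct aperture *ap, struct func_args *args,
				enum hwpm_func_op op)
{
	uint64_t i;

	switch (op) {
	case GET_ALIST_SIZE:      /* pass 1: just count entries */
		args->full_alist_size += ap->alist_size;
		break;
	case COMBINE_ALIST:       /* pass 2: append entries to the buffer */
		for (i = 0; i < ap->alist_size; i++)
			args->alist[args->full_alist_idx++] = ap->alist[i];
		break;
	}
	return 0;
}

static int func_single_instance(struct ip_instance *inst,
				struct func_args *args, enum hwpm_func_op op)
{
	int i, err;

	for (i = 0; i < inst->num_apertures; i++) {
		err = func_single_aperture(&inst->apertures[i], args, op);
		if (err != 0)
			return err;
	}
	return 0;
}

static int func_single_ip(struct ip *ip, struct func_args *args,
			  enum hwpm_func_op op)
{
	int i, err;

	for (i = 0; i < 2; i++) {
		if ((ip->fs_mask & (1U << i)) == 0U)
			continue; /* instance floorswept: skip it */
		err = func_single_instance(&ip->inst[i], args, op);
		if (err != 0)
			return err;
	}
	return 0;
}

int main(void)
{
	static const uint64_t mux0[] = { 0x100, 0x104 }, mon0[] = { 0x200 };
	struct ip ip = {
		.fs_mask = 0x1, /* only instance 0 present */
		.inst = { { .apertures = { { mux0, 2 }, { mon0, 1 } },
			    .num_apertures = 2 } },
	};
	uint64_t buf[4] = { 0 };
	struct func_args args = { .alist = buf };

	func_single_ip(&ip, &args, GET_ALIST_SIZE);  /* size query */
	func_single_ip(&ip, &args, COMBINE_ALIST);   /* combine pass */

	printf("size=%llu written=%llu first=0x%llx\n",
	       (unsigned long long)args.full_alist_size,
	       (unsigned long long)args.full_alist_idx,
	       (unsigned long long)buf[0]);
	return 0;
}

Threading the accumulator through a small args struct is what lets one walker serve both the size query and the copy pass, which is the difference visible between the two call sites in the hunks below (NULL for TEGRA_HWPM_GET_ALIST_SIZE, &func_args for TEGRA_HWPM_COMBINE_ALIST).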
@@ -28,92 +28,6 @@
#include <tegra_hwpm_common.h>
#include <tegra_hwpm_static_analysis.h>

static int tegra_hwpm_get_alist_size(struct tegra_soc_hwpm *hwpm)
{
	struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
	u32 ip_idx;
	u32 perfmux_idx, perfmon_idx;
	unsigned long inst_idx = 0UL;
	unsigned long floorsweep_info = 0UL;
	struct hwpm_ip *chip_ip = NULL;
	hwpm_ip_perfmux *perfmux = NULL;
	hwpm_ip_perfmon *perfmon = NULL;

	tegra_hwpm_fn(hwpm, " ");

	for (ip_idx = 0U; ip_idx < active_chip->get_ip_max_idx(hwpm);
		ip_idx++) {
		chip_ip = active_chip->chip_ips[ip_idx];

		/* Skip unavailable IPs */
		if (!chip_ip->reserved) {
			continue;
		}

		if (chip_ip->fs_mask == 0U) {
			/* No IP instance is available */
			continue;
		}

		floorsweep_info = (unsigned long)chip_ip->fs_mask;

		for_each_set_bit(inst_idx, &floorsweep_info, 32U) {
			/* Add perfmux alist size to full alist size */
			for (perfmux_idx = 0U;
				perfmux_idx < chip_ip->num_perfmux_slots;
				perfmux_idx++) {
				perfmux = chip_ip->ip_perfmux[perfmux_idx];

				if (perfmux == NULL) {
					continue;
				}

				if (perfmux->hw_inst_mask != BIT(inst_idx)) {
					continue;
				}

				if (perfmux->alist) {
					hwpm->full_alist_size =
						tegra_hwpm_safe_add_u64(
						hwpm->full_alist_size,
						perfmux->alist_size);
				} else {
					tegra_hwpm_err(hwpm, "IP %d"
						" perfmux %d NULL alist",
						ip_idx, perfmux_idx);
				}
			}

			/* Add perfmon alist size to full alist size */
			for (perfmon_idx = 0U;
				perfmon_idx < chip_ip->num_perfmon_slots;
				perfmon_idx++) {
				perfmon = chip_ip->ip_perfmon[perfmon_idx];

				if (perfmon == NULL) {
					continue;
				}

				if (perfmon->hw_inst_mask != BIT(inst_idx)) {
					continue;
				}

				if (perfmon->alist) {
					hwpm->full_alist_size =
						tegra_hwpm_safe_add_u64(
						hwpm->full_alist_size,
						perfmon->alist_size);
				} else {
					tegra_hwpm_err(hwpm, "IP %d"
						" perfmon %d NULL alist",
						ip_idx, perfmon_idx);
				}
			}
		}
	}

	return 0;
}

int tegra_hwpm_get_allowlist_size(struct tegra_soc_hwpm *hwpm)
{
@@ -123,7 +37,7 @@ int tegra_hwpm_get_allowlist_size(struct tegra_soc_hwpm *hwpm)

	tegra_hwpm_fn(hwpm, " ");

	ret = tegra_hwpm_get_alist_size(hwpm);
	ret = tegra_hwpm_func_all_ip(hwpm, NULL, TEGRA_HWPM_GET_ALIST_SIZE);
	if (ret != 0) {
		tegra_hwpm_err(hwpm, "get_alist_size failed");
		return ret;
@@ -134,100 +48,29 @@ int tegra_hwpm_get_allowlist_size(struct tegra_soc_hwpm *hwpm)

static int tegra_hwpm_combine_alist(struct tegra_soc_hwpm *hwpm, u64 *alist)
{
	struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
	u32 ip_idx;
	u32 perfmux_idx, perfmon_idx;
	unsigned long inst_idx = 0UL;
	unsigned long floorsweep_info = 0UL;
	struct hwpm_ip *chip_ip = NULL;
	hwpm_ip_perfmux *perfmux = NULL;
	hwpm_ip_perfmon *perfmon = NULL;
	u64 full_alist_idx = 0;
	struct tegra_hwpm_func_args func_args;
	int err = 0;

	tegra_hwpm_fn(hwpm, " ");

	for (ip_idx = 0U; ip_idx < active_chip->get_ip_max_idx(hwpm);
		ip_idx++) {
		chip_ip = active_chip->chip_ips[ip_idx];
	func_args.alist = alist;
	func_args.full_alist_idx = 0ULL;

		/* Skip unavailable IPs */
		if (!chip_ip->reserved) {
			continue;
		}

		if (chip_ip->fs_mask == 0U) {
			/* No IP instance is available */
			continue;
		}

		if (hwpm->active_chip->copy_alist == NULL) {
			tegra_hwpm_err(hwpm, "copy_alist uninitialized");
			return -ENODEV;
		}

		floorsweep_info = (unsigned long)chip_ip->fs_mask;

		for_each_set_bit(inst_idx, &floorsweep_info, 32U) {
			/* Copy perfmux alist to full alist array */
			for (perfmux_idx = 0U;
				perfmux_idx < chip_ip->num_perfmux_slots;
				perfmux_idx++) {
				perfmux = chip_ip->ip_perfmux[perfmux_idx];

				if (perfmux == NULL) {
					continue;
				}

				if (perfmux->hw_inst_mask != BIT(inst_idx)) {
					continue;
				}

				err = hwpm->active_chip->copy_alist(hwpm,
					perfmux, alist, &full_alist_idx);
				if (err != 0) {
					tegra_hwpm_err(hwpm, "IP %d"
						" perfmux %d alist copy failed",
						ip_idx, perfmux_idx);
					goto fail;
				}
			}

			/* Copy perfmon alist to full alist array */
			for (perfmon_idx = 0U;
				perfmon_idx < chip_ip->num_perfmon_slots;
				perfmon_idx++) {
				perfmon = chip_ip->ip_perfmon[perfmon_idx];

				if (perfmon == NULL) {
					continue;
				}

				if (perfmon->hw_inst_mask != BIT(inst_idx)) {
					continue;
				}

				err = hwpm->active_chip->copy_alist(hwpm,
					perfmon, alist, &full_alist_idx);
				if (err != 0) {
					tegra_hwpm_err(hwpm, "IP %d"
						" perfmon %d alist copy failed",
						ip_idx, perfmon_idx);
					goto fail;
				}
			}
		}
	err = tegra_hwpm_func_all_ip(hwpm, &func_args,
		TEGRA_HWPM_COMBINE_ALIST);
	if (err != 0) {
		tegra_hwpm_err(hwpm, "combine alist failed");
		return err;
	}

	/* Check size of full alist with hwpm->full_alist_size*/
	if (full_alist_idx != hwpm->full_alist_size) {
	if (func_args.full_alist_idx != hwpm->full_alist_size) {
		tegra_hwpm_err(hwpm, "full_alist_size 0x%llx doesn't match "
			"max full_alist_idx 0x%llx",
			hwpm->full_alist_size, full_alist_idx);
			hwpm->full_alist_size, func_args.full_alist_idx);
		err = -EINVAL;
	}

fail:
	return err;
}

@@ -253,10 +96,7 @@ int tegra_hwpm_update_allowlist(struct tegra_soc_hwpm *hwpm,
		tegra_hwpm_err(hwpm, "Invalid allowlist size");
		return -EINVAL;
	}
	if (hwpm->active_chip->get_alist_buf_size == NULL) {
		tegra_hwpm_err(hwpm, "alist_buf_size uninitialized");
		return -ENODEV;
	}

	alist_buf_size = tegra_hwpm_safe_mult_u64(hwpm->full_alist_size,
		hwpm->active_chip->get_alist_buf_size(hwpm));
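The last hunk sizes the combined allowlist buffer by multiplying hwpm->full_alist_size with the chip's per-entry buffer size via tegra_hwpm_safe_mult_u64() from tegra_hwpm_static_analysis.h. As a hedged illustration of the guarantee such a helper has to provide, here is a generic overflow-checked u64 multiply; it is an assumption-level sketch, not the driver's actual implementation or error handling.

/*
 * Generic overflow-checked u64 multiply, shown only to illustrate what a
 * "safe multiply" used for the buffer sizing above needs to guarantee; it is
 * not claimed to be the tegra_hwpm_safe_mult_u64 implementation.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint64_t safe_mult_u64(uint64_t a, uint64_t b)
{
	if (a != 0 && b > UINT64_MAX / a) {
		/* Overflow: bail out instead of returning a wrapped size. */
		fprintf(stderr, "u64 multiply overflow\n");
		exit(EXIT_FAILURE);
	}
	return a * b;
}

int main(void)
{
	/* e.g. full_alist_size entries, each sized by the chip's alist unit */
	uint64_t full_alist_size = 1536, alist_entry_bytes = 8;

	printf("alist buffer bytes = %llu\n",
	       (unsigned long long)safe_mult_u64(full_alist_size,
						 alist_entry_bytes));
	return 0;
}

The point is only that the buffer size computation cannot silently wrap before it reaches the allocator.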