tegra: hwpm: update element fs mask at open

IPs supported for performance management register themselves with the
HWPM driver through the required function pointers. The HWPM driver
processes the request and marks the given IP instance valid.
To mark the elements of a registered IP instance valid, HWPM reads one
of the allowlist registers. The register read is essential to confirm
that the element is actually present; register reads for floorswept
elements return an error.
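
The availability check amounts to reading the element's first allowlist
register through the regops helper and folding the result into the
instance element mask. A minimal sketch of that pattern, using the
structures and helpers visible in the hunks below (the wrapper function
name here is hypothetical, for illustration only):

    /* Sketch: probe one element and fold the result into the instance mask. */
    static void probe_element_availability(struct tegra_soc_hwpm *hwpm,
            struct hwpm_ip_inst *ip_inst, struct hwpm_ip_aperture *element)
    {
            u32 reg_val = 0U;
            int ret;

            /* Read the element's first allowlist offset */
            ret = tegra_hwpm_regops_readl(hwpm, ip_inst, element,
                    tegra_hwpm_safe_add_u64(element->start_abs_pa,
                            element->alist[0U].reg_offset), &reg_val);

            if (ret != 0) {
                    /* Read failed: element is floorswept, clear its bit */
                    ip_inst->element_fs_mask &= ~(element->element_index_mask);
            } else {
                    /* Read succeeded: element is present, set its bit */
                    ip_inst->element_fs_mask |= element->element_index_mask;
            }
    }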

Currently, the HWPM driver marks all elements of an IP instance as
available at registration time, and the element mask is only corrected
for floorswept elements during IP reservation. Users querying
floorsweeping info before the reservation ioctls therefore receive
incorrect fs_info masks.

To fix this issue, move the update of the IP instance element fs_info
to device open; if the device is already open, update the element mask
during registration (see the sketch below).
- Implement the TEGRA_HWPM_UPDATE_IP_INST_MASK case to update the
  element fs mask for a given IP.
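
For reference, the resulting call flow in condensed form (function names
are taken from the hunks below; this is an illustrative condensation,
not the verbatim driver code):

    /* During device open / SW setup: build element fs masks for all IPs */
    ret = tegra_hwpm_func_all_ip(hwpm, NULL, TEGRA_HWPM_UPDATE_IP_INST_MASK);

    /*
     * During IP registration, if the device is already open,
     * update the element mask for just this IP.
     */
    if (hwpm->device_opened) {
            ret = tegra_hwpm_func_single_ip(hwpm, NULL,
                    TEGRA_HWPM_UPDATE_IP_INST_MASK, ip_idx);
    }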

Bug 3685203
Bug 3584061

Change-Id: If992204e6f7debf24b36a94f2b752b5077333cda
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2734732
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: Seema Khowala <seemaj@nvidia.com>
GVS: Gerrit_Virtual_Submit

@@ -64,9 +64,6 @@ static int tegra_hwpm_perfmon_reserve(struct tegra_soc_hwpm *hwpm,
static int tegra_hwpm_perfmux_reserve(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip_inst *ip_inst, struct hwpm_ip_aperture *perfmux)
{
int ret = 0;
u32 reg_val = 0U;
tegra_hwpm_fn(hwpm, " ");
perfmux->start_pa = perfmux->start_abs_pa;
@@ -89,25 +86,6 @@ static int tegra_hwpm_perfmux_reserve(struct tegra_soc_hwpm *hwpm,
}
}
/* Validate perfmux availability by reading 1st alist offset */
ret = tegra_hwpm_regops_readl(hwpm, ip_inst, perfmux,
tegra_hwpm_safe_add_u64(perfmux->start_abs_pa,
perfmux->alist[0U].reg_offset), &reg_val);
if (ret != 0) {
/*
* If an IP element is unavailable, perfmux register
* read will return with failure.
* Mark corresponding element as unavailable.
* NOTE: This is possible if IP elements are floorswept.
* Hence, failure should not be propagated.
*/
tegra_hwpm_dbg(hwpm, hwpm_dbg_reserve_resource,
"perfmux start_abs_pa 0x%llx unavailable",
perfmux->start_abs_pa);
ip_inst->element_fs_mask &= ~(perfmux->element_index_mask);
}
return 0;
}
@@ -336,6 +314,7 @@ static int tegra_hwpm_func_single_element(struct tegra_soc_hwpm *hwpm,
&e_info->element_static_array[static_aperture_idx];
u64 element_offset = 0ULL;
u32 idx = 0U;
u32 reg_val = 0U;
tegra_hwpm_fn(hwpm, " ");
@@ -355,9 +334,33 @@ static int tegra_hwpm_func_single_element(struct tegra_soc_hwpm *hwpm,
ip_idx, static_inst_idx, a_type, element->element_type,
element->start_abs_pa, static_aperture_idx, idx);
/* Set perfmux slot pointer */
/* Set element slot pointer */
e_info->element_arr[idx] = element;
break;
case TEGRA_HWPM_UPDATE_IP_INST_MASK:
/* Validate perfmux availability by reading 1st alist offset */
ret = tegra_hwpm_regops_readl(hwpm, ip_inst, element,
tegra_hwpm_safe_add_u64(element->start_abs_pa,
element->alist[0U].reg_offset), &reg_val);
if (ret != 0) {
/*
* If an IP element is unavailable, perfmux register
* read will return with failure.
* Mark corresponding element as unavailable.
* NOTE: This is possible for floorswept IP elements.
* Hence, failure should not be propagated.
*/
tegra_hwpm_dbg(hwpm, hwpm_dbg_floorsweep_info,
"perfmux start_abs_pa 0x%llx unavailable",
element->start_abs_pa);
ip_inst->element_fs_mask &=
~(element->element_index_mask);
} else {
/* Update element mask in the instance */
ip_inst->element_fs_mask |= element->element_index_mask;
}
break;
case TEGRA_HWPM_GET_ALIST_SIZE:
if ((element->element_index_mask &
ip_inst->element_fs_mask) == 0U) {
@@ -525,6 +528,13 @@ static int tegra_hwpm_func_all_elements_of_type(struct tegra_soc_hwpm *hwpm,
}
}
if (iia_func == TEGRA_HWPM_UPDATE_IP_INST_MASK) {
if (a_type != TEGRA_HWPM_APERTURE_TYPE_PERFMUX) {
/* Only perfmuxes are essential for element_fs_mask */
return 0;
}
}
for (static_idx = 0U; static_idx < e_info->num_element_per_inst;
static_idx++) {
err = tegra_hwpm_func_single_element(
@@ -622,11 +632,14 @@ static int tegra_hwpm_func_single_inst(struct tegra_soc_hwpm *hwpm,
}
}
if (iia_func == TEGRA_HWPM_RESERVE_GIVEN_RESOURCE) {
/*
* Disable IP power management indicating
* start of profiling session
*/
if ((iia_func == TEGRA_HWPM_RESERVE_GIVEN_RESOURCE) ||
(iia_func == TEGRA_HWPM_UPDATE_IP_INST_MASK)) {
if ((chip_ip->inst_fs_mask & ip_inst->hw_inst_mask) == 0U) {
/* This instance is unavailable */
return 0;
}
/* Disable IP power management */
err = tegra_hwpm_ip_handle_power_mgmt(hwpm, ip_inst, true);
if (err != 0) {
tegra_hwpm_err(hwpm,
@@ -645,11 +658,21 @@ static int tegra_hwpm_func_single_inst(struct tegra_soc_hwpm *hwpm,
goto fail;
}
if (iia_func == TEGRA_HWPM_RELEASE_RESOURCES) {
/*
* Enable IP power management indicating
* end of profiling session
*/
if (iia_func == TEGRA_HWPM_UPDATE_IP_INST_MASK) {
if (ip_inst->element_fs_mask == 0U) {
/* No element available in this inst */
chip_ip->inst_fs_mask &= ~(ip_inst->hw_inst_mask);
}
if (chip_ip->inst_fs_mask == 0U) {
/* No instance is available */
chip_ip->resource_status =
TEGRA_HWPM_RESOURCE_STATUS_INVALID;
}
}
if ((iia_func == TEGRA_HWPM_RELEASE_RESOURCES) ||
(iia_func == TEGRA_HWPM_UPDATE_IP_INST_MASK)) {
/* Enable IP power management */
err = tegra_hwpm_ip_handle_power_mgmt(hwpm, ip_inst, false);
if (err != 0) {
tegra_hwpm_err(hwpm,
@@ -725,6 +748,14 @@ int tegra_hwpm_func_single_ip(struct tegra_soc_hwpm *hwpm,
}
switch (iia_func) {
case TEGRA_HWPM_UPDATE_IP_INST_MASK:
if (chip_ip->inst_fs_mask == 0U) {
/* No available IP instances */
tegra_hwpm_dbg(hwpm, hwpm_dbg_floorsweep_info,
"Chip IP %d not available", ip_idx);
return 0;
}
break;
case TEGRA_HWPM_GET_ALIST_SIZE:
case TEGRA_HWPM_COMBINE_ALIST:
case TEGRA_HWPM_BIND_RESOURCES:

@@ -151,6 +151,13 @@ int tegra_hwpm_setup_sw(struct tegra_soc_hwpm *hwpm)
return ret;
}
ret = tegra_hwpm_func_all_ip(hwpm, NULL,
TEGRA_HWPM_UPDATE_IP_INST_MASK);
if (ret != 0) {
tegra_hwpm_err(hwpm, "Failed to update IP fs_info");
return ret;
}
/* Initialize SW state */
hwpm->bind_completed = false;
hwpm->full_alist_size = 0;

@@ -99,32 +99,6 @@ int tegra_hwpm_ip_handle_power_mgmt(struct tegra_soc_hwpm *hwpm,
return err;
}
static int tegra_hwpm_update_ip_inst_element_fs_mask(
struct tegra_soc_hwpm *hwpm,
u32 ip_idx, u32 a_type, u32 inst_idx, bool available)
{
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
struct hwpm_ip *chip_ip = active_chip->chip_ips[ip_idx];
struct hwpm_ip_inst_per_aperture_info *inst_a_info =
&chip_ip->inst_aperture_info[a_type];
struct hwpm_ip_inst *ip_inst = inst_a_info->inst_arr[inst_idx];
struct hwpm_ip_element_info *perfmux_info =
&ip_inst->element_info[TEGRA_HWPM_APERTURE_TYPE_PERFMUX];
struct hwpm_ip_aperture *perfmux = NULL;
u32 idx = 0U;
tegra_hwpm_fn(hwpm, " ");
/* Only perfmuxes are essential for element_fs_mask */
for (idx = 0U; idx < perfmux_info->num_element_per_inst; idx++) {
perfmux = &perfmux_info->element_static_array[idx];
ip_inst->element_fs_mask |= perfmux->element_index_mask;
}
return 0;
}
static int tegra_hwpm_update_ip_inst_fs_mask(struct tegra_soc_hwpm *hwpm,
u32 ip_idx, u32 a_type, u32 inst_idx, bool available)
{
@@ -133,6 +107,7 @@ static int tegra_hwpm_update_ip_inst_fs_mask(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip_inst_per_aperture_info *inst_a_info =
&chip_ip->inst_aperture_info[a_type];
struct hwpm_ip_inst *ip_inst = inst_a_info->inst_arr[inst_idx];
int ret = 0;
tegra_hwpm_fn(hwpm, " ");
@@ -140,6 +115,22 @@ static int tegra_hwpm_update_ip_inst_fs_mask(struct tegra_soc_hwpm *hwpm,
if (available) {
chip_ip->inst_fs_mask |= ip_inst->hw_inst_mask;
chip_ip->resource_status = TEGRA_HWPM_RESOURCE_STATUS_VALID;
if (hwpm->device_opened) {
/*
* IP fs_info is updated during the device open call.
* However, if an IP registers after the HWPM device was
* opened, this call updates the IP element mask.
*/
ret = tegra_hwpm_func_single_ip(hwpm, NULL,
TEGRA_HWPM_UPDATE_IP_INST_MASK, ip_idx);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"IP %d Failed to update fs_info",
ip_idx);
return ret;
}
}
} else {
chip_ip->inst_fs_mask &= ~(ip_inst->hw_inst_mask);
if (chip_ip->inst_fs_mask == 0U) {
@@ -148,8 +139,7 @@ static int tegra_hwpm_update_ip_inst_fs_mask(struct tegra_soc_hwpm *hwpm,
}
}
return tegra_hwpm_update_ip_inst_element_fs_mask(hwpm, ip_idx,
a_type, inst_idx, available);
return 0;
}
static int tegra_hwpm_update_ip_ops_info(struct tegra_soc_hwpm *hwpm,

@@ -134,6 +134,7 @@ enum tegra_hwpm_element_type {
enum tegra_hwpm_funcs {
TEGRA_HWPM_INIT_IP_STRUCTURES,
TEGRA_HWPM_MATCH_BASE_ADDRESS,
TEGRA_HWPM_UPDATE_IP_INST_MASK,
TEGRA_HWPM_GET_ALIST_SIZE,
TEGRA_HWPM_COMBINE_ALIST,
TEGRA_HWPM_RESERVE_GIVEN_RESOURCE,