diff --git a/drivers/gpu/nvgpu/boardobj/boardobjgrp.c b/drivers/gpu/nvgpu/boardobj/boardobjgrp.c index 577acda32..43928ac1f 100644 --- a/drivers/gpu/nvgpu/boardobj/boardobjgrp.c +++ b/drivers/gpu/nvgpu/boardobj/boardobjgrp.c @@ -254,6 +254,58 @@ static u32 boardobjgrp_pmustatusinstget_stub(struct gk20a *g, return -EINVAL; } +u32 boardobjgrp_pmudatainit_legacy(struct gk20a *g, + struct boardobjgrp *pboardobjgrp, + struct nv_pmu_boardobjgrp_super *pboardobjgrppmu) +{ + u32 status = 0; + struct boardobj *pboardobj = NULL; + struct nv_pmu_boardobj *ppmudata = NULL; + u8 index; + + gk20a_dbg_info(""); + + if (pboardobjgrp == NULL) + return -EINVAL; + if (pboardobjgrppmu == NULL) + return -EINVAL; + + boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)pboardobjgrppmu, + pboardobjgrp->objmask); + + BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK(32, index, pboardobjgrp->objmask) { + /* Obtain pointer to the current instance of the Object from the Group */ + pboardobj = pboardobjgrp->objgetbyidx(pboardobjgrp, index); + if (NULL == pboardobj) { + gk20a_err(dev_from_gk20a(g), + "could not get object instance"); + status = -EINVAL; + goto boardobjgrppmudatainit_legacy_done; + } + + status = pboardobjgrp->pmudatainstget(g, + (struct nv_pmu_boardobjgrp *)pboardobjgrppmu, + &ppmudata, index); + if (status) { + gk20a_err(dev_from_gk20a(g), + "could not get object instance"); + goto boardobjgrppmudatainit_legacy_done; + } + + /* Initialize the PMU Data */ + status = pboardobj->pmudatainit(g, pboardobj, ppmudata); + if (status) { + gk20a_err(dev_from_gk20a(g), + "could not parse pmu for device %d", index); + goto boardobjgrppmudatainit_legacy_done; + } + } + BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK_END + +boardobjgrppmudatainit_legacy_done: + gk20a_dbg_info(" Done"); + return status; +} u32 boardobjgrp_pmudatainit_super(struct gk20a *g, struct boardobjgrp *pboardobjgrp, struct nv_pmu_boardobjgrp_super *pboardobjgrppmu) diff --git a/drivers/gpu/nvgpu/boardobj/boardobjgrp.h 
/*
 * Iterate over every set bit of @mask, with @index taking the bit position
 * on each pass.  @mask_width selects the mask type (e.g. 32 -> u32 local
 * copy).  The mask argument is copied into a local, so the caller's mask is
 * not modified.
 *
 * NOTE: this macro deliberately opens two scopes (the outer block and the
 * for-loop) that are closed only by the matching
 * BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK_END below -- every use MUST be paired
 * with the _END macro, and `break`/`continue` inside the body refer to this
 * iteration loop.
 */
#define BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK(mask_width, index, mask)	\
{									\
	u##mask_width lcl_msk = (u##mask_width)(mask);			\
	for (index = 0; lcl_msk != 0; index++, lcl_msk >>= 1) {		\
		/* Skip positions whose (current) low bit is clear. */	\
		if (((u##mask_width)((u64)1) & lcl_msk) == 0) {		\
			continue;					\
		}

/* Closes the loop and block opened by BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK. */
#define BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK_END	\
	}					\
}
#ifndef _ctrlpmgr_h_
#define _ctrlpmgr_h_

#include "ctrlboardobj.h"

/* valid power domain values */
#define CTRL_PMGR_PWR_DEVICES_MAX_DEVICES	32
#define CTRL_PMGR_PWR_VIOLATION_MAX		0x06

/* Power-device type as encoded in the VBIOS power sensors table. */
#define CTRL_PMGR_PWR_DEVICE_TYPE_INA3221	0x4E

#define CTRL_PMGR_PWR_CHANNEL_INDEX_INVALID	0xFF
#define CTRL_PMGR_PWR_CHANNEL_TYPE_SENSOR	0x08

#define CTRL_PMGR_PWR_POLICY_TABLE_VERSION_3X	0x30
#define CTRL_PMGR_PWR_POLICY_TYPE_HW_THRESHOLD	0x04

#define CTRL_PMGR_PWR_POLICY_MAX_LIMIT_INPUTS	0x8
#define CTRL_PMGR_PWR_POLICY_IDX_NUM_INDEXES	0x08
#define CTRL_PMGR_PWR_POLICY_INDEX_INVALID	0xFF
#define CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM	0xFE
#define CTRL_PMGR_PWR_POLICY_LIMIT_MAX		(0xFFFFFFFF)

/*
 * Shunt-resistor description for a power sensor.
 * rshunt_value is interpreted as FXP8.8 when use_fxp8_8 is set
 * (presumably milliohms otherwise -- TODO confirm against the table parser).
 */
struct ctrl_pmgr_pwr_device_info_rshunt {
	bool use_fxp8_8;
	u16 rshunt_value;
};

/* Integral-control parameters for a power policy. */
struct ctrl_pmgr_pwr_policy_info_integral {
	u8 past_sample_count;
	u8 next_sample_count;
	u16 ratio_limit_min;
	u16 ratio_limit_max;
};

/* Input-filter types selectable per power policy. */
enum ctrl_pmgr_pwr_policy_filter_type {
	CTRL_PMGR_PWR_POLICY_FILTER_TYPE_NONE = 0,
	CTRL_PMGR_PWR_POLICY_FILTER_TYPE_BLOCK,
	CTRL_PMGR_PWR_POLICY_FILTER_TYPE_MOVING_AVERAGE,
	CTRL_PMGR_PWR_POLICY_FILTER_TYPE_IIR
};

struct ctrl_pmgr_pwr_policy_filter_param_block {
	u32 block_size;
};

struct ctrl_pmgr_pwr_policy_filter_param_moving_average {
	u32 window_size;
};

struct ctrl_pmgr_pwr_policy_filter_param_iir {
	u32 divisor;
};

/* Filter parameters; the active member is selected by filter_type above. */
union ctrl_pmgr_pwr_policy_filter_param {
	struct ctrl_pmgr_pwr_policy_filter_param_block block;
	struct ctrl_pmgr_pwr_policy_filter_param_moving_average moving_avg;
	struct ctrl_pmgr_pwr_policy_filter_param_iir iir;
};

/* One client-requested limit on a power policy. */
struct ctrl_pmgr_pwr_policy_limit_input {
	u8 pwr_policy_idx;
	u32 limit_value;
};

/*
 * Arbitration state over up to MAX_LIMIT_INPUTS requested limits;
 * b_arb_max selects max- vs min-arbitration, output holds the result.
 */
struct ctrl_pmgr_pwr_policy_limit_arbitration {
	bool b_arb_max;
	u8 num_inputs;
	u32 output;
	struct ctrl_pmgr_pwr_policy_limit_input
		inputs[CTRL_PMGR_PWR_POLICY_MAX_LIMIT_INPUTS];
};

#endif
b/drivers/gpu/nvgpu/gk20a/gk20a.h index 58e69cbec..22a9ce4cd 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/gk20a.h @@ -57,9 +57,11 @@ struct acr_desc; #ifdef CONFIG_ARCH_TEGRA_18x_SOC #include "clk/clk.h" #include "perf/perf.h" +#include "pmgr/pmgr.h" #endif #include "gm206/bios_gm206.h" + /* PTIMER_REF_FREQ_HZ corresponds to a period of 32 nanoseconds. 32 ns is the resolution of ptimer. */ #define PTIMER_REF_FREQ_HZ 31250000 @@ -784,6 +786,7 @@ struct gk20a { #ifdef CONFIG_ARCH_TEGRA_18x_SOC struct clk_pmupstate clk_pmu; struct perf_pmupstate perf_pmu; + struct pmgr_pmupstate pmgr_pmu; #endif #ifdef CONFIG_DEBUG_FS diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h index b28fd597f..d6d57880c 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h @@ -26,6 +26,7 @@ #include "pmuif/gpmuifboardobj.h" #include "pmuif/gpmuifclk.h" #include "pmuif/gpmuifperf.h" +#include "pmuif/gpmuifpmgr.h" /* defined by pmu hw spec */ #define GK20A_PMU_VA_SIZE (512 * 1024 * 1024) @@ -179,6 +180,7 @@ struct pmu_ucode_desc_v1 { #define PMU_UNIT_RC (0x1F) #define PMU_UNIT_FECS_MEM_OVERRIDE (0x1E) #define PMU_UNIT_CLK (0x0D) +#define PMU_UNIT_PMGR (0x18) #define PMU_UNIT_END (0x23) @@ -358,6 +360,7 @@ struct pmu_cmd { struct nv_pmu_boardobj_cmd boardobj; struct nv_pmu_perf_cmd perf; struct nv_pmu_clk_cmd clk; + struct nv_pmu_pmgr_cmd pmgr; } cmd; }; @@ -373,6 +376,7 @@ struct pmu_msg { struct nv_pmu_boardobj_msg boardobj; struct nv_pmu_perf_msg perf; struct nv_pmu_clk_msg clk; + struct nv_pmu_pmgr_msg pmgr; } msg; }; diff --git a/drivers/gpu/nvgpu/gm206/bios_gm206.h b/drivers/gpu/nvgpu/gm206/bios_gm206.h index 937ba0c5d..f8187631b 100644 --- a/drivers/gpu/nvgpu/gm206/bios_gm206.h +++ b/drivers/gpu/nvgpu/gm206/bios_gm206.h @@ -37,6 +37,9 @@ enum { THERMAL_COOLERS_TABLE, PERFORMANCE_SETTINGS_SCRIPT, CONTINUOUS_VIRTUAL_BINNING_TABLE, + POWER_SENSORS_TABLE = 0xA, + POWER_CAPPING_TABLE = 0xB, + 
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/

/*
 * PMU interface (RM <-> PMU shared layout) for the PMGR unit: I2C / power
 * device descriptors, power monitor channels, power policies, and the
 * PMGR command/message formats.
 *
 * NOTE(review): these structs mirror PMU DMEM layout; do not reorder or
 * resize fields without a matching PMU ucode change.
 */

#ifndef _GPMUIFPMGR_H_
#define _GPMUIFPMGR_H_

#include "gk20a/gk20a.h"
#include "gk20a/pmu_gk20a.h"
#include "ctrl/ctrlpmgr.h"
#include "pmuif/gpmuifboardobj.h"
#include "gk20a/pmu_common.h"

/* --- I2C device descriptor table ---------------------------------------- */

struct nv_pmu_pmgr_i2c_device_desc {
	struct nv_pmu_boardobj super;
	u8 dcb_index;
	u16 i2c_address;
	u32 i2c_flags;
	u8 i2c_port;
};

#define NV_PMU_PMGR_I2C_DEVICE_DESC_TABLE_MAX_DEVICES	(32)

struct nv_pmu_pmgr_i2c_device_desc_table {
	u32 dev_mask;	/* bit i set => devices[i] is valid */
	struct nv_pmu_pmgr_i2c_device_desc
		devices[NV_PMU_PMGR_I2C_DEVICE_DESC_TABLE_MAX_DEVICES];
};

/* --- Power device descriptors ------------------------------------------- */

struct nv_pmu_pmgr_pwr_device_desc {
	struct nv_pmu_boardobj super;
	u32 power_corr_factor;
};

#define NV_PMU_PMGR_PWR_DEVICE_INA3221_CH_NUM	0x03

/* INA3221 power-monitor specialization (3 measurement channels). */
struct nv_pmu_pmgr_pwr_device_desc_ina3221 {
	struct nv_pmu_pmgr_pwr_device_desc super;
	u8 i2c_dev_idx;
	struct ctrl_pmgr_pwr_device_info_rshunt
		r_shuntm_ohm[NV_PMU_PMGR_PWR_DEVICE_INA3221_CH_NUM];
	u16 configuration;
	u16 mask_enable;
	u32 event_mask;
	u16 curr_correct_m;
	s16 curr_correct_b;
};

union nv_pmu_pmgr_pwr_device_desc_union {
	struct nv_pmu_boardobj board_obj;
	struct nv_pmu_pmgr_pwr_device_desc pwr_dev;
	struct nv_pmu_pmgr_pwr_device_desc_ina3221 ina3221;
};

struct nv_pmu_pmgr_pwr_device_ba_info {
	bool b_initialized_and_used;
};

struct nv_pmu_pmgr_pwr_device_desc_table_header {
	struct nv_pmu_boardobjgrp_e32 super;
	struct nv_pmu_pmgr_pwr_device_ba_info ba_info;
};

NV_PMU_MAKE_ALIGNED_STRUCT(nv_pmu_pmgr_pwr_device_desc_table_header,
	sizeof(struct nv_pmu_pmgr_pwr_device_desc_table_header));
NV_PMU_MAKE_ALIGNED_UNION(nv_pmu_pmgr_pwr_device_desc_union,
	sizeof(union nv_pmu_pmgr_pwr_device_desc_union));

struct nv_pmu_pmgr_pwr_device_desc_table {
	union nv_pmu_pmgr_pwr_device_desc_table_header_aligned hdr;
	union nv_pmu_pmgr_pwr_device_desc_union_aligned
		devices[CTRL_PMGR_PWR_DEVICES_MAX_DEVICES];
};

/* Used only to compute the largest DMEM element for this group. */
union nv_pmu_pmgr_pwr_device_dmem_size {
	union nv_pmu_pmgr_pwr_device_desc_table_header_aligned pwr_device_hdr;
	union nv_pmu_pmgr_pwr_device_desc_union_aligned pwr_device;
};

/* --- Power monitor channels and channel relationships -------------------- */

struct nv_pmu_pmgr_pwr_channel {
	struct nv_pmu_boardobj super;
	u8 pwr_rail;
	u8 ch_idx;
	u32 volt_fixedu_v;
	u32 pwr_corr_slope;
	s32 pwr_corr_offsetm_w;
	u32 curr_corr_slope;
	s32 curr_corr_offsetm_a;
	u32 dependent_ch_mask;
};

#define NV_PMU_PMGR_PWR_CHANNEL_MAX_CHANNELS		16
#define NV_PMU_PMGR_PWR_CHANNEL_MAX_CHRELATIONSHIPS	16

struct nv_pmu_pmgr_pwr_channel_sensor {
	struct nv_pmu_pmgr_pwr_channel super;
	u8 pwr_dev_idx;
	u8 pwr_dev_prov_idx;
};

/* Opaque blob sized to the PMU-side channel representation. */
struct nv_pmu_pmgr_pwr_channel_pmu_compactible {
	u8 pmu_compactible_data[56];
};

union nv_pmu_pmgr_pwr_channel_union {
	struct nv_pmu_boardobj board_obj;
	struct nv_pmu_pmgr_pwr_channel pwr_channel;
	struct nv_pmu_pmgr_pwr_channel_sensor sensor;
	struct nv_pmu_pmgr_pwr_channel_pmu_compactible pmu_pwr_channel;
};

#define NV_PMU_PMGR_PWR_MONITOR_TYPE_NO_POLLING	0x02

struct nv_pmu_pmgr_pwr_monitor_pstate {
	u32 hw_channel_mask;
};

union nv_pmu_pmgr_pwr_monitor_type_specific {
	struct nv_pmu_pmgr_pwr_monitor_pstate pstate;
};

struct nv_pmu_pmgr_pwr_chrelationship_pmu_compactible {
	u8 pmu_compactible_data[28];
};

union nv_pmu_pmgr_pwr_chrelationship_union {
	struct nv_pmu_boardobj board_obj;
	struct nv_pmu_pmgr_pwr_chrelationship_pmu_compactible pmu_pwr_chrelationship;
};

struct nv_pmu_pmgr_pwr_channel_header {
	struct nv_pmu_boardobjgrp_e32 super;
	u8 type;
	union nv_pmu_pmgr_pwr_monitor_type_specific type_specific;
	u8 sample_count;
	u16 sampling_periodms;
	u16 sampling_period_low_powerms;
	u32 total_gpu_power_channel_mask;
	u32 physical_channel_mask;
};

struct nv_pmu_pmgr_pwr_chrelationship_header {
	struct nv_pmu_boardobjgrp_e32 super;
};

NV_PMU_MAKE_ALIGNED_STRUCT(nv_pmu_pmgr_pwr_channel_header,
	sizeof(struct nv_pmu_pmgr_pwr_channel_header));
NV_PMU_MAKE_ALIGNED_STRUCT(nv_pmu_pmgr_pwr_chrelationship_header,
	sizeof(struct nv_pmu_pmgr_pwr_chrelationship_header));
NV_PMU_MAKE_ALIGNED_UNION(nv_pmu_pmgr_pwr_chrelationship_union,
	sizeof(union nv_pmu_pmgr_pwr_chrelationship_union));
NV_PMU_MAKE_ALIGNED_UNION(nv_pmu_pmgr_pwr_channel_union,
	sizeof(union nv_pmu_pmgr_pwr_channel_union));

struct nv_pmu_pmgr_pwr_channel_desc {
	union nv_pmu_pmgr_pwr_channel_header_aligned hdr;
	union nv_pmu_pmgr_pwr_channel_union_aligned
		channels[NV_PMU_PMGR_PWR_CHANNEL_MAX_CHANNELS];
};

struct nv_pmu_pmgr_pwr_chrelationship_desc {
	union nv_pmu_pmgr_pwr_chrelationship_header_aligned hdr;
	union nv_pmu_pmgr_pwr_chrelationship_union_aligned
		ch_rels[NV_PMU_PMGR_PWR_CHANNEL_MAX_CHRELATIONSHIPS];
};

union nv_pmu_pmgr_pwr_monitor_dmem_size {
	union nv_pmu_pmgr_pwr_channel_header_aligned channel_hdr;
	union nv_pmu_pmgr_pwr_channel_union_aligned channel;
	union nv_pmu_pmgr_pwr_chrelationship_header_aligned ch_rels_hdr;
	union nv_pmu_pmgr_pwr_chrelationship_union_aligned ch_rels;
};

struct nv_pmu_pmgr_pwr_monitor_pack {
	struct nv_pmu_pmgr_pwr_channel_desc channels;
	struct nv_pmu_pmgr_pwr_chrelationship_desc ch_rels;
};

/* --- Power policies, relationships, violations --------------------------- */

#define NV_PMU_PMGR_PWR_POLICY_MAX_POLICIES		32
#define NV_PMU_PMGR_PWR_POLICY_MAX_POLICY_RELATIONSHIPS	32

struct nv_pmu_pmgr_pwr_policy {
	struct nv_pmu_boardobj super;
	u8 ch_idx;
	u8 num_limit_inputs;
	u8 limit_unit;
	u8 sample_mult;
	u32 limit_curr;
	u32 limit_min;
	u32 limit_max;
	struct ctrl_pmgr_pwr_policy_info_integral integral;
	enum ctrl_pmgr_pwr_policy_filter_type filter_type;
	union ctrl_pmgr_pwr_policy_filter_param filter_param;
};

struct nv_pmu_pmgr_pwr_policy_hw_threshold {
	struct nv_pmu_pmgr_pwr_policy super;
	u8 threshold_idx;
	u8 low_threshold_idx;
	bool b_use_low_threshold;
	u16 low_threshold_value;
};

struct nv_pmu_pmgr_pwr_policy_pmu_compactible {
	u8 pmu_compactible_data[68];
};

union nv_pmu_pmgr_pwr_policy_union {
	struct nv_pmu_boardobj board_obj;
	struct nv_pmu_pmgr_pwr_policy pwr_policy;
	struct nv_pmu_pmgr_pwr_policy_hw_threshold hw_threshold;
	struct nv_pmu_pmgr_pwr_policy_pmu_compactible pmu_pwr_policy;
};

struct nv_pmu_pmgr_pwr_policy_relationship_pmu_compactible {
	u8 pmu_compactible_data[24];
};

union nv_pmu_pmgr_pwr_policy_relationship_union {
	struct nv_pmu_boardobj board_obj;
	struct nv_pmu_pmgr_pwr_policy_relationship_pmu_compactible pmu_pwr_relationship;
};

struct nv_pmu_pmgr_pwr_violation_pmu_compactible {
	u8 pmu_compactible_data[16];
};

union nv_pmu_pmgr_pwr_violation_union {
	struct nv_pmu_boardobj board_obj;
	struct nv_pmu_pmgr_pwr_violation_pmu_compactible violation;
};

#define NV_PMU_PMGR_PWR_POLICY_DESC_TABLE_VERSION_3X	0x30

NV_PMU_MAKE_ALIGNED_UNION(nv_pmu_pmgr_pwr_policy_union,
	sizeof(union nv_pmu_pmgr_pwr_policy_union));
NV_PMU_MAKE_ALIGNED_UNION(nv_pmu_pmgr_pwr_policy_relationship_union,
	sizeof(union nv_pmu_pmgr_pwr_policy_relationship_union));

#define NV_PMU_PERF_DOMAIN_GROUP_MAX_GROUPS	2

struct nv_pmu_perf_domain_group_limits {
	u32 values[NV_PMU_PERF_DOMAIN_GROUP_MAX_GROUPS];
};

#define NV_PMU_PMGR_RESERVED_PWR_POLICY_MASK_COUNT	0x6

struct nv_pmu_pmgr_pwr_policy_desc_header {
	struct nv_pmu_boardobjgrp_e32 super;
	u8 version;	/* expected NV_PMU_PMGR_PWR_POLICY_DESC_TABLE_VERSION_3X */
	bool b_enabled;
	u8 low_sampling_mult;
	u8 semantic_policy_tbl[CTRL_PMGR_PWR_POLICY_IDX_NUM_INDEXES];
	u16 base_sample_period;
	u16 min_client_sample_period;
	u32 reserved_pmu_policy_mask[NV_PMU_PMGR_RESERVED_PWR_POLICY_MASK_COUNT];
	struct nv_pmu_perf_domain_group_limits global_ceiling;
};

NV_PMU_MAKE_ALIGNED_STRUCT(nv_pmu_pmgr_pwr_policy_desc_header,
	sizeof(struct nv_pmu_pmgr_pwr_policy_desc_header));

struct nv_pmu_pmgr_pwr_policyrel_desc_header {
	struct nv_pmu_boardobjgrp_e32 super;
};

NV_PMU_MAKE_ALIGNED_STRUCT(nv_pmu_pmgr_pwr_policyrel_desc_header,
	sizeof(struct nv_pmu_pmgr_pwr_policyrel_desc_header));

struct nv_pmu_pmgr_pwr_violation_desc_header {
	struct nv_pmu_boardobjgrp_e32 super;
};

NV_PMU_MAKE_ALIGNED_STRUCT(nv_pmu_pmgr_pwr_violation_desc_header,
	sizeof(struct nv_pmu_pmgr_pwr_violation_desc_header));
NV_PMU_MAKE_ALIGNED_UNION(nv_pmu_pmgr_pwr_violation_union,
	sizeof(union nv_pmu_pmgr_pwr_violation_union));

struct nv_pmu_pmgr_pwr_policy_desc {
	union nv_pmu_pmgr_pwr_policy_desc_header_aligned hdr;
	union nv_pmu_pmgr_pwr_policy_union_aligned
		policies[NV_PMU_PMGR_PWR_POLICY_MAX_POLICIES];
};

struct nv_pmu_pmgr_pwr_policyrel_desc {
	union nv_pmu_pmgr_pwr_policyrel_desc_header_aligned hdr;
	union nv_pmu_pmgr_pwr_policy_relationship_union_aligned
		policy_rels[NV_PMU_PMGR_PWR_POLICY_MAX_POLICY_RELATIONSHIPS];
};

struct nv_pmu_pmgr_pwr_violation_desc {
	union nv_pmu_pmgr_pwr_violation_desc_header_aligned hdr;
	union nv_pmu_pmgr_pwr_violation_union_aligned
		violations[CTRL_PMGR_PWR_VIOLATION_MAX];
};

union nv_pmu_pmgr_pwr_policy_dmem_size {
	union nv_pmu_pmgr_pwr_policy_desc_header_aligned policy_hdr;
	union nv_pmu_pmgr_pwr_policy_union_aligned policy;
	union nv_pmu_pmgr_pwr_policyrel_desc_header_aligned policy_rels_hdr;
	union nv_pmu_pmgr_pwr_policy_relationship_union_aligned policy_rels;
	union nv_pmu_pmgr_pwr_violation_desc_header_aligned violation_hdr;
	union nv_pmu_pmgr_pwr_violation_union_aligned violation;
};

struct nv_pmu_pmgr_pwr_policy_pack {
	struct nv_pmu_pmgr_pwr_policy_desc policies;
	struct nv_pmu_pmgr_pwr_policyrel_desc policy_rels;
	struct nv_pmu_pmgr_pwr_violation_desc violations;
};

/* --- PMGR commands (driver -> PMU) --------------------------------------- */

#define NV_PMU_PMGR_CMD_ID_SET_OBJECT		(0x00000000)

#define NV_PMU_PMGR_MSG_ID_QUERY		(0x00000002)

#define NV_PMU_PMGR_CMD_ID_PWR_DEVICES_QUERY	(0x00000001)

#define NV_PMU_PMGR_CMD_ID_LOAD			(0x00000006)

#define NV_PMU_PMGR_CMD_ID_UNLOAD		(0x00000007)

struct nv_pmu_pmgr_cmd_set_object {
	u8 cmd_type;
	u8 pad[2];
	u8 object_type;	/* one of NV_PMU_PMGR_OBJECT_* below */
	struct nv_pmu_allocation object;
};

/* Byte offset of the allocation field within the SET_OBJECT command. */
#define NV_PMU_PMGR_SET_OBJECT_ALLOC_OFFSET	(0x04)

#define NV_PMU_PMGR_OBJECT_I2C_DEVICE_DESC_TABLE	(0x00000000)

#define NV_PMU_PMGR_OBJECT_PWR_DEVICE_DESC_TABLE	(0x00000001)

#define NV_PMU_PMGR_OBJECT_PWR_MONITOR			(0x00000002)

#define NV_PMU_PMGR_OBJECT_PWR_POLICY			(0x00000005)

/* Per-device readings returned by PWR_DEVICES_QUERY. */
struct nv_pmu_pmgr_pwr_devices_query_payload {
	struct {
		u32 powerm_w;
		u32 voltageu_v;
		u32 currentm_a;
	} devices[CTRL_PMGR_PWR_DEVICES_MAX_DEVICES];
};

struct nv_pmu_pmgr_cmd_pwr_devices_query {
	u8 cmd_type;
	u8 pad[3];
	u32 dev_mask;	/* devices to sample */
	struct nv_pmu_allocation payload;
};

#define NV_PMU_PMGR_PWR_DEVICES_QUERY_ALLOC_OFFSET	(0x08)

struct nv_pmu_pmgr_cmd_load {
	u8 cmd_type;
};

struct nv_pmu_pmgr_cmd_unload {
	u8 cmd_type;
};

struct nv_pmu_pmgr_cmd {
	union {
		u8 cmd_type;
		struct nv_pmu_pmgr_cmd_set_object set_object;
		struct nv_pmu_pmgr_cmd_pwr_devices_query pwr_dev_query;
		struct nv_pmu_pmgr_cmd_load load;
		struct nv_pmu_pmgr_cmd_unload unload;
	};
};

/* --- PMGR messages (PMU -> driver) --------------------------------------- */

#define NV_PMU_PMGR_MSG_ID_SET_OBJECT	(0x00000000)

#define NV_PMU_PMGR_MSG_ID_LOAD		(0x00000004)

#define NV_PMU_PMGR_MSG_ID_UNLOAD	(0x00000005)

struct nv_pmu_pmgr_msg_set_object {
	u8 msg_type;
	bool b_success;
	flcn_status flcnstatus;
	u8 object_type;
};

struct nv_pmu_pmgr_msg_query {
	u8 msg_type;
	bool b_success;
	flcn_status flcnstatus;
	u8 cmd_type;
};

struct nv_pmu_pmgr_msg_load {
	u8 msg_type;
	bool b_success;
	flcn_status flcnstatus;
};

struct nv_pmu_pmgr_msg_unload {
	u8 msg_type;
};

struct nv_pmu_pmgr_msg {
	union {
		u8 msg_type;
		struct nv_pmu_pmgr_msg_set_object set_object;
		struct nv_pmu_pmgr_msg_query query;
		struct nv_pmu_pmgr_msg_load load;
		struct nv_pmu_pmgr_msg_unload unload;
	};
};

#endif