diff --git a/drivers/gpu/nvgpu/Makefile b/drivers/gpu/nvgpu/Makefile index 8d4dc91ec..27854644d 100644 --- a/drivers/gpu/nvgpu/Makefile +++ b/drivers/gpu/nvgpu/Makefile @@ -103,6 +103,7 @@ nvgpu-y += common/bus/bus_gk20a.o \ common/pmu/pmu_fw.o \ common/pmu/pmu_pg.o \ common/pmu/pmu_perfmon.o \ + common/pmu/pmu_super_surface.o \ common/pmu/pmu_debug.o \ common/pmu/pmu_gk20a.o \ common/pmu/pmu_gm20b.o \ diff --git a/drivers/gpu/nvgpu/Makefile.sources b/drivers/gpu/nvgpu/Makefile.sources index f3c485564..b66dfa033 100644 --- a/drivers/gpu/nvgpu/Makefile.sources +++ b/drivers/gpu/nvgpu/Makefile.sources @@ -142,6 +142,7 @@ srcs += common/sim.c \ common/pmu/pmu_fw.c \ common/pmu/pmu_pg.c \ common/pmu/pmu_perfmon.c \ + common/pmu/pmu_super_surface.c \ common/pmu/pmu_debug.c \ common/pmu/pmu_gk20a.c \ common/pmu/pmu_gm20b.c \ diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c index d1495b5f4..828d3fd3f 100644 --- a/drivers/gpu/nvgpu/common/pmu/pmu.c +++ b/drivers/gpu/nvgpu/common/pmu/pmu.c @@ -549,10 +549,13 @@ int nvgpu_pmu_process_init_msg(struct nvgpu_pmu *pmu, start, size, PMU_DMEM_ALLOC_ALIGNMENT, 0); } + if (g->ops.pmu.create_ssmd_lookup_table != NULL) { + g->ops.pmu.create_ssmd_lookup_table(pmu); + } + pmu->pmu_ready = true; nvgpu_pmu_state_change(g, PMU_STATE_INIT_RECEIVED, true); - exit: nvgpu_pmu_dbg(g, "init received end, err %x", err); return err; @@ -754,29 +757,6 @@ int nvgpu_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem, return 0; } -int nvgpu_pmu_super_surface_alloc(struct gk20a *g, - struct nvgpu_mem *mem_surface, u32 size) -{ - struct vm_gk20a *vm = g->mm.pmu.vm; - int err = 0; - - nvgpu_log_fn(g, " "); - - err = nvgpu_dma_alloc_map(vm, size, mem_surface); - if (err != 0) { - nvgpu_err(g, "failed to allocate pmu suffer surface\n"); - err = -ENOMEM; - } - - return err; -} - -void nvgpu_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem) -{ - nvgpu_dma_free(g, mem); - (void) memset(mem, 0, 
sizeof(struct nvgpu_mem)); -} - struct gk20a *gk20a_from_pmu(struct nvgpu_pmu *pmu) { return pmu->g; diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_super_surface.c b/drivers/gpu/nvgpu/common/pmu/pmu_super_surface.c new file mode 100644 index 000000000..c940b716f --- /dev/null +++ b/drivers/gpu/nvgpu/common/pmu/pmu_super_surface.c @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include <nvgpu/gk20a.h> +#include <nvgpu/pmu.h> +#include <nvgpu/dma.h> +#include <nvgpu/log.h> + +int nvgpu_pmu_super_surface_alloc(struct gk20a *g, + struct nvgpu_mem *mem_surface, u32 size) +{ + struct vm_gk20a *vm = g->mm.pmu.vm; + int err = 0; + + nvgpu_log_fn(g, " "); + + err = nvgpu_dma_alloc_map(vm, size, mem_surface); + if (err != 0) { + nvgpu_err(g, "failed to allocate pmu super surface\n"); + } + + return err; +} + +void nvgpu_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem) +{ + nvgpu_log_fn(g, " "); + + nvgpu_dma_free(g, mem); +} + +/* + * Lookup table to hold info about super surface members; + * here the member ID from nv_pmu_super_surface_member_descriptor + * is used as an index to store the member info in two different + * tables, i.e. one table is for the SET ID type & the second table + * for the GET_STATUS ID type. + */ +void nvgpu_pmu_create_ssmd_lookup_table(struct nvgpu_pmu *pmu) +{ + struct gk20a *g = pmu->g; + struct nv_pmu_super_surface_member_descriptor ssmd; + u32 ssmd_size = (u32)sizeof( + struct nv_pmu_super_surface_member_descriptor); + u32 idx = 0U; + + nvgpu_log_fn(g, " "); + + for (idx = 0U; idx < NV_PMU_SUPER_SURFACE_MEMBER_DESCRIPTOR_COUNT; + idx++) { + (void) memset(&ssmd, 0x0, ssmd_size); + + nvgpu_mem_rd_n(g, &pmu->super_surface_buf, idx * ssmd_size, + &ssmd, ssmd_size); + + nvgpu_pmu_dbg(g, "ssmd: id-0x%x offset-0x%x size-%x rsvd-0x%x", + ssmd.id, ssmd.offset, ssmd.size, ssmd.rsvd); + + /* Check member type from ID member & update respective table */ + if ((ssmd.id & + NV_RM_PMU_SUPER_SURFACE_MEMBER_ID_TYPE_SET) != 0U) { + /* + * clear member type from member ID as we create + * different table for each type & use ID as index + * during member info fetch. 
+ */ + ssmd.id &= 0xFFFFU; + /*use member ID as index for lookup table too*/ + (void) memcpy(&pmu->ssmd_set[ssmd.id], &ssmd, + ssmd_size); + } else if ((ssmd.id & + NV_RM_PMU_SUPER_SURFACE_MEMBER_ID_TYPE_GET_STATUS) != 0U) { + /* + * clear member type from member ID as we create + * different table for each type & use ID as index + * during member info fetch. + */ + ssmd.id &= 0xFFFFU; + /*use member ID as index for lookup table too*/ + (void) memcpy(&pmu->ssmd_get_status[ssmd.id], &ssmd, + ssmd_size); + } else { + continue; + } + } +} + +u32 nvgpu_pmu_get_ss_member_set_offset(struct nvgpu_pmu *pmu, u32 member_id) +{ + return pmu->ssmd_set[member_id].offset; +} + +u32 nvgpu_pmu_get_ss_member_set_size(struct nvgpu_pmu *pmu, u32 member_id) +{ + return pmu->ssmd_set[member_id].size; +} + +u32 nvgpu_pmu_get_ss_member_get_status_offset(struct nvgpu_pmu *pmu, + u32 member_id) +{ + return pmu->ssmd_get_status[member_id].offset; +} + +u32 nvgpu_pmu_get_ss_member_get_status_size(struct nvgpu_pmu *pmu, + u32 member_id) +{ + return pmu->ssmd_get_status[member_id].size; +} + diff --git a/drivers/gpu/nvgpu/include/nvgpu/boardobjgrp.h b/drivers/gpu/nvgpu/include/nvgpu/boardobjgrp.h index bc0bc8337..43b8440c3 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/boardobjgrp.h +++ b/drivers/gpu/nvgpu/include/nvgpu/boardobjgrp.h @@ -336,8 +336,10 @@ do { \ NV_PMU_##ENG##_MSG_ID_BOARDOBJ_GRP_SET, /* msgid */ \ (u32)sizeof(union nv_pmu_##eng##_##class##_boardobjgrp_set_header_aligned), \ (u32)sizeof(union nv_pmu_##eng##_##class##_boardobj_set_union_aligned), \ - (u32)sizeof(struct nv_pmu_##eng##_##class##_boardobj_grp_set), \ - (u32)offsetof(struct nv_pmu_super_surface, eng.class##_grp_set), \ + (u32)nvgpu_pmu_get_ss_member_set_size(&g->pmu, \ + NV_PMU_SUPER_SURFACE_MEMBER_##CLASS##_GRP), \ + (u32)nvgpu_pmu_get_ss_member_set_offset(&g->pmu, \ + NV_PMU_SUPER_SURFACE_MEMBER_##CLASS##_GRP), \ NV_PMU_RPC_ID_##ENG##_BOARD_OBJ_GRP_CMD) #define BOARDOBJGRP_PMU_CMD_GRP_SET_CONSTRUCT_35(g, 
pboardobjgrp, eng, ENG, \ @@ -364,8 +366,10 @@ do { \ NV_PMU_##ENG##_MSG_ID_BOARDOBJ_GRP_GET_STATUS, /* msgid */ \ (u32)sizeof(union nv_pmu_##eng##_##class##_boardobjgrp_get_status_header_aligned), \ (u32)sizeof(union nv_pmu_##eng##_##class##_boardobj_get_status_union_aligned), \ - (u32)sizeof(struct nv_pmu_##eng##_##class##_boardobj_grp_get_status), \ - (u32)offsetof(struct nv_pmu_super_surface, eng.class##_grp_get_status), \ + (u32)nvgpu_pmu_get_ss_member_get_status_size(&g->pmu, \ + NV_PMU_SUPER_SURFACE_MEMBER_##CLASS##_GRP), \ + (u32)nvgpu_pmu_get_ss_member_get_status_offset(&g->pmu, \ + NV_PMU_SUPER_SURFACE_MEMBER_##CLASS##_GRP), \ NV_PMU_RPC_ID_##ENG##_BOARD_OBJ_GRP_CMD) #define BOARDOBJGRP_PMU_CMD_GRP_GET_STATUS_CONSTRUCT_35(g, pboardobjgrp, \ diff --git a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h index 7efc565dc..4348a4b2a 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h +++ b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h @@ -1229,6 +1229,7 @@ struct gpu_ops { u32 err_id, u64 err_addr, u64 err_cnt); } err_ops; + void (*create_ssmd_lookup_table)(struct nvgpu_pmu *pmu); } pmu; struct { int (*init_debugfs)(struct gk20a *g); diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu.h b/drivers/gpu/nvgpu/include/nvgpu/pmu.h index 1498fe745..9aa460013 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/pmu.h +++ b/drivers/gpu/nvgpu/include/nvgpu/pmu.h @@ -32,6 +32,7 @@ #include #include #include +#include #include #define nvgpu_pmu_dbg(g, fmt, args...) 
\ @@ -338,7 +339,13 @@ struct nvgpu_pmu { /* TBD: remove this if ZBC seq is fixed */ struct nvgpu_mem seq_buf; struct nvgpu_mem trace_buf; + + /* super surface members */ struct nvgpu_mem super_surface_buf; + struct nv_pmu_super_surface_member_descriptor + ssmd_set[NV_PMU_SUPER_SURFACE_MEMBER_DESCRIPTOR_COUNT]; + struct nv_pmu_super_surface_member_descriptor + ssmd_get_status[NV_PMU_SUPER_SURFACE_MEMBER_DESCRIPTOR_COUNT]; bool buf_loaded; @@ -537,6 +544,7 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc, u16 size_rpc, u16 size_scratch, pmu_callback caller_cb, void *caller_cb_param, bool is_copy_back); + /* PMU wait*/ int pmu_wait_message_cond_status(struct nvgpu_pmu *pmu, u32 timeout_ms, void *var, u8 val); @@ -545,4 +553,15 @@ void pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms, int nvgpu_pmu_wait_ready(struct gk20a *g); struct gk20a *gk20a_from_pmu(struct nvgpu_pmu *pmu); + +/* super surface */ +void nvgpu_pmu_create_ssmd_lookup_table(struct nvgpu_pmu *pmu); +u32 nvgpu_pmu_get_ss_member_set_offset(struct nvgpu_pmu *pmu, u32 member_id); +u32 nvgpu_pmu_get_ss_member_get_status_offset(struct nvgpu_pmu *pmu, + u32 member_id); +u32 nvgpu_pmu_get_ss_member_set_size(struct nvgpu_pmu *pmu, u32 member_id); +u32 nvgpu_pmu_get_ss_member_get_status_size(struct nvgpu_pmu *pmu, + u32 member_id); + #endif /* NVGPU_PMU_H */ + diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmu_super_surf_if.h b/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmu_super_surf_if.h index dbd06ff29..5578ab8e8 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmu_super_surf_if.h +++ b/drivers/gpu/nvgpu/include/nvgpu/pmuif/gpmu_super_surf_if.h @@ -19,14 +19,80 @@ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ + #ifndef NVGPU_PMUIF_GPMU_SUPER_SURF_IF_H #define NVGPU_PMUIF_GPMU_SUPER_SURF_IF_H #include +/* PMU super surface */ +/* 1MB Bytes for SUPER_SURFACE_SIZE */ +#define SUPER_SURFACE_SIZE (1024U * 1024U) +/* 64K Bytes for command queues */ +#define FBQ_CMD_QUEUES_SIZE (64U * 1024U) +/* 1K Bytes for message queue */ +#define FBQ_MSG_QUEUE_SIZE (1024U) +/* 512 Bytes for SUPER_SURFACE_MEMBER_DESCRIPTOR */ +#define SSMD_SIZE (512U) +/* 16 bytes for SUPER_SURFACE_HDR */ +#define SS_HDR_SIZE (16U) +#define SS_UNMAPPED_MEMBERS_SIZE (SUPER_SURFACE_SIZE - \ + (FBQ_CMD_QUEUES_SIZE + FBQ_MSG_QUEUE_SIZE + SSMD_SIZE + SS_HDR_SIZE)) + +/* + * Super surface member BIT identification used in member_mask indicating + * which members in the super surface are valid. + * + * The ordering here is very important because it defines the order of + * processing in the PMU and takes dependencies into consideration. + */ +#define NV_PMU_SUPER_SURFACE_MEMBER_THERM_DEVICE_GRP 0x00U +#define NV_PMU_SUPER_SURFACE_MEMBER_THERM_CHANNEL_GRP 0x01U +#define NV_PMU_SUPER_SURFACE_MEMBER_VFE_VAR_GRP 0x03U +#define NV_PMU_SUPER_SURFACE_MEMBER_VFE_EQU_GRP 0x04U +#define NV_PMU_SUPER_SURFACE_MEMBER_VOLT_DEVICE_GRP 0x0BU +#define NV_PMU_SUPER_SURFACE_MEMBER_VOLT_RAIL_GRP 0x0CU +#define NV_PMU_SUPER_SURFACE_MEMBER_VOLT_POLICY_GRP 0x0DU +#define NV_PMU_SUPER_SURFACE_MEMBER_CLK_DOMAIN_GRP 0x12U +#define NV_PMU_SUPER_SURFACE_MEMBER_CLK_PROG_GRP 0x13U +#define NV_PMU_SUPER_SURFACE_MEMBER_CLK_VIN_DEVICE_GRP 0x15U +#define NV_PMU_SUPER_SURFACE_MEMBER_CLK_FLL_DEVICE_GRP 0x16U +#define NV_PMU_SUPER_SURFACE_MEMBER_CLK_VF_POINT_GRP 0x17U +#define NV_PMU_SUPER_SURFACE_MEMBER_CLK_FREQ_CONTROLLER_GRP 0x18U +#define NV_PMU_SUPER_SURFACE_MEMBER_CLK_FREQ_DOMAIN_GRP 0x19U +#define NV_PMU_SUPER_SURFACE_MEMBER_CHANGE_SEQ_GRP 0x1EU + +#define NV_PMU_SUPER_SURFACE_MEMBER_COUNT 0x1FU + +#define NV_PMU_SUPER_SURFACE_MEMBER_DESCRIPTOR_COUNT 32U + +struct nv_pmu_super_surface_member_descriptor { + /* The member ID (@see 
NV_PMU_SUPER_SURFACE_MEMBER_ID_). */ + u32 id; + + /* The sub-structure's byte offset within the super-surface. */ + u32 offset; + + /* The sub-structure's byte size (must always be properly aligned). */ + u32 size; + + /* Reserved (and preserving required size/alignment). */ + u32 rsvd; +}; + +/* + * Defines the structure of the @ nv_pmu_super_surface_member_descriptor::id + */ +#define NV_RM_PMU_SUPER_SURFACE_MEMBER_ID_GROUP 0x0000U +#define NV_RM_PMU_SUPER_SURFACE_MEMBER_ID_GROUP_INVALID 0xFFFFU +#define NV_RM_PMU_SUPER_SURFACE_MEMBER_ID_TYPE_SET BIT(16) +#define NV_RM_PMU_SUPER_SURFACE_MEMBER_ID_TYPE_GET_STATUS BIT(17) +#define NV_RM_PMU_SUPER_SURFACE_MEMBER_ID_RSVD (0x00UL << 20U) + struct nv_pmu_super_surface_hdr { - u32 memberMask; - u16 dmemBufferSizeMax; + struct falc_u64 address; + u32 member_mask; + u16 dmem_buffer_size_max; }; NV_PMU_MAKE_ALIGNED_STRUCT(nv_pmu_super_surface_hdr, @@ -37,6 +103,8 @@ NV_PMU_MAKE_ALIGNED_STRUCT(nv_pmu_super_surface_hdr, * NOTE: Any new substructures or entries must be aligned. 
*/ struct nv_pmu_super_surface { + struct nv_pmu_super_surface_member_descriptor + ssmd[NV_PMU_SUPER_SURFACE_MEMBER_DESCRIPTOR_COUNT]; struct { struct nv_pmu_fbq_cmd_queues cmd_queues; @@ -45,56 +113,56 @@ struct nv_pmu_super_surface { union nv_pmu_super_surface_hdr_aligned hdr; - struct { - struct nv_pmu_volt_volt_device_boardobj_grp_set volt_device_grp_set; - struct nv_pmu_volt_volt_policy_boardobj_grp_set volt_policy_grp_set; - struct nv_pmu_volt_volt_rail_boardobj_grp_set volt_rail_grp_set; + union { + u8 ss_unmapped_members_rsvd[SS_UNMAPPED_MEMBERS_SIZE]; - struct nv_pmu_volt_volt_policy_boardobj_grp_get_status volt_policy_grp_get_status; - struct nv_pmu_volt_volt_rail_boardobj_grp_get_status volt_rail_grp_get_status; - struct nv_pmu_volt_volt_device_boardobj_grp_get_status volt_device_grp_get_status; - } volt; - struct { - struct nv_pmu_clk_clk_vin_device_boardobj_grp_set clk_vin_device_grp_set; - struct nv_pmu_clk_clk_domain_boardobj_grp_set clk_domain_grp_set; - struct nv_pmu_clk_clk_freq_controller_boardobj_grp_set clk_freq_controller_grp_set; - u8 clk_rsvd2[0x200]; - struct nv_pmu_clk_clk_fll_device_boardobj_grp_set clk_fll_device_grp_set; - struct nv_pmu_clk_clk_prog_boardobj_grp_set clk_prog_grp_set; - struct nv_pmu_clk_clk_vf_point_boardobj_grp_set clk_vf_point_grp_set; - struct nv_pmu_clk_clk_vin_device_boardobj_grp_get_status clk_vin_device_grp_get_status; - struct nv_pmu_clk_clk_fll_device_boardobj_grp_get_status clk_fll_device_grp_get_status; - u8 clk_rsvd3[0x200]; - struct nv_pmu_clk_clk_vf_point_boardobj_grp_get_status clk_vf_point_grp_get_status; - struct nv_pmu_clk_clk_freq_domain_boardobj_grp_set clk_freq_domain_grp_set; - - u8 clk_rsvd[0x4250]; - } clk; - struct { - struct nv_pmu_perf_vfe_equ_boardobj_grp_set_pack vfe_equ_grp_set; - struct nv_pmu_perf_vfe_var_boardobj_grp_set_pack vfe_var_grp_set; - - struct nv_pmu_perf_vfe_var_boardobj_grp_get_status_pack vfe_var_grp_get_status; - u8 perf_rsvd[0x40790]; - u8 perfcf_rsvd[0x1eb0]; - } perf; - 
struct { - struct nv_pmu_therm_therm_channel_boardobj_grp_set therm_channel_grp_set; - struct nv_pmu_therm_therm_device_boardobj_grp_set therm_device_grp_set; - u8 therm_rsvd[0x1460]; - u8 rsvd[0xC580]; - } therm; - struct { - struct perf_change_seq_pmu_script script_curr; - struct perf_change_seq_pmu_script script_last; - struct perf_change_seq_pmu_script script_query; - } change_seq; - struct { - struct nv_pmu_clk_clk_vf_point_boardobj_grp_set clk_vf_point_grp_set; - struct nv_pmu_clk_clk_vf_point_boardobj_grp_set clk_vf_point_sec_grp_set; - struct nv_pmu_clk_clk_vf_point_boardobj_grp_get_status clk_vf_point_grp_get_status; - }clk_35; + /* + * The members below are for reference only, to document the + * boardobjs supported by nvgpu; no boardobj member from this + * list should be accessed directly in nvgpu. Instead, use the + * ssmd member above to look up the offset of the required + * boardobj within the super surface. + */ + struct { + struct nv_pmu_volt_volt_device_boardobj_grp_set volt_device_grp_set; + struct nv_pmu_volt_volt_policy_boardobj_grp_set volt_policy_grp_set; + struct nv_pmu_volt_volt_rail_boardobj_grp_set volt_rail_grp_set; + struct nv_pmu_volt_volt_policy_boardobj_grp_get_status volt_policy_grp_get_status; + struct nv_pmu_volt_volt_rail_boardobj_grp_get_status volt_rail_grp_get_status; + } volt; + struct { + struct nv_pmu_clk_clk_vin_device_boardobj_grp_set clk_vin_device_grp_set; + struct nv_pmu_clk_clk_domain_boardobj_grp_set clk_domain_grp_set; + struct nv_pmu_clk_clk_freq_controller_boardobj_grp_set clk_freq_controller_grp_set; + struct nv_pmu_clk_clk_fll_device_boardobj_grp_set clk_fll_device_grp_set; + struct nv_pmu_clk_clk_prog_boardobj_grp_set clk_prog_grp_set; + struct nv_pmu_clk_clk_vf_point_boardobj_grp_set clk_vf_point_grp_set; + struct nv_pmu_clk_clk_vin_device_boardobj_grp_get_status clk_vin_device_grp_get_status; + struct nv_pmu_clk_clk_fll_device_boardobj_grp_get_status clk_fll_device_grp_get_status; + struct
nv_pmu_clk_clk_vf_point_boardobj_grp_get_status clk_vf_point_grp_get_status; + struct nv_pmu_clk_clk_freq_domain_boardobj_grp_set clk_freq_domain_grp_set; + } clk; + struct { + struct nv_pmu_perf_vfe_equ_boardobj_grp_set_pack vfe_equ_grp_set; + struct nv_pmu_perf_vfe_var_boardobj_grp_set_pack vfe_var_grp_set; + struct nv_pmu_perf_vfe_var_boardobj_grp_get_status_pack vfe_var_grp_get_status; + } perf; + struct { + struct nv_pmu_therm_therm_channel_boardobj_grp_set therm_channel_grp_set; + struct nv_pmu_therm_therm_device_boardobj_grp_set therm_device_grp_set; + } therm; + struct { + struct perf_change_seq_pmu_script script_curr; + struct perf_change_seq_pmu_script script_last; + struct perf_change_seq_pmu_script script_query; + } change_seq; + struct { + struct nv_pmu_clk_clk_vf_point_boardobj_grp_set clk_vf_point_grp_set; + struct nv_pmu_clk_clk_vf_point_boardobj_grp_get_status clk_vf_point_grp_get_status; + }clk_35; + }; }; #endif /* NVGPU_PMUIF_GPMU_SUPER_SURF_IF_H */