gpu: nvgpu: Remove unused code in perf unit

- Removed GV100 functions
- Removed unused header and entry table macros
- Removed unused structs in perf.h

NVGPU-4341

Change-Id: Ia08f117af76edb08d645b60fdf36bf101bf865a1
Signed-off-by: rmylavarapu <rmylavarapu@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2238870
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
rmylavarapu authored on 2019-11-14 11:23:55 +05:30; committed by Alex Waterman
parent eab49bf020, commit 68b9455f51
11 changed files with 6 additions and 273 deletions


@@ -734,8 +734,6 @@ pmu:
common/pmu/perf/vfe_equ.h,
common/pmu/perf/vfe_var.c,
common/pmu/perf/vfe_var.h,
common/pmu/perf/perf_gv100.c,
common/pmu/perf/perf_gv100.h,
common/pmu/perf/perf_ps35.c,
common/pmu/perf/perf_pstate.c,
common/pmu/perf/perf_pstate.h,


@@ -150,7 +150,6 @@ nvgpu-y += \
common/pmu/perf/vfe_var.o \
common/pmu/perf/vfe_equ.o \
common/pmu/perf/pmu_perf.o \
common/pmu/perf/perf_gv100.o \
common/pmu/perf/perf_ps35.o \
common/pmu/perf/change_seq.o \
common/pmu/perf/perf_pstate.o \


@@ -457,7 +457,6 @@ srcs += \
common/pmu/perf/pmu_perf.c \
common/pmu/perf/vfe_equ.c \
common/pmu/perf/vfe_var.c \
common/pmu/perf/perf_gv100.c \
common/pmu/perf/perf_ps35.c \
common/pmu/perf/perf_pstate.c \
common/pmu/perf/change_seq.c \


@@ -1,137 +0,0 @@
/*
* GV100 PERF
*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/pmu.h>
#include <nvgpu/bug.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/boardobj.h>
#include <nvgpu/boardobjgrp_e32.h>
#include <nvgpu/pmu/clk/clk.h>
#include <nvgpu/pmu/perf.h>
#include <nvgpu/pmu/cmd.h>
#include "perf_gv100.h"
#include "pmu_perf.h"

static int pmu_set_boot_clk_runcb_fn(void *arg)
{
        struct gk20a *g = (struct gk20a *)arg;
        struct nvgpu_pmu *pmu = g->pmu;
        struct nv_pmu_rpc_struct_perf_load rpc;
        struct perf_pmupstate *perf_pmu = g->perf_pmu;
        struct nvgpu_vfe_invalidate *vfe_init = &perf_pmu->vfe_init;
        int status = 0;

        nvgpu_log_fn(g, "thread start");

        while (true) {
                NVGPU_COND_WAIT_INTERRUPTIBLE(&vfe_init->wq,
                        (vfe_init->state_change ||
                         nvgpu_thread_should_stop(&vfe_init->state_task)), 0U);

                if (nvgpu_thread_should_stop(&vfe_init->state_task)) {
                        break;
                }

                vfe_init->state_change = false;
                (void) memset(&rpc, 0,
                        sizeof(struct nv_pmu_rpc_struct_perf_load));
                PMU_RPC_EXECUTE_CPB(status, pmu, PERF, VFE_INVALIDATE, &rpc, 0);
                if (status != 0) {
                        nvgpu_err(g, "Failed to execute RPC status=0x%x",
                                status);
                }
        }

        return 0;
}

static int gv100_pmu_handle_perf_event(struct gk20a *g, void *pmumsg)
{
        struct nv_pmu_perf_msg *msg = (struct nv_pmu_perf_msg *)pmumsg;
        struct perf_pmupstate *perf_pmu = g->perf_pmu;

        nvgpu_log_fn(g, " ");

        switch (msg->msg_type) {
        case NV_PMU_PERF_MSG_ID_VFE_CALLBACK:
                perf_pmu->vfe_init.state_change = true;
                nvgpu_cond_signal_interruptible(&perf_pmu->vfe_init.wq);
                break;
        default:
                WARN_ON(true);
                break;
        }

        return 0;
}

static int perf_pmu_init_vfe_perf_event(struct gk20a *g)
{
        struct perf_pmupstate *perf_pmu = g->perf_pmu;
        char thread_name[64];
        int err = 0;

        nvgpu_log_fn(g, " ");

        err = nvgpu_cond_init(&perf_pmu->vfe_init.wq);
        if (err != 0) {
                nvgpu_err(g, "nvgpu_cond_init failed err=%d", err);
                return err;
        }

        (void) snprintf(thread_name, sizeof(thread_name),
                "nvgpu_vfe_invalidate_init_%s", g->name);

        err = nvgpu_thread_create(&perf_pmu->vfe_init.state_task, g,
                pmu_set_boot_clk_runcb_fn, thread_name);
        if (err != 0) {
                nvgpu_err(g, "failed to start nvgpu_vfe_invalidate_init thread");
        }

        return err;
}

int gv100_perf_pmu_vfe_load(struct gk20a *g)
{
        struct nvgpu_pmu *pmu = g->pmu;
        struct nv_pmu_rpc_struct_perf_load rpc;
        int status = 0;

        (void) memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_perf_load));
        PMU_RPC_EXECUTE_CPB(status, pmu, PERF, VFE_INVALIDATE, &rpc, 0);
        if (status != 0) {
                nvgpu_err(g, "Failed to execute RPC status=0x%x", status);
                return status;
        }

        status = perf_pmu_init_vfe_perf_event(g);
        if (status != 0) {
                nvgpu_err(g, "perf_pmu_init_vfe_perf_event err=%d", status);
                return status;
        }

        /* register callback for future VFE updates */
        g->ops.pmu_perf.handle_pmu_perf_event = gv100_pmu_handle_perf_event;

        return status;
}
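
For context, the deleted gv100_perf_pmu_vfe_load() path follows a simple pattern: issue one VFE_INVALIDATE RPC up front, then park a worker thread on a condition variable and re-issue the RPC whenever the PMU raises a VFE callback. Below is a stand-alone sketch of that wait/signal loop in plain pthreads; vfe_worker, vfe_worker_fn, and vfe_worker_kick are made-up names for illustration, not nvgpu APIs.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the vfe_init bookkeeping above. */
struct vfe_worker {
        pthread_mutex_t lock;
        pthread_cond_t wq;
        bool state_change;      /* set when the "PMU" reports a VFE callback */
        bool should_stop;       /* set when tearing the worker down */
};

static struct vfe_worker worker = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .wq = PTHREAD_COND_INITIALIZER,
};

/* Worker loop: the shape of pmu_set_boot_clk_runcb_fn(), with the
 * VFE_INVALIDATE RPC replaced by a printf placeholder. */
static void *vfe_worker_fn(void *arg)
{
        struct vfe_worker *w = arg;

        pthread_mutex_lock(&w->lock);
        while (!w->should_stop) {
                while (!w->state_change && !w->should_stop) {
                        pthread_cond_wait(&w->wq, &w->lock);
                }
                if (w->should_stop) {
                        break;
                }
                w->state_change = false;
                pthread_mutex_unlock(&w->lock);

                printf("re-issue VFE_INVALIDATE RPC here\n");

                pthread_mutex_lock(&w->lock);
        }
        pthread_mutex_unlock(&w->lock);
        return NULL;
}

/* What the removed gv100_pmu_handle_perf_event() did on VFE_CALLBACK. */
static void vfe_worker_kick(struct vfe_worker *w, bool stop)
{
        pthread_mutex_lock(&w->lock);
        if (stop) {
                w->should_stop = true;
        } else {
                w->state_change = true;
        }
        pthread_cond_signal(&w->wq);
        pthread_mutex_unlock(&w->lock);
}

int main(void)
{
        pthread_t tid;

        pthread_create(&tid, NULL, vfe_worker_fn, &worker);
        vfe_worker_kick(&worker, false);   /* pretend the PMU raised VFE_CALLBACK */
        vfe_worker_kick(&worker, true);    /* stop the worker, like thread teardown */
        pthread_join(tid, NULL);
        return 0;
}

The predicate is rechecked in a loop while the mutex is held, which is the same discipline the NVGPU_COND_WAIT_INTERRUPTIBLE() call above relies on for vfe_init->state_change.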


@@ -1,34 +0,0 @@
/*
* GV100 PERF
*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_PERF_GV100_H
#define NVGPU_PERF_GV100_H

#include <nvgpu/pmu/pmuif/nvgpu_cmdif.h>

struct gk20a;

int gv100_perf_pmu_vfe_load(struct gk20a *g);

#endif /* NVGPU_PERF_GV100_H */


@@ -365,11 +365,8 @@ static int devinit_get_vfe_equ_table(struct gk20a *g,
}
nvgpu_memcpy((u8 *)&vfeequs_tbl_header, vfeequs_tbl_ptr,
VBIOS_VFE_3X_HEADER_SIZE_07);
if (vfeequs_tbl_header.header_size == VBIOS_VFE_3X_HEADER_SIZE_07) {
hdrszfmt = VBIOS_VFE_3X_HEADER_SIZE_07;
} else if (vfeequs_tbl_header.header_size ==
VBIOS_VFE_3X_HEADER_SIZE_09) {
VBIOS_VFE_3X_HEADER_SIZE_09);
if (vfeequs_tbl_header.header_size == VBIOS_VFE_3X_HEADER_SIZE_09) {
hdrszfmt = VBIOS_VFE_3X_HEADER_SIZE_09;
nvgpu_memcpy((u8 *)&vfeequs_tbl_header, vfeequs_tbl_ptr, hdrszfmt);
} else {
@@ -379,12 +376,10 @@ static int devinit_get_vfe_equ_table(struct gk20a *g,
}
if (vfeequs_tbl_header.vfe_equ_entry_size ==
VBIOS_VFE_3X_EQU_ENTRY_SIZE_17) {
szfmt = VBIOS_VFE_3X_EQU_ENTRY_SIZE_17;
} else if (vfeequs_tbl_header.vfe_equ_entry_size ==
VBIOS_VFE_3X_EQU_ENTRY_SIZE_18) {
szfmt = VBIOS_VFE_3X_EQU_ENTRY_SIZE_18;
} else {
nvgpu_err(g, "Invalid VFE EQU entry size\n");
status = -EINVAL;
goto done;
}
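
Both hunks above reduce the VFE EQU table parsing to a single supported layout: the 0x07 header format and, apparently, the 0x17 entry format are dropped. A stand-alone sketch of the resulting size validation, assuming VBIOS_VFE_3X_HEADER_SIZE_09 and VBIOS_VFE_3X_EQU_ENTRY_SIZE_18 are the surviving sizes (a simplified model, not the actual devinit_get_vfe_equ_table()):

#include <stdint.h>
#include <stdio.h>

#define VBIOS_VFE_3X_HEADER_SIZE_09    0x09U
#define VBIOS_VFE_3X_EQU_ENTRY_SIZE_18 0x18U

/* Returns 0 when the table advertises the one layout still understood,
 * -22 (-EINVAL) otherwise. */
static int vfe_equ_table_sizes_ok(uint8_t header_size, uint8_t entry_size)
{
        if (header_size != VBIOS_VFE_3X_HEADER_SIZE_09) {
                fprintf(stderr, "Invalid VFE EQU header size 0x%x\n",
                        (unsigned int)header_size);
                return -22;
        }
        if (entry_size != VBIOS_VFE_3X_EQU_ENTRY_SIZE_18) {
                fprintf(stderr, "Invalid VFE EQU entry size 0x%x\n",
                        (unsigned int)entry_size);
                return -22;
        }
        return 0;
}

int main(void)
{
        printf("%d\n", vfe_equ_table_sizes_ok(0x09, 0x18)); /* 0: accepted   */
        printf("%d\n", vfe_equ_table_sizes_ok(0x07, 0x17)); /* -22: rejected */
        return 0;
}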


@@ -1240,11 +1240,8 @@ static int devinit_get_vfe_var_table(struct gk20a *g,
}
nvgpu_memcpy((u8 *)&vfevars_tbl_header, vfevars_tbl_ptr,
VBIOS_VFE_3X_HEADER_SIZE_07);
if (vfevars_tbl_header.header_size == VBIOS_VFE_3X_HEADER_SIZE_07) {
hdrszfmt = VBIOS_VFE_3X_HEADER_SIZE_07;
} else if (vfevars_tbl_header.header_size ==
VBIOS_VFE_3X_HEADER_SIZE_09) {
VBIOS_VFE_3X_HEADER_SIZE_09);
if (vfevars_tbl_header.header_size == VBIOS_VFE_3X_HEADER_SIZE_09) {
hdrszfmt = VBIOS_VFE_3X_HEADER_SIZE_09;
nvgpu_memcpy((u8 *)&vfevars_tbl_header, vfevars_tbl_ptr, hdrszfmt);
} else {
@@ -1256,9 +1253,6 @@ static int devinit_get_vfe_var_table(struct gk20a *g,
if (vfevars_tbl_header.vfe_var_entry_size ==
VBIOS_VFE_3X_VAR_ENTRY_SIZE_19) {
szfmt = VBIOS_VFE_3X_VAR_ENTRY_SIZE_19;
} else if (vfevars_tbl_header.vfe_var_entry_size ==
VBIOS_VFE_3X_VAR_ENTRY_SIZE_11) {
szfmt = VBIOS_VFE_3X_VAR_ENTRY_SIZE_11;
} else {
nvgpu_err(g, "Invalid VFE VAR Entry size\n");
status = -EINVAL;
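
This is the same cleanup applied to the VFE VAR table: the 0x07 header format and the 0x11 VAR entry format go away, leaving VBIOS_VFE_3X_HEADER_SIZE_09 and VBIOS_VFE_3X_VAR_ENTRY_SIZE_19 as the only accepted sizes, so the sketch above applies unchanged with those constants substituted.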


@@ -351,10 +351,8 @@ struct vbios_vfe_3x_header_struct {
u8 vfe_equ_rppm_entry_count;
} __attribute__((packed));
#define VBIOS_VFE_3X_HEADER_SIZE_07 0x07U
#define VBIOS_VFE_3X_HEADER_SIZE_09 0x09U
#define VBIOS_VFE_3X_VAR_ENTRY_SIZE_11 0x11U
#define VBIOS_VFE_3X_VAR_ENTRY_SIZE_19 0x19U
struct vbios_vfe_3x_var_entry_struct {
u8 type;
@@ -433,7 +431,6 @@ struct vbios_vfe_3x_var_entry_struct {
#define VBIOS_VFE_3X_VAR_ENTRY_PAR1_SSFUSE_HW_CORRECTION_OFFSET_MASK 0xFFFFFFFFU
#define VBIOS_VFE_3X_VAR_ENTRY_PAR1_SSFUSE_HW_CORRECTION_OFFSET_SHIFT 0U
#define VBIOS_VFE_3X_EQU_ENTRY_SIZE_17 0x17U
#define VBIOS_VFE_3X_EQU_ENTRY_SIZE_18 0x18U
struct vbios_vfe_3x_equ_entry_struct {


@@ -52,7 +52,6 @@ struct pmu_cmd {
struct pmu_zbc_cmd zbc;
struct pmu_acr_cmd acr;
struct nv_pmu_boardobj_cmd boardobj;
struct nv_pmu_perf_cmd perf;
struct nv_pmu_volt_cmd volt;
struct nv_pmu_clk_cmd clk;
struct nv_pmu_pmgr_cmd pmgr;


@@ -71,7 +71,6 @@ struct pmu_msg {
struct pmu_rc_msg rc;
struct pmu_acr_msg acr;
struct nv_pmu_boardobj_msg boardobj;
struct nv_pmu_perf_msg perf;
struct nv_pmu_volt_msg volt;
struct nv_pmu_clk_msg clk;
struct nv_pmu_pmgr_msg pmgr;
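
Dropping struct nv_pmu_perf_cmd and struct nv_pmu_perf_msg from these payload unions leaves the remaining members untouched: every union member starts at offset zero, so only the overall sizeof() can change, and only if the removed member happened to be the largest. A toy stand-alone illustration (payload_a, payload_b, and payload_perf are made-up types, not nvgpu structures):

#include <stdio.h>

struct payload_a    { char raw[16]; };
struct payload_b    { char raw[64]; };
struct payload_perf { char raw[32]; };  /* the member being removed */

/* Before: three possible payloads.  After: payload_perf dropped. */
union msg_before { struct payload_a a; struct payload_b b; struct payload_perf perf; };
union msg_after  { struct payload_a a; struct payload_b b; };

int main(void)
{
        /* Both print 64 here, because payload_b is the largest member either way. */
        printf("before: %zu, after: %zu\n",
               sizeof(union msg_before), sizeof(union msg_after));
        return 0;
}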


@@ -59,97 +59,21 @@ struct nv_pmu_rpc_struct_perf_load {
        u32 scratch[1];
};

struct nv_pmu_perf_cmd_set_object {
        u8 cmd_type;
        u8 pad[2];
        u8 object_type;
        struct nv_pmu_allocation object;
};

#define NV_PMU_PERF_SET_OBJECT_ALLOC_OFFSET \
        (offsetof(struct nv_pmu_perf_cmd_set_object, object))

/* RPC IDs */
#define NV_PMU_PERF_RPC_ID_VFE_LOAD (0x00000001U)

/*
 * Command requesting execution of the perf RPC.
 */
struct nv_pmu_perf_cmd_rpc {
        u8 cmd_type;
        u8 pad[3];
        struct nv_pmu_allocation request;
};

#define NV_PMU_PERF_CMD_RPC_ALLOC_OFFSET \
        ((u32)offsetof(struct nv_pmu_perf_cmd_rpc, request))

/*
 * Simply a union of all specific PERF commands. Forms the general packet
 * exchanged between the Kernel and PMU when sending and receiving PERF
 * commands (respectively).
 */
struct nv_pmu_perf_cmd {
        union {
                u8 cmd_type;
                struct nv_pmu_perf_cmd_set_object set_object;
                struct nv_pmu_boardobj_cmd_grp grp_set;
                struct nv_pmu_boardobj_cmd_grp grp_get_status;
        };
};

/*
 * Defines the data structure used to invoke PMU perf RPCs. Same structure is
 * used to return the result of the RPC execution.
 */
struct nv_pmu_perf_rpc {
        u8 function;
        bool b_supported;
        bool b_success;
        falcon_status flcn_status;
        union {
                struct nv_pmu_perf_rpc_vfe_equ_eval vfe_equ_eval;
                struct nv_pmu_perf_rpc_vfe_load vfe_load;
        } params;
};

/* PERF Message-type Definitions */
#define NV_PMU_PERF_MSG_ID_RPC (0x00000003U)
#define NV_PMU_PERF_MSG_ID_BOARDOBJ_GRP_SET (0x00000004U)
#define NV_PMU_PERF_MSG_ID_BOARDOBJ_GRP_GET_STATUS (0x00000006U)
#define NV_PMU_PERF_MSG_ID_VFE_CALLBACK (0x00000005U)
#define NV_PMU_PERF_MSG_ID_CHANGE_SEQ_COMPLETION (0x00000007U)
#define NV_PMU_PERF_MSG_ID_PSTATES_INVALIDATE (0x00000008U)

/* PERF RPC ID Definitions */
#define NV_PMU_RPC_ID_PERF_VFE_CALLBACK 0x01U
#define NV_PMU_RPC_ID_PERF_SEQ_COMPLETION 0x02U
#define NV_PMU_RPC_ID_PERF_PSTATES_INVALIDATE 0x03U

/*
 * Message carrying the result of the perf RPC execution.
 */
struct nv_pmu_perf_msg_rpc {
        u8 msg_type;
        u8 rsvd[3];
        struct nv_pmu_allocation response;
};

#define NV_PMU_PERF_MSG_RPC_ALLOC_OFFSET \
        ((u32)offsetof(struct nv_pmu_perf_msg_rpc, response))

/*
 * Simply a union of all specific PERF messages. Forms the general packet
 * exchanged between the Kernel and PMU when sending and receiving PERF
 * messages (respectively).
 */
struct nv_pmu_perf_msg {
        union {
                u8 msg_type;
                struct nv_pmu_perf_msg_rpc rpc;
                struct nv_pmu_boardobj_msg_grp grp_set;
        };
};

struct pmu_nvgpu_rpc_perf_event {
        struct pmu_hdr msg_hdr;