gpu: nvgpu: Remove unused code in perf unit

- Removed unused GV100 perf functions
- Removed header and entry table macros that are not used
- Removed unused structs in perf.h

NVGPU-4341

Change-Id: Ia08f117af76edb08d645b60fdf36bf101bf865a1
Signed-off-by: rmylavarapu <rmylavarapu@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2238870
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: rmylavarapu
Date: 2019-11-14 11:23:55 +05:30
Committed by: Alex Waterman
Parent: eab49bf020
Commit: 68b9455f51
11 changed files with 6 additions and 273 deletions


@@ -1,137 +0,0 @@
/*
* GV100 PERF
*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/pmu.h>
#include <nvgpu/bug.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/boardobj.h>
#include <nvgpu/boardobjgrp_e32.h>
#include <nvgpu/pmu/clk/clk.h>
#include <nvgpu/pmu/perf.h>
#include <nvgpu/pmu/cmd.h>
#include "perf_gv100.h"
#include "pmu_perf.h"
static int pmu_set_boot_clk_runcb_fn(void *arg)
{
	struct gk20a *g = (struct gk20a *)arg;
	struct nvgpu_pmu *pmu = g->pmu;
	struct nv_pmu_rpc_struct_perf_load rpc;
	struct perf_pmupstate *perf_pmu = g->perf_pmu;
	struct nvgpu_vfe_invalidate *vfe_init = &perf_pmu->vfe_init;
	int status = 0;

	nvgpu_log_fn(g, "thread start");

	while (true) {
		NVGPU_COND_WAIT_INTERRUPTIBLE(&vfe_init->wq,
			(vfe_init->state_change ||
			nvgpu_thread_should_stop(&vfe_init->state_task)), 0U);

		if (nvgpu_thread_should_stop(&vfe_init->state_task)) {
			break;
		}

		vfe_init->state_change = false;
		(void) memset(&rpc, 0,
			sizeof(struct nv_pmu_rpc_struct_perf_load));
		PMU_RPC_EXECUTE_CPB(status, pmu, PERF, VFE_INVALIDATE, &rpc, 0);
		if (status != 0) {
			nvgpu_err(g, "Failed to execute RPC status=0x%x",
				status);
		}
	}

	return 0;
}

static int gv100_pmu_handle_perf_event(struct gk20a *g, void *pmumsg)
{
	struct nv_pmu_perf_msg *msg = (struct nv_pmu_perf_msg *)pmumsg;
	struct perf_pmupstate *perf_pmu = g->perf_pmu;

	nvgpu_log_fn(g, " ");
	switch (msg->msg_type) {
	case NV_PMU_PERF_MSG_ID_VFE_CALLBACK:
		perf_pmu->vfe_init.state_change = true;
		nvgpu_cond_signal_interruptible(&perf_pmu->vfe_init.wq);
		break;
	default:
		WARN_ON(true);
		break;
	}
	return 0;
}

static int perf_pmu_init_vfe_perf_event(struct gk20a *g)
{
	struct perf_pmupstate *perf_pmu = g->perf_pmu;
	char thread_name[64];
	int err = 0;

	nvgpu_log_fn(g, " ");

	err = nvgpu_cond_init(&perf_pmu->vfe_init.wq);
	if (err != 0) {
		nvgpu_err(g, "nvgpu_cond_init failed err=%d", err);
		return err;
	}

	(void) snprintf(thread_name, sizeof(thread_name),
		"nvgpu_vfe_invalidate_init_%s", g->name);

	err = nvgpu_thread_create(&perf_pmu->vfe_init.state_task, g,
		pmu_set_boot_clk_runcb_fn, thread_name);
	if (err != 0) {
		nvgpu_err(g, "failed to start nvgpu_vfe_invalidate_init thread");
	}

	return err;
}

int gv100_perf_pmu_vfe_load(struct gk20a *g)
{
	struct nvgpu_pmu *pmu = g->pmu;
	struct nv_pmu_rpc_struct_perf_load rpc;
	int status = 0;

	(void) memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_perf_load));
	PMU_RPC_EXECUTE_CPB(status, pmu, PERF, VFE_INVALIDATE, &rpc, 0);
	if (status != 0) {
		nvgpu_err(g, "Failed to execute RPC status=0x%x", status);
		return status;
	}

	status = perf_pmu_init_vfe_perf_event(g);
	if (status != 0) {
		nvgpu_err(g, "perf_pmu_init_vfe_perf_event err=%d", status);
		return status;
	}

	/*register call back for future VFE updates*/
	g->ops.pmu_perf.handle_pmu_perf_event = gv100_pmu_handle_perf_event;

	return status;
}
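
The removed loader boils down to a familiar shape: gv100_perf_pmu_vfe_load() issues one PERF VFE_INVALIDATE RPC, then starts a worker that parks on a condition variable and re-issues the RPC whenever gv100_pmu_handle_perf_event() flags a state change. For readers unfamiliar with that wait-loop pattern, below is a self-contained userspace analogue in plain C and pthreads; it is an illustration only (every name in it is hypothetical), not nvgpu code.

/*
 * Hypothetical pthread analogue of the removed VFE-invalidate worker.
 * Userspace illustration only -- build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
static bool state_change;
static bool should_stop;

/* Stand-in for PMU_RPC_EXECUTE_CPB(..., PERF, VFE_INVALIDATE, ...). */
static void issue_vfe_invalidate(void)
{
	printf("re-evaluating VFE equations\n");
}

/* Analogue of pmu_set_boot_clk_runcb_fn(): sleep, wake, re-issue, repeat. */
static void *worker(void *arg)
{
	(void)arg;

	pthread_mutex_lock(&lock);
	for (;;) {
		while (!state_change && !should_stop) {
			pthread_cond_wait(&wq, &lock);
		}
		if (should_stop) {
			break;
		}
		state_change = false;

		/* Drop the lock while "talking to the PMU". */
		pthread_mutex_unlock(&lock);
		issue_vfe_invalidate();
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Analogue of gv100_pmu_handle_perf_event(): flag the change, wake the worker. */
static void handle_perf_event(void)
{
	pthread_mutex_lock(&lock);
	state_change = true;
	pthread_cond_signal(&wq);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	handle_perf_event();		/* simulate one VFE callback from the PMU */

	pthread_mutex_lock(&lock);
	should_stop = true;		/* analogue of nvgpu_thread_should_stop() */
	pthread_cond_broadcast(&wq);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}

The real code wraps the same idea in nvgpu's primitives (NVGPU_COND_WAIT_INTERRUPTIBLE, nvgpu_thread_create, nvgpu_cond_signal_interruptible) and replaces the print with the PMU RPC.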


@@ -1,34 +0,0 @@
/*
* GV100 PERF
*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_PERF_GV100_H
#define NVGPU_PERF_GV100_H
#include <nvgpu/pmu/pmuif/nvgpu_cmdif.h>
struct gk20a;
int gv100_perf_pmu_vfe_load(struct gk20a *g);
#endif /* NVGPU_PERF_GV100_H */


@@ -365,11 +365,8 @@ static int devinit_get_vfe_equ_table(struct gk20a *g,
	}
	nvgpu_memcpy((u8 *)&vfeequs_tbl_header, vfeequs_tbl_ptr,
		VBIOS_VFE_3X_HEADER_SIZE_07);
	if (vfeequs_tbl_header.header_size == VBIOS_VFE_3X_HEADER_SIZE_07) {
		hdrszfmt = VBIOS_VFE_3X_HEADER_SIZE_07;
	} else if (vfeequs_tbl_header.header_size ==
			VBIOS_VFE_3X_HEADER_SIZE_09) {
		VBIOS_VFE_3X_HEADER_SIZE_09);
	if (vfeequs_tbl_header.header_size == VBIOS_VFE_3X_HEADER_SIZE_09) {
		hdrszfmt = VBIOS_VFE_3X_HEADER_SIZE_09;
		nvgpu_memcpy((u8 *)&vfeequs_tbl_header, vfeequs_tbl_ptr, hdrszfmt);
	} else {
@@ -379,12 +376,10 @@ static int devinit_get_vfe_equ_table(struct gk20a *g,
	}
	if (vfeequs_tbl_header.vfe_equ_entry_size ==
		VBIOS_VFE_3X_EQU_ENTRY_SIZE_17) {
		szfmt = VBIOS_VFE_3X_EQU_ENTRY_SIZE_17;
	} else if (vfeequs_tbl_header.vfe_equ_entry_size ==
		VBIOS_VFE_3X_EQU_ENTRY_SIZE_18) {
		szfmt = VBIOS_VFE_3X_EQU_ENTRY_SIZE_18;
	} else {
		nvgpu_err(g, "Invalid VFE EQU entry size\n");
		status = -EINVAL;
		goto done;
	}
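
For orientation: the first hunk above drops the VBIOS_VFE_3X_HEADER_SIZE_07 fallback from devinit_get_vfe_equ_table(), leaving only the _09 header format, and the second narrows the VFE EQU entry-size check below it. Reconstructed from the surviving lines of the first hunk (a sketch of the net effect, not a verbatim copy of the resulting file), the header check now reads roughly:

	nvgpu_memcpy((u8 *)&vfeequs_tbl_header, vfeequs_tbl_ptr,
		VBIOS_VFE_3X_HEADER_SIZE_09);
	if (vfeequs_tbl_header.header_size == VBIOS_VFE_3X_HEADER_SIZE_09) {
		hdrszfmt = VBIOS_VFE_3X_HEADER_SIZE_09;
		nvgpu_memcpy((u8 *)&vfeequs_tbl_header, vfeequs_tbl_ptr,
			hdrszfmt);
	} else {
		/* invalid header size: pre-existing error path, not shown
		 * in the hunk above */
	}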


@@ -1240,11 +1240,8 @@ static int devinit_get_vfe_var_table(struct gk20a *g,
	}
	nvgpu_memcpy((u8 *)&vfevars_tbl_header, vfevars_tbl_ptr,
		VBIOS_VFE_3X_HEADER_SIZE_07);
	if (vfevars_tbl_header.header_size == VBIOS_VFE_3X_HEADER_SIZE_07) {
		hdrszfmt = VBIOS_VFE_3X_HEADER_SIZE_07;
	} else if (vfevars_tbl_header.header_size ==
			VBIOS_VFE_3X_HEADER_SIZE_09) {
		VBIOS_VFE_3X_HEADER_SIZE_09);
	if (vfevars_tbl_header.header_size == VBIOS_VFE_3X_HEADER_SIZE_09) {
		hdrszfmt = VBIOS_VFE_3X_HEADER_SIZE_09;
		nvgpu_memcpy((u8 *)&vfevars_tbl_header, vfevars_tbl_ptr, hdrszfmt);
	} else {
@@ -1256,9 +1253,6 @@ static int devinit_get_vfe_var_table(struct gk20a *g,
	if (vfevars_tbl_header.vfe_var_entry_size ==
		VBIOS_VFE_3X_VAR_ENTRY_SIZE_19) {
		szfmt = VBIOS_VFE_3X_VAR_ENTRY_SIZE_19;
	} else if (vfevars_tbl_header.vfe_var_entry_size ==
		VBIOS_VFE_3X_VAR_ENTRY_SIZE_11) {
		szfmt = VBIOS_VFE_3X_VAR_ENTRY_SIZE_11;
	} else {
		nvgpu_err(g, "Invalid VFE VAR Entry size\n");
		status = -EINVAL;