diff --git a/drivers/gpu/nvgpu/common/pmu/perf/change_seq.c b/drivers/gpu/nvgpu/common/pmu/perf/change_seq.c
index b672188b5..210f4608c 100644
--- a/drivers/gpu/nvgpu/common/pmu/perf/change_seq.c
+++ b/drivers/gpu/nvgpu/common/pmu/perf/change_seq.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -158,21 +158,6 @@ static void build_change_seq_boot (struct gk20a *g)
 	return;
 }
 
-static int perf_pmu_load(struct gk20a *g)
-{
-	int status = 0;
-	struct nv_pmu_rpc_struct_perf_load rpc;
-	struct nvgpu_pmu *pmu = &g->pmu;
-
-	(void) memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_perf_load));
-	PMU_RPC_EXECUTE_CPB(status, pmu, PERF, LOAD, &rpc, 0);
-	if (status != 0) {
-		nvgpu_err(g, "Failed to execute RPC status=0x%x",
-			status);
-	}
-	return status;
-}
-
 int nvgpu_perf_change_seq_pmu_setup(struct gk20a *g)
 {
 	struct nv_pmu_rpc_perf_change_seq_info_get info_get;
@@ -251,12 +236,6 @@ int nvgpu_perf_change_seq_pmu_setup(struct gk20a *g)
 		goto perf_change_seq_pmu_setup_exit;
 	}
 
-	/* Perf Load*/
-	status = perf_pmu_load(g);
-	if (status != 0) {
-		nvgpu_err(g, "Failed to Load Perf");
-	}
-
 perf_change_seq_pmu_setup_exit:
 	return status;
 }
diff --git a/drivers/gpu/nvgpu/common/pmu/perf/perf_tu104.c b/drivers/gpu/nvgpu/common/pmu/perf/perf_tu104.c
index 18a10a01b..efe132458 100644
--- a/drivers/gpu/nvgpu/common/pmu/perf/perf_tu104.c
+++ b/drivers/gpu/nvgpu/common/pmu/perf/perf_tu104.c
@@ -53,11 +53,12 @@ static int pmu_set_boot_clk_runcb_fn(void *arg)
 
 		(void) memset(&rpc, 0, sizeof(struct nv_pmu_rpc_struct_perf_load));
-		PMU_RPC_EXECUTE_CPB(status, pmu, PERF, LOAD, &rpc, 0);
+		PMU_RPC_EXECUTE_CPB(status, pmu, PERF, VFE_INVALIDATE, &rpc, 0);
 		if (status != 0) {
 			nvgpu_err(g, "Failed to execute RPC status=0x%x",
 				status);
 		}
+		nvgpu_clk_arb_schedule_vf_table_update(g);
 	}
 
 	return 0;
@@ -72,8 +73,7 @@ static int tu104_pmu_handle_perf_event(struct gk20a *g, void *pmumsg)
 	switch (msg->msg_type) {
 	case NV_PMU_PERF_MSG_ID_VFE_CALLBACK:
 		perf_pmu->vfe_init.state_change = true;
-		nvgpu_cond_signal(&perf_pmu->vfe_init.wq);
-		nvgpu_clk_arb_schedule_vf_table_update(g);
+		(void) nvgpu_cond_signal(&perf_pmu->vfe_init.wq);
 		break;
 	case NV_PMU_PERF_MSG_ID_CHANGE_SEQ_COMPLETION:
 		nvgpu_log_fn(g, "Change Seq Completed");
diff --git a/drivers/gpu/nvgpu/common/pmu/pstate/pstate.c b/drivers/gpu/nvgpu/common/pmu/pstate/pstate.c
index e131bca0d..b66c5c694 100644
--- a/drivers/gpu/nvgpu/common/pmu/pstate/pstate.c
+++ b/drivers/gpu/nvgpu/common/pmu/pstate/pstate.c
@@ -302,13 +302,6 @@ int gk20a_init_pstate_pmu_support(struct gk20a *g)
 		}
 	}
 
-	if (g->ops.pmu_perf.support_vfe) {
-		err = g->ops.clk.perf_pmu_vfe_load(g);
-		if (err != 0) {
-			return err;
-		}
-	}
-
 	if (g->ops.clk.support_pmgr_domain) {
 		err = pmgr_domain_pmu_setup(g);
 	}
@@ -320,6 +313,12 @@ int gk20a_init_pstate_pmu_support(struct gk20a *g)
 		}
 	}
 
+	if (g->ops.pmu_perf.support_vfe) {
+		err = g->ops.clk.perf_pmu_vfe_load(g);
+		if (err != 0) {
+			return err;
+		}
+	}
 	return err;
 }