mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-23 01:50:07 +03:00

gpu: nvgpu: moved pg out from pmu_gk20a.c/h

- moved pg related code to pmu_pg.c under common/pmu folder:
  - PG state machine support methods
  - PG ACK handlers
  - AELPG methods
  - PG enable/disable methods
- prepended nvgpu_ to elpg/aelpg global methods, replacing the gk20a_ prefix

JIRA NVGPU-97

Change-Id: I2148a69ff86b5c5d43c521ff6e241db84afafd82
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: http://git-master/r/1498363
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>

committed by: mobile promotions
parent: 45355f00e7
commit: c18364d0c4
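Editor's note: this commit is essentially a code move plus rename; aside from switching the moved code to the nvgpu_ logging helpers, the logic is carried over unchanged. For orientation, the renamed PG/AELPG entry points, with signatures as they appear in the definitions in this diff, are:

int nvgpu_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg);
int nvgpu_pmu_enable_elpg(struct gk20a *g);
int nvgpu_pmu_disable_elpg(struct gk20a *g);
int nvgpu_pmu_init_powergating(struct gk20a *g);
int nvgpu_pmu_init_bind_fecs(struct gk20a *g);
void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g);
int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
	struct pmu_pg_stats_data *pg_stat_data);
int nvgpu_pmu_ap_send_command(struct gk20a *g,
	union pmu_ap_cmd *p_ap_cmd, bool b_block);
int nvgpu_aelpg_init(struct gk20a *g);
int nvgpu_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id);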
@@ -61,6 +61,7 @@ nvgpu-y := \
	common/pmu/pmu.o \
	common/pmu/pmu_ipc.o \
	common/pmu/pmu_fw.o \
+	common/pmu/pmu_pg.o \
	gk20a/gk20a.o \
	gk20a/bus_gk20a.o \
	gk20a/pramin_gk20a.o \
@@ -276,15 +276,15 @@ static void pmu_setup_hw_enable_elpg(struct gk20a *g)
		/* Init reg with prod values*/
		if (g->ops.pmu.pmu_setup_elpg)
			g->ops.pmu.pmu_setup_elpg(g);
-		gk20a_pmu_enable_elpg(g);
+		nvgpu_pmu_enable_elpg(g);
	}

	nvgpu_udelay(50);

	/* Enable AELPG */
	if (g->aelpg_enabled) {
-		gk20a_aelpg_init(g);
-		gk20a_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);
+		nvgpu_aelpg_init(g);
+		nvgpu_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);
	}
}

@@ -398,7 +398,7 @@ int nvgpu_pmu_destroy(struct gk20a *g)
	nvgpu_pmu_get_pg_stats(g,
		PMU_PG_ELPG_ENGINE_ID_GRAPHICS, &pg_stat_data);

-	gk20a_pmu_disable_elpg(g);
+	nvgpu_pmu_disable_elpg(g);
	pmu->initialized = false;

	/* update the s/w ELPG residency counters */
drivers/gpu/nvgpu/common/pmu/pmu_pg.c (new file, 719 lines)
@@ -0,0 +1,719 @@
/*
 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <nvgpu/pmu.h>
#include <nvgpu/log.h>
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>

#include "gk20a/gk20a.h"

/* state transition :
 * OFF => [OFF_ON_PENDING optional] => ON_PENDING => ON => OFF
 * ON => OFF is always synchronized
 */
/* elpg is off */
#define PMU_ELPG_STAT_OFF 0
/* elpg is on */
#define PMU_ELPG_STAT_ON 1
/* elpg is off, ALLOW cmd has been sent, wait for ack */
#define PMU_ELPG_STAT_ON_PENDING 2
/* elpg is on, DISALLOW cmd has been sent, wait for ack */
#define PMU_ELPG_STAT_OFF_PENDING 3
/* elpg is off, caller has requested on, but ALLOW
 * cmd hasn't been sent due to ENABLE_ALLOW delay
 */
#define PMU_ELPG_STAT_OFF_ON_PENDING 4

#define PMU_PGENG_GR_BUFFER_IDX_INIT (0)
#define PMU_PGENG_GR_BUFFER_IDX_ZBC (1)
#define PMU_PGENG_GR_BUFFER_IDX_FECS (2)
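
/* Editor's note, not part of the mirrored file: for a given engine the
 * handshake implemented below runs
 *   enable:  OFF -> (ALLOW posted) ON_PENDING -> (ALLOW_ACK) ON
 *   disable: ON -> (DISALLOW posted) OFF_PENDING -> (DISALLOW_ACK) OFF
 * pmu_enable_elpg_locked() posts ALLOW without waiting for the ack, while
 * nvgpu_pmu_disable_elpg() blocks until the DISALLOW ack flips the state
 * back to OFF.
 */
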
static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
	void *param, u32 handle, u32 status)
{
	struct nvgpu_pmu *pmu = param;
	struct pmu_pg_msg_elpg_msg *elpg_msg = &msg->msg.pg.elpg_msg;

	nvgpu_log_fn(g, " ");

	if (status != 0) {
		nvgpu_err(g, "ELPG cmd aborted");
		/* TBD: disable ELPG */
		return;
	}

	switch (elpg_msg->msg) {
	case PMU_PG_ELPG_MSG_INIT_ACK:
		nvgpu_pmu_dbg(g, "INIT_PG is ack from PMU, eng - %d",
			elpg_msg->engine_id);
		break;
	case PMU_PG_ELPG_MSG_ALLOW_ACK:
		nvgpu_pmu_dbg(g, "ALLOW is ack from PMU, eng - %d",
			elpg_msg->engine_id);
		if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
			pmu->elpg_stat = PMU_ELPG_STAT_ON;
		else if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
			pmu->mscg_transition_state = PMU_ELPG_STAT_ON;
		break;
	case PMU_PG_ELPG_MSG_DISALLOW_ACK:
		nvgpu_pmu_dbg(g, "DISALLOW is ack from PMU, eng - %d",
			elpg_msg->engine_id);

		if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
			pmu->elpg_stat = PMU_ELPG_STAT_OFF;
		else if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
			pmu->mscg_transition_state = PMU_ELPG_STAT_OFF;

		if (pmu->pmu_state == PMU_STATE_ELPG_BOOTING) {
			if (g->ops.pmu.pmu_pg_engines_feature_list &&
				g->ops.pmu.pmu_pg_engines_feature_list(g,
					PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
				PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
				pmu->initialized = true;
				nvgpu_pmu_state_change(g, PMU_STATE_STARTED,
					false);
				WRITE_ONCE(pmu->mscg_stat, PMU_MSCG_DISABLED);
				/* make status visible */
				smp_mb();
			} else
				nvgpu_pmu_state_change(g, PMU_STATE_ELPG_BOOTED,
					true);
		}
		break;
	default:
		nvgpu_err(g,
			"unsupported ELPG message : 0x%04x", elpg_msg->msg);
	}
}

/* PG enable/disable */
int nvgpu_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg)
{
	u32 status = 0;

	if (enable_pg == true) {
		if (g->ops.pmu.pmu_pg_engines_feature_list &&
			g->ops.pmu.pmu_pg_engines_feature_list(g,
				PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
			PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
			if (g->ops.pmu.pmu_lpwr_enable_pg)
				status = g->ops.pmu.pmu_lpwr_enable_pg(g,
					true);
		} else if (g->support_pmu && g->can_elpg)
			status = nvgpu_pmu_enable_elpg(g);
	} else if (enable_pg == false) {
		if (g->ops.pmu.pmu_pg_engines_feature_list &&
			g->ops.pmu.pmu_pg_engines_feature_list(g,
				PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
			PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
			if (g->ops.pmu.pmu_lpwr_disable_pg)
				status = g->ops.pmu.pmu_lpwr_disable_pg(g,
					true);
		} else if (g->support_pmu && g->can_elpg)
			status = nvgpu_pmu_disable_elpg(g);
	}

	return status;
}

static int pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct pmu_cmd cmd;
	u32 seq, status;

	nvgpu_log_fn(g, " ");

	memset(&cmd, 0, sizeof(struct pmu_cmd));
	cmd.hdr.unit_id = PMU_UNIT_PG;
	cmd.hdr.size = PMU_CMD_HDR_SIZE +
		sizeof(struct pmu_pg_cmd_elpg_cmd);
	cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
	cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
	cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_ALLOW;

	/* no need to wait ack for ELPG enable but set
	 * pending to sync with follow up ELPG disable
	 */
	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
		pmu->elpg_stat = PMU_ELPG_STAT_ON_PENDING;
	else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
		pmu->mscg_transition_state = PMU_ELPG_STAT_ON_PENDING;

	nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_ALLOW");
	status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL,
		PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg,
		pmu, &seq, ~0);
	WARN_ON(status != 0);

	nvgpu_log_fn(g, "done");
	return 0;
}

int nvgpu_pmu_enable_elpg(struct gk20a *g)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct gr_gk20a *gr = &g->gr;
	u32 pg_engine_id;
	u32 pg_engine_id_list = 0;

	int ret = 0;

	nvgpu_log_fn(g, " ");

	if (!g->support_pmu)
		return ret;

	nvgpu_mutex_acquire(&pmu->elpg_mutex);

	pmu->elpg_refcnt++;
	if (pmu->elpg_refcnt <= 0)
		goto exit_unlock;

	/* something is not right if we end up in following code path */
	if (unlikely(pmu->elpg_refcnt > 1)) {
		nvgpu_warn(g,
			"%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
			__func__, pmu->elpg_refcnt);
		WARN_ON(1);
	}

	/* do NOT enable elpg until golden ctx is created,
	 * which is related with the ctx that ELPG save and restore.
	 */
	if (unlikely(!gr->ctx_vars.golden_image_initialized))
		goto exit_unlock;

	/* return if ELPG is already on or on_pending or off_on_pending */
	if (pmu->elpg_stat != PMU_ELPG_STAT_OFF)
		goto exit_unlock;

	if (g->ops.pmu.pmu_pg_supported_engines_list)
		pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);

	for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
		pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
		pg_engine_id++) {

		if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
			ACCESS_ONCE(pmu->mscg_stat) == PMU_MSCG_DISABLED)
			continue;

		if (BIT(pg_engine_id) & pg_engine_id_list)
			ret = pmu_enable_elpg_locked(g, pg_engine_id);
	}

exit_unlock:
	nvgpu_mutex_release(&pmu->elpg_mutex);
	nvgpu_log_fn(g, "done");
	return ret;
}

int nvgpu_pmu_disable_elpg(struct gk20a *g)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct pmu_cmd cmd;
	u32 seq;
	int ret = 0;
	u32 pg_engine_id;
	u32 pg_engine_id_list = 0;
	u32 *ptr = NULL;

	gk20a_dbg_fn("");

	if (g->ops.pmu.pmu_pg_supported_engines_list)
		pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);

	if (!g->support_pmu)
		return ret;

	nvgpu_mutex_acquire(&pmu->elpg_mutex);

	pmu->elpg_refcnt--;
	if (pmu->elpg_refcnt > 0) {
		nvgpu_warn(g,
			"%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
			__func__, pmu->elpg_refcnt);
		WARN_ON(1);
		ret = 0;
		goto exit_unlock;
	}

	/* cancel off_on_pending and return */
	if (pmu->elpg_stat == PMU_ELPG_STAT_OFF_ON_PENDING) {
		pmu->elpg_stat = PMU_ELPG_STAT_OFF;
		ret = 0;
		goto exit_reschedule;
	}
	/* wait if on_pending */
	else if (pmu->elpg_stat == PMU_ELPG_STAT_ON_PENDING) {

		pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
			&pmu->elpg_stat, PMU_ELPG_STAT_ON);

		if (pmu->elpg_stat != PMU_ELPG_STAT_ON) {
			nvgpu_err(g, "ELPG_ALLOW_ACK failed, elpg_stat=%d",
				pmu->elpg_stat);
			pmu_dump_elpg_stats(pmu);
			pmu_dump_falcon_stats(pmu);
			ret = -EBUSY;
			goto exit_unlock;
		}
	}
	/* return if ELPG is already off */
	else if (pmu->elpg_stat != PMU_ELPG_STAT_ON) {
		ret = 0;
		goto exit_reschedule;
	}

	for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
		pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
		pg_engine_id++) {

		if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
			ACCESS_ONCE(pmu->mscg_stat) == PMU_MSCG_DISABLED)
			continue;

		if (BIT(pg_engine_id) & pg_engine_id_list) {
			memset(&cmd, 0, sizeof(struct pmu_cmd));
			cmd.hdr.unit_id = PMU_UNIT_PG;
			cmd.hdr.size = PMU_CMD_HDR_SIZE +
				sizeof(struct pmu_pg_cmd_elpg_cmd);
			cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
			cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
			cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_DISALLOW;

			if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
				pmu->elpg_stat = PMU_ELPG_STAT_OFF_PENDING;
			else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
				pmu->mscg_transition_state =
					PMU_ELPG_STAT_OFF_PENDING;

			if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
				ptr = &pmu->elpg_stat;
			else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
				ptr = &pmu->mscg_transition_state;

			nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_DISALLOW");
			gk20a_pmu_cmd_post(g, &cmd, NULL, NULL,
				PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg,
				pmu, &seq, ~0);

			pmu_wait_message_cond(pmu,
				gk20a_get_gr_idle_timeout(g),
				ptr, PMU_ELPG_STAT_OFF);
			if (*ptr != PMU_ELPG_STAT_OFF) {
				nvgpu_err(g, "ELPG_DISALLOW_ACK failed");
				pmu_dump_elpg_stats(pmu);
				pmu_dump_falcon_stats(pmu);
				ret = -EBUSY;
				goto exit_unlock;
			}
		}
	}

exit_reschedule:
exit_unlock:
	nvgpu_mutex_release(&pmu->elpg_mutex);
	nvgpu_log_fn(g, "done");
	return ret;
}

/* PG init */
static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg,
	void *param, u32 handle, u32 status)
{
	struct nvgpu_pmu *pmu = param;

	nvgpu_log_fn(g, " ");

	if (status != 0) {
		nvgpu_err(g, "ELPG cmd aborted");
		/* TBD: disable ELPG */
		return;
	}

	switch (msg->msg.pg.stat.sub_msg_id) {
	case PMU_PG_STAT_MSG_RESP_DMEM_OFFSET:
		nvgpu_pmu_dbg(g, "ALLOC_DMEM_OFFSET is acknowledged from PMU");
		pmu->stat_dmem_offset[msg->msg.pg.stat.engine_id] =
			msg->msg.pg.stat.data;
		break;
	default:
		break;
	}
}

static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct pmu_cmd cmd;
	u32 seq;

	nvgpu_log_fn(g, " ");

	gk20a_pmu_pg_idle_counter_config(g, pg_engine_id);

	if (g->ops.pmu.pmu_pg_init_param)
		g->ops.pmu.pmu_pg_init_param(g, pg_engine_id);

	/* init ELPG */
	memset(&cmd, 0, sizeof(struct pmu_cmd));
	cmd.hdr.unit_id = PMU_UNIT_PG;
	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd);
	cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
	cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
	cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_INIT;

	nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_INIT");
	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
		pmu_handle_pg_elpg_msg, pmu, &seq, ~0);

	/* alloc dmem for powergating state log */
	pmu->stat_dmem_offset[pg_engine_id] = 0;
	memset(&cmd, 0, sizeof(struct pmu_cmd));
	cmd.hdr.unit_id = PMU_UNIT_PG;
	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_stat);
	cmd.cmd.pg.stat.cmd_type = PMU_PG_CMD_ID_PG_STAT;
	cmd.cmd.pg.stat.engine_id = pg_engine_id;
	cmd.cmd.pg.stat.sub_cmd_id = PMU_PG_STAT_CMD_ALLOC_DMEM;
	cmd.cmd.pg.stat.data = 0;

	nvgpu_pmu_dbg(g, "cmd post PMU_PG_STAT_CMD_ALLOC_DMEM");
	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
		pmu_handle_pg_stat_msg, pmu, &seq, ~0);

	/* disallow ELPG initially
	 * PMU ucode requires a disallow cmd before allow cmd
	 */
	/* set for wait_event PMU_ELPG_STAT_OFF */
	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
		pmu->elpg_stat = PMU_ELPG_STAT_OFF;
	else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
		pmu->mscg_transition_state = PMU_ELPG_STAT_OFF;
	memset(&cmd, 0, sizeof(struct pmu_cmd));
	cmd.hdr.unit_id = PMU_UNIT_PG;
	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd);
	cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
	cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
	cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_DISALLOW;

	nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_DISALLOW");
	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
		pmu_handle_pg_elpg_msg, pmu, &seq, ~0);

	return 0;
}

int nvgpu_pmu_init_powergating(struct gk20a *g)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	u32 pg_engine_id;
	u32 pg_engine_id_list = 0;

	nvgpu_log_fn(g, " ");

	if (g->ops.pmu.pmu_pg_supported_engines_list)
		pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);

	gk20a_gr_wait_initialized(g);

	for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
		pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
		pg_engine_id++) {

		if (BIT(pg_engine_id) & pg_engine_id_list) {
			pmu_pg_init_send(g, pg_engine_id);
			if (pmu->pmu_state == PMU_STATE_INIT_RECEIVED)
				nvgpu_pmu_state_change(g,
					PMU_STATE_ELPG_BOOTING, false);
		}
	}

	if (g->ops.pmu.pmu_pg_param_post_init)
		g->ops.pmu.pmu_pg_param_post_init(g);

	return 0;
}

static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
	void *param, u32 handle, u32 status)
{
	struct nvgpu_pmu *pmu = param;
	struct pmu_pg_msg_eng_buf_stat *eng_buf_stat =
		&msg->msg.pg.eng_buf_stat;

	nvgpu_log_fn(g, " ");

	nvgpu_pmu_dbg(g,
		"reply PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
	if (status != 0) {
		nvgpu_err(g, "PGENG cmd aborted");
		/* TBD: disable ELPG */
		return;
	}

	pmu->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED);
	if ((!pmu->buf_loaded) &&
		(pmu->pmu_state == PMU_STATE_LOADING_PG_BUF))
		nvgpu_err(g, "failed to load PGENG buffer");
	else {
		nvgpu_pmu_state_change(g, pmu->pmu_state, true);
	}
}

int nvgpu_pmu_init_bind_fecs(struct gk20a *g)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct pmu_cmd cmd;
	u32 desc;
	int err = 0;
	u32 gr_engine_id;

	gk20a_dbg_fn("");

	gr_engine_id = gk20a_fifo_get_gr_engine_id(g);

	memset(&cmd, 0, sizeof(struct pmu_cmd));
	cmd.hdr.unit_id = PMU_UNIT_PG;
	cmd.hdr.size = PMU_CMD_HDR_SIZE +
		g->ops.pmu_ver.pg_cmd_eng_buf_load_size(&cmd.cmd.pg);
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type(&cmd.cmd.pg,
		PMU_PG_CMD_ID_ENG_BUF_LOAD);
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id(&cmd.cmd.pg,
		gr_engine_id);
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx(&cmd.cmd.pg,
		PMU_PGENG_GR_BUFFER_IDX_FECS);
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size(&cmd.cmd.pg,
		pmu->pg_buf.size);
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg,
		u64_lo32(pmu->pg_buf.gpu_va));
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg,
		(u8)(pmu->pg_buf.gpu_va & 0xFF));
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg,
		PMU_DMAIDX_VIRT);

	pmu->buf_loaded = false;
	nvgpu_pmu_dbg(g, "cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
		pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
	nvgpu_pmu_state_change(g, PMU_STATE_LOADING_PG_BUF, false);
	return err;
}

void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct pmu_cmd cmd;
	u32 desc;
	u32 gr_engine_id;

	gr_engine_id = gk20a_fifo_get_gr_engine_id(g);

	memset(&cmd, 0, sizeof(struct pmu_cmd));
	cmd.hdr.unit_id = PMU_UNIT_PG;
	cmd.hdr.size = PMU_CMD_HDR_SIZE +
		g->ops.pmu_ver.pg_cmd_eng_buf_load_size(&cmd.cmd.pg);
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type(&cmd.cmd.pg,
		PMU_PG_CMD_ID_ENG_BUF_LOAD);
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id(&cmd.cmd.pg,
		gr_engine_id);
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx(&cmd.cmd.pg,
		PMU_PGENG_GR_BUFFER_IDX_ZBC);
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size(&cmd.cmd.pg,
		pmu->seq_buf.size);
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg,
		u64_lo32(pmu->seq_buf.gpu_va));
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg,
		(u8)(pmu->seq_buf.gpu_va & 0xFF));
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg,
		PMU_DMAIDX_VIRT);

	pmu->buf_loaded = false;
	nvgpu_pmu_dbg(g, "cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_ZBC");
	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
		pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
	nvgpu_pmu_state_change(g, PMU_STATE_LOADING_ZBC, false);
}

/* stats */
int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
	struct pmu_pg_stats_data *pg_stat_data)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	u32 pg_engine_id_list = 0;

	if (!pmu->initialized) {
		pg_stat_data->ingating_time = 0;
		pg_stat_data->ungating_time = 0;
		pg_stat_data->gating_cnt = 0;
		return 0;
	}

	if (g->ops.pmu.pmu_pg_supported_engines_list)
		pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);

	if (BIT(pg_engine_id) & pg_engine_id_list)
		g->ops.pmu.pmu_elpg_statistics(g, pg_engine_id,
			pg_stat_data);

	return 0;
}

/* AELPG */
static void ap_callback_init_and_enable_ctrl(
	struct gk20a *g, struct pmu_msg *msg,
	void *param, u32 seq_desc, u32 status)
{
	/* Define p_ap (i.e pointer to pmu_ap structure) */
	WARN_ON(!msg);

	if (!status) {
		switch (msg->msg.pg.ap_msg.cmn.msg_id) {
		case PMU_AP_MSG_ID_INIT_ACK:
			nvgpu_pmu_dbg(g, "reply PMU_AP_CMD_ID_INIT");
			break;

		default:
			nvgpu_pmu_dbg(g,
				"%s: Invalid Adaptive Power Message: %x\n",
				__func__, msg->msg.pg.ap_msg.cmn.msg_id);
			break;
		}
	}
}

/* Send an Adaptive Power (AP) related command to PMU */
int nvgpu_pmu_ap_send_command(struct gk20a *g,
	union pmu_ap_cmd *p_ap_cmd, bool b_block)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	/* FIXME: where is the PG structure defined?? */
	u32 status = 0;
	struct pmu_cmd cmd;
	u32 seq;
	pmu_callback p_callback = NULL;

	memset(&cmd, 0, sizeof(struct pmu_cmd));

	/* Copy common members */
	cmd.hdr.unit_id = PMU_UNIT_PG;
	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(union pmu_ap_cmd);

	cmd.cmd.pg.ap_cmd.cmn.cmd_type = PMU_PG_CMD_ID_AP;
	cmd.cmd.pg.ap_cmd.cmn.cmd_id = p_ap_cmd->cmn.cmd_id;

	/* Copy other members of command */
	switch (p_ap_cmd->cmn.cmd_id) {
	case PMU_AP_CMD_ID_INIT:
		nvgpu_pmu_dbg(g, "cmd post PMU_AP_CMD_ID_INIT");
		cmd.cmd.pg.ap_cmd.init.pg_sampling_period_us =
			p_ap_cmd->init.pg_sampling_period_us;
		break;

	case PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL:
		nvgpu_pmu_dbg(g, "cmd post PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL");
		cmd.cmd.pg.ap_cmd.init_and_enable_ctrl.ctrl_id =
			p_ap_cmd->init_and_enable_ctrl.ctrl_id;
		memcpy(
			(void *)&(cmd.cmd.pg.ap_cmd.init_and_enable_ctrl.params),
			(void *)&(p_ap_cmd->init_and_enable_ctrl.params),
			sizeof(struct pmu_ap_ctrl_init_params));

		p_callback = ap_callback_init_and_enable_ctrl;
		break;

	case PMU_AP_CMD_ID_ENABLE_CTRL:
		nvgpu_pmu_dbg(g, "cmd post PMU_AP_CMD_ID_ENABLE_CTRL");
		cmd.cmd.pg.ap_cmd.enable_ctrl.ctrl_id =
			p_ap_cmd->enable_ctrl.ctrl_id;
		break;

	case PMU_AP_CMD_ID_DISABLE_CTRL:
		nvgpu_pmu_dbg(g, "cmd post PMU_AP_CMD_ID_DISABLE_CTRL");
		cmd.cmd.pg.ap_cmd.disable_ctrl.ctrl_id =
			p_ap_cmd->disable_ctrl.ctrl_id;
		break;

	case PMU_AP_CMD_ID_KICK_CTRL:
		nvgpu_pmu_dbg(g, "cmd post PMU_AP_CMD_ID_KICK_CTRL");
		cmd.cmd.pg.ap_cmd.kick_ctrl.ctrl_id =
			p_ap_cmd->kick_ctrl.ctrl_id;
		cmd.cmd.pg.ap_cmd.kick_ctrl.skip_count =
			p_ap_cmd->kick_ctrl.skip_count;
		break;

	default:
		nvgpu_pmu_dbg(g, "%s: Invalid Adaptive Power command %d\n",
			__func__, p_ap_cmd->cmn.cmd_id);
		return 0x2f;
	}

	status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
		p_callback, pmu, &seq, ~0);

	if (status) {
		nvgpu_pmu_dbg(g,
			"%s: Unable to submit Adaptive Power Command %d\n",
			__func__, p_ap_cmd->cmn.cmd_id);
		goto err_return;
	}

	/* TODO: Implement blocking calls (b_block) */

err_return:
	return status;
}

int nvgpu_aelpg_init(struct gk20a *g)
{
	int status = 0;

	/* Remove reliance on app_ctrl field. */
	union pmu_ap_cmd ap_cmd;

	/* TODO: Check for elpg being ready? */
	ap_cmd.init.cmd_id = PMU_AP_CMD_ID_INIT;
	ap_cmd.init.pg_sampling_period_us = g->pmu.aelpg_param[0];

	status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
	return status;
}

int nvgpu_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id)
{
	int status = 0;
	union pmu_ap_cmd ap_cmd;

	/* TODO: Probably check if ELPG is ready? */
	ap_cmd.init_and_enable_ctrl.cmd_id = PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL;
	ap_cmd.init_and_enable_ctrl.ctrl_id = ctrl_id;
	ap_cmd.init_and_enable_ctrl.params.min_idle_filter_us =
		g->pmu.aelpg_param[1];
	ap_cmd.init_and_enable_ctrl.params.min_target_saving_us =
		g->pmu.aelpg_param[2];
	ap_cmd.init_and_enable_ctrl.params.power_break_even_us =
		g->pmu.aelpg_param[3];
	ap_cmd.init_and_enable_ctrl.params.cycles_per_sample_max =
		g->pmu.aelpg_param[4];

	switch (ctrl_id) {
	case PMU_AP_CTRL_ID_GRAPHICS:
		break;
	default:
		break;
	}

	status = nvgpu_pmu_ap_send_command(g, &ap_cmd, true);
	return status;
}
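Editor's note: callers are expected to bracket engine-touching work with the refcounted disable/enable pair, exactly as the fifo and gr hunks below do. A minimal sketch of that pattern, with do_engine_work() standing in as a hypothetical placeholder for the caller's own code:

static int run_with_elpg_off(struct gk20a *g)
{
	int err = 0;

	/* refcounted, so nested callers stay balanced */
	if (g->support_pmu && g->can_elpg)
		err = nvgpu_pmu_disable_elpg(g);	/* waits for DISALLOW ack */
	if (err)
		return err;

	err = do_engine_work(g);	/* hypothetical engine-touching work */

	if (g->support_pmu && g->can_elpg)
		nvgpu_pmu_enable_elpg(g);	/* posts ALLOW, does not wait */

	return err;
}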
@@ -1308,7 +1308,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, u32 powermode)
			return err;

		/*do elpg disable before clock gating */
-		gk20a_pmu_pg_global_enable(g, false);
+		nvgpu_pmu_pg_global_enable(g, false);

		if (g->ops.clock_gating.slcg_gr_load_gating_prod)
			g->ops.clock_gating.slcg_gr_load_gating_prod(g,

@@ -1355,7 +1355,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, u32 powermode)
			g->ops.clock_gating.slcg_gr_load_gating_prod(g,
				g->slcg_enabled);

-		gk20a_pmu_pg_global_enable(g, true);
+		nvgpu_pmu_pg_global_enable(g, true);

		gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module idle");
		gk20a_idle(g);

@@ -1248,7 +1248,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)

	if (engine_enum == ENGINE_GR_GK20A) {
		if (g->support_pmu && g->can_elpg)
-			gk20a_pmu_disable_elpg(g);
+			nvgpu_pmu_disable_elpg(g);
		/* resetting engine will alter read/write index.
		 * need to flush circular buffer before re-enabling FECS.
		 */

@@ -1261,7 +1261,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
		   enough, we do full init sequence */
		gk20a_gr_reset(g);
		if (g->support_pmu && g->can_elpg)
-			gk20a_pmu_enable_elpg(g);
+			nvgpu_pmu_enable_elpg(g);
	}
	if ((engine_enum == ENGINE_GRCE_GK20A) ||
		(engine_enum == ENGINE_ASYNC_CE_GK20A)) {

@@ -1496,7 +1496,7 @@ static bool gk20a_fifo_handle_mmu_fault(

	/* Disable power management */
	if (g->support_pmu && g->can_elpg)
-		gk20a_pmu_disable_elpg(g);
+		nvgpu_pmu_disable_elpg(g);
	if (g->ops.clock_gating.slcg_gr_load_gating_prod)
		g->ops.clock_gating.slcg_gr_load_gating_prod(g,
			false);

@@ -1699,7 +1699,7 @@ static bool gk20a_fifo_handle_mmu_fault(

	/* It is safe to enable ELPG again. */
	if (g->support_pmu && g->can_elpg)
-		gk20a_pmu_enable_elpg(g);
+		nvgpu_pmu_enable_elpg(g);

	return verbose;
}
@@ -456,18 +456,18 @@ static ssize_t elpg_enable_store(struct device *dev,
	 */
	if (val && !g->elpg_enabled) {
		g->elpg_enabled = true;
-		gk20a_pmu_pg_global_enable(g, true);
+		nvgpu_pmu_pg_global_enable(g, true);

	} else if (!val && g->elpg_enabled) {
		if (g->ops.pmu.pmu_pg_engines_feature_list &&
			g->ops.pmu.pmu_pg_engines_feature_list(g,
				PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
			PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
-			gk20a_pmu_pg_global_enable(g, false);
+			nvgpu_pmu_pg_global_enable(g, false);
			g->elpg_enabled = false;
		} else {
			g->elpg_enabled = false;
-			gk20a_pmu_pg_global_enable(g, false);
+			nvgpu_pmu_pg_global_enable(g, false);
		}
	}
	gk20a_idle(g);

@@ -524,13 +524,13 @@ static ssize_t mscg_enable_store(struct device *dev,
	} else if (!val && g->mscg_enabled) {
		if (g->ops.pmu.pmu_is_lpwr_feature_supported(g,
			PMU_PG_LPWR_FEATURE_MSCG)) {
-			gk20a_pmu_pg_global_enable(g, false);
+			nvgpu_pmu_pg_global_enable(g, false);
			WRITE_ONCE(pmu->mscg_stat, PMU_MSCG_DISABLED);
			/* make status visible */
			smp_mb();
			g->mscg_enabled = false;
			if (g->elpg_enabled)
-				gk20a_pmu_pg_global_enable(g, true);
+				nvgpu_pmu_pg_global_enable(g, true);
		}
		g->mscg_enabled = false;
	}

@@ -584,11 +584,11 @@ static ssize_t aelpg_param_store(struct device *dev,
		/* Disable AELPG */
		ap_cmd.disable_ctrl.cmd_id = PMU_AP_CMD_ID_DISABLE_CTRL;
		ap_cmd.disable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
-		status = gk20a_pmu_ap_send_command(g, &ap_cmd, false);
+		status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);

		/* Enable AELPG */
-		gk20a_aelpg_init(g);
-		gk20a_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);
+		nvgpu_aelpg_init(g);
+		nvgpu_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);
	}

	return count;

@@ -630,13 +630,13 @@ static ssize_t aelpg_enable_store(struct device *dev,
			/* Enable AELPG */
			ap_cmd.enable_ctrl.cmd_id = PMU_AP_CMD_ID_ENABLE_CTRL;
			ap_cmd.enable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
-			status = gk20a_pmu_ap_send_command(g, &ap_cmd, false);
+			status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
		} else if (!val && g->aelpg_enabled) {
			g->aelpg_enabled = false;
			/* Disable AELPG */
			ap_cmd.disable_ctrl.cmd_id = PMU_AP_CMD_ID_DISABLE_CTRL;
			ap_cmd.disable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
-			status = gk20a_pmu_ap_send_command(g, &ap_cmd, false);
+			status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
		}
	} else {
		dev_info(dev, "PMU is not ready, AELPG request failed\n");
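Editor's note: the five aelpg_param values written through this sysfs node feed straight into the AP commands built by nvgpu_aelpg_init() and nvgpu_aelpg_init_and_enable() in the new file above. A minimal sketch of the index-to-parameter mapping, assuming the default values from the APCTRL_* defines at the end of this diff:

/* Sketch only: index -> AP parameter, defaults per pmu_gk20a.h */
g->pmu.aelpg_param[0] = APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US;    /* 1000000 us */
g->pmu.aelpg_param[1] = APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US;   /* 100 us */
g->pmu.aelpg_param[2] = APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US; /* 10000 us */
g->pmu.aelpg_param[3] = APCTRL_POWER_BREAKEVEN_DEFAULT_US;       /* 2000 us */
g->pmu.aelpg_param[4] = APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT;    /* 200 */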
@@ -3235,7 +3235,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
		u32 lockboost;

		if (g->support_pmu) {
-			err = gk20a_pmu_disable_elpg(g);
+			err = nvgpu_pmu_disable_elpg(g);
			if (err) {
				nvgpu_err(g,
					"failed to set disable elpg");

@@ -3285,7 +3285,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
			args->flags |= NVGPU_ALLOC_OBJ_FLAGS_LOCKBOOST_ZERO;

		if (g->support_pmu && g->can_elpg)
-			gk20a_pmu_enable_elpg(g);
+			nvgpu_pmu_enable_elpg(g);
	}

	/* init golden image, ELPG enabled after this is done */

@@ -547,14 +547,14 @@ void gk20a_gr_clear_sm_hww(struct gk20a *g,
	({ \
		int err = 0; \
		if (g->support_pmu && g->elpg_enabled) {\
-			err = gk20a_pmu_disable_elpg(g); \
+			err = nvgpu_pmu_disable_elpg(g); \
			if (err) \
-				gk20a_pmu_enable_elpg(g); \
+				nvgpu_pmu_enable_elpg(g); \
		} \
		if (!err) { \
			err = func; \
			if (g->support_pmu && g->elpg_enabled) \
-				gk20a_pmu_enable_elpg(g); \
+				nvgpu_pmu_enable_elpg(g); \
		} \
		err; \
	})
@@ -41,10 +41,6 @@
|
||||
#define PMU_MEM_SCRUBBING_TIMEOUT_MAX 1000
|
||||
#define PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT 10
|
||||
|
||||
static void ap_callback_init_and_enable_ctrl(
|
||||
struct gk20a *g, struct pmu_msg *msg,
|
||||
void *param, u32 seq_desc, u32 status);
|
||||
|
||||
bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos)
|
||||
{
|
||||
u32 i = 0, j = strlen(strings);
|
||||
@@ -488,6 +484,14 @@ int pmu_bootstrap(struct nvgpu_pmu *pmu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void gk20a_pmu_pg_idle_counter_config(struct gk20a *g, u32 pg_engine_id)
|
||||
{
|
||||
gk20a_writel(g, pwr_pmu_pg_idlefilth_r(pg_engine_id),
|
||||
PMU_PG_IDLE_THRESHOLD);
|
||||
gk20a_writel(g, pwr_pmu_pg_ppuidlefilth_r(pg_engine_id),
|
||||
PMU_PG_POST_POWERUP_IDLE_THRESHOLD);
|
||||
}
|
||||
|
||||
int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token)
|
||||
{
|
||||
struct gk20a *g = gk20a_from_pmu(pmu);
|
||||
@@ -692,30 +696,6 @@ void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set)
|
||||
pwr_pmu_msgq_tail_val_f(*tail));
|
||||
}
|
||||
|
||||
static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
|
||||
void *param, u32 handle, u32 status)
|
||||
{
|
||||
struct nvgpu_pmu *pmu = param;
|
||||
struct pmu_pg_msg_eng_buf_stat *eng_buf_stat = &msg->msg.pg.eng_buf_stat;
|
||||
|
||||
gk20a_dbg_fn("");
|
||||
|
||||
gk20a_dbg_pmu("reply PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
|
||||
if (status != 0) {
|
||||
nvgpu_err(g, "PGENG cmd aborted");
|
||||
/* TBD: disable ELPG */
|
||||
return;
|
||||
}
|
||||
|
||||
pmu->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED);
|
||||
if ((!pmu->buf_loaded) &&
|
||||
(pmu->pmu_state == PMU_STATE_LOADING_PG_BUF))
|
||||
nvgpu_err(g, "failed to load PGENG buffer");
|
||||
else {
|
||||
nvgpu_pmu_state_change(g, pmu->pmu_state, true);
|
||||
}
|
||||
}
|
||||
|
||||
static int gk20a_init_pmu_setup_hw1(struct gk20a *g)
|
||||
{
|
||||
struct nvgpu_pmu *pmu = &g->pmu;
|
||||
@@ -750,80 +730,6 @@ static int gk20a_init_pmu_setup_hw1(struct gk20a *g)
|
||||
|
||||
}
|
||||
|
||||
int nvgpu_pmu_init_bind_fecs(struct gk20a *g)
|
||||
{
|
||||
struct nvgpu_pmu *pmu = &g->pmu;
|
||||
struct pmu_cmd cmd;
|
||||
u32 desc;
|
||||
int err = 0;
|
||||
u32 gr_engine_id;
|
||||
|
||||
gk20a_dbg_fn("");
|
||||
|
||||
gr_engine_id = gk20a_fifo_get_gr_engine_id(g);
|
||||
|
||||
memset(&cmd, 0, sizeof(struct pmu_cmd));
|
||||
cmd.hdr.unit_id = PMU_UNIT_PG;
|
||||
cmd.hdr.size = PMU_CMD_HDR_SIZE +
|
||||
g->ops.pmu_ver.pg_cmd_eng_buf_load_size(&cmd.cmd.pg);
|
||||
g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type(&cmd.cmd.pg,
|
||||
PMU_PG_CMD_ID_ENG_BUF_LOAD);
|
||||
g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id(&cmd.cmd.pg,
|
||||
gr_engine_id);
|
||||
g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx(&cmd.cmd.pg,
|
||||
PMU_PGENG_GR_BUFFER_IDX_FECS);
|
||||
g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size(&cmd.cmd.pg,
|
||||
pmu->pg_buf.size);
|
||||
g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg,
|
||||
u64_lo32(pmu->pg_buf.gpu_va));
|
||||
g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg,
|
||||
(u8)(pmu->pg_buf.gpu_va & 0xFF));
|
||||
g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg,
|
||||
PMU_DMAIDX_VIRT);
|
||||
|
||||
pmu->buf_loaded = false;
|
||||
gk20a_dbg_pmu("cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
|
||||
gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
|
||||
pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
|
||||
nvgpu_pmu_state_change(g, PMU_STATE_LOADING_PG_BUF, false);
|
||||
return err;
|
||||
}
|
||||
|
||||
void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g)
|
||||
{
|
||||
struct nvgpu_pmu *pmu = &g->pmu;
|
||||
struct pmu_cmd cmd;
|
||||
u32 desc;
|
||||
u32 gr_engine_id;
|
||||
|
||||
gr_engine_id = gk20a_fifo_get_gr_engine_id(g);
|
||||
|
||||
memset(&cmd, 0, sizeof(struct pmu_cmd));
|
||||
cmd.hdr.unit_id = PMU_UNIT_PG;
|
||||
cmd.hdr.size = PMU_CMD_HDR_SIZE +
|
||||
g->ops.pmu_ver.pg_cmd_eng_buf_load_size(&cmd.cmd.pg);
|
||||
g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type(&cmd.cmd.pg,
|
||||
PMU_PG_CMD_ID_ENG_BUF_LOAD);
|
||||
g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id(&cmd.cmd.pg,
|
||||
gr_engine_id);
|
||||
g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx(&cmd.cmd.pg,
|
||||
PMU_PGENG_GR_BUFFER_IDX_ZBC);
|
||||
g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size(&cmd.cmd.pg,
|
||||
pmu->seq_buf.size);
|
||||
g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg,
|
||||
u64_lo32(pmu->seq_buf.gpu_va));
|
||||
g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg,
|
||||
(u8)(pmu->seq_buf.gpu_va & 0xFF));
|
||||
g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg,
|
||||
PMU_DMAIDX_VIRT);
|
||||
|
||||
pmu->buf_loaded = false;
|
||||
gk20a_dbg_pmu("cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_ZBC");
|
||||
gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
|
||||
pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
|
||||
nvgpu_pmu_state_change(g, PMU_STATE_LOADING_ZBC, false);
|
||||
}
|
||||
|
||||
static void gk20a_write_dmatrfbase(struct gk20a *g, u32 addr)
|
||||
{
|
||||
gk20a_writel(g, pwr_falcon_dmatrfbase_r(), addr);
|
||||
@@ -896,184 +802,6 @@ void gk20a_init_pmu_ops(struct gpu_ops *gops)
|
||||
gops->pmu.reset = gk20a_pmu_reset;
|
||||
}
|
||||
|
||||
static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
|
||||
void *param, u32 handle, u32 status)
|
||||
{
|
||||
struct nvgpu_pmu *pmu = param;
|
||||
struct pmu_pg_msg_elpg_msg *elpg_msg = &msg->msg.pg.elpg_msg;
|
||||
|
||||
gk20a_dbg_fn("");
|
||||
|
||||
if (status != 0) {
|
||||
nvgpu_err(g, "ELPG cmd aborted");
|
||||
/* TBD: disable ELPG */
|
||||
return;
|
||||
}
|
||||
|
||||
switch (elpg_msg->msg) {
|
||||
case PMU_PG_ELPG_MSG_INIT_ACK:
|
||||
gk20a_dbg_pmu("INIT_PG is ack from PMU, eng - %d",
|
||||
elpg_msg->engine_id);
|
||||
break;
|
||||
case PMU_PG_ELPG_MSG_ALLOW_ACK:
|
||||
gk20a_dbg_pmu("ALLOW is ack from PMU, eng - %d",
|
||||
elpg_msg->engine_id);
|
||||
if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
|
||||
pmu->elpg_stat = PMU_ELPG_STAT_ON;
|
||||
else if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
|
||||
pmu->mscg_transition_state = PMU_ELPG_STAT_ON;
|
||||
break;
|
||||
case PMU_PG_ELPG_MSG_DISALLOW_ACK:
|
||||
gk20a_dbg_pmu("DISALLOW is ack from PMU, eng - %d",
|
||||
elpg_msg->engine_id);
|
||||
|
||||
if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
|
||||
pmu->elpg_stat = PMU_ELPG_STAT_OFF;
|
||||
else if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
|
||||
pmu->mscg_transition_state = PMU_ELPG_STAT_OFF;
|
||||
|
||||
if (pmu->pmu_state == PMU_STATE_ELPG_BOOTING) {
|
||||
if (g->ops.pmu.pmu_pg_engines_feature_list &&
|
||||
g->ops.pmu.pmu_pg_engines_feature_list(g,
|
||||
PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
|
||||
PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
|
||||
pmu->initialized = true;
|
||||
nvgpu_pmu_state_change(g, PMU_STATE_STARTED,
|
||||
false);
|
||||
WRITE_ONCE(pmu->mscg_stat, PMU_MSCG_DISABLED);
|
||||
/* make status visible */
|
||||
smp_mb();
|
||||
} else
|
||||
nvgpu_pmu_state_change(g, PMU_STATE_ELPG_BOOTED,
|
||||
true);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
nvgpu_err(g,
|
||||
"unsupported ELPG message : 0x%04x", elpg_msg->msg);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg,
|
||||
void *param, u32 handle, u32 status)
|
||||
{
|
||||
struct nvgpu_pmu *pmu = param;
|
||||
|
||||
gk20a_dbg_fn("");
|
||||
|
||||
if (status != 0) {
|
||||
nvgpu_err(g, "ELPG cmd aborted");
|
||||
/* TBD: disable ELPG */
|
||||
return;
|
||||
}
|
||||
|
||||
switch (msg->msg.pg.stat.sub_msg_id) {
|
||||
case PMU_PG_STAT_MSG_RESP_DMEM_OFFSET:
|
||||
gk20a_dbg_pmu("ALLOC_DMEM_OFFSET is acknowledged from PMU");
|
||||
pmu->stat_dmem_offset[msg->msg.pg.stat.engine_id] =
|
||||
msg->msg.pg.stat.data;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
|
||||
{
|
||||
struct nvgpu_pmu *pmu = &g->pmu;
|
||||
struct pmu_cmd cmd;
|
||||
u32 seq;
|
||||
|
||||
gk20a_dbg_fn("");
|
||||
|
||||
gk20a_writel(g, pwr_pmu_pg_idlefilth_r(pg_engine_id),
|
||||
PMU_PG_IDLE_THRESHOLD);
|
||||
gk20a_writel(g, pwr_pmu_pg_ppuidlefilth_r(pg_engine_id),
|
||||
PMU_PG_POST_POWERUP_IDLE_THRESHOLD);
|
||||
|
||||
if (g->ops.pmu.pmu_pg_init_param)
|
||||
g->ops.pmu.pmu_pg_init_param(g, pg_engine_id);
|
||||
|
||||
/* init ELPG */
|
||||
memset(&cmd, 0, sizeof(struct pmu_cmd));
|
||||
cmd.hdr.unit_id = PMU_UNIT_PG;
|
||||
cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd);
|
||||
cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
|
||||
cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
|
||||
cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_INIT;
|
||||
|
||||
gk20a_dbg_pmu("cmd post PMU_PG_ELPG_CMD_INIT");
|
||||
gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
|
||||
pmu_handle_pg_elpg_msg, pmu, &seq, ~0);
|
||||
|
||||
/* alloc dmem for powergating state log */
|
||||
pmu->stat_dmem_offset[pg_engine_id] = 0;
|
||||
memset(&cmd, 0, sizeof(struct pmu_cmd));
|
||||
cmd.hdr.unit_id = PMU_UNIT_PG;
|
||||
cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_stat);
|
||||
cmd.cmd.pg.stat.cmd_type = PMU_PG_CMD_ID_PG_STAT;
|
||||
cmd.cmd.pg.stat.engine_id = pg_engine_id;
|
||||
cmd.cmd.pg.stat.sub_cmd_id = PMU_PG_STAT_CMD_ALLOC_DMEM;
|
||||
cmd.cmd.pg.stat.data = 0;
|
||||
|
||||
gk20a_dbg_pmu("cmd post PMU_PG_STAT_CMD_ALLOC_DMEM");
|
||||
gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
|
||||
pmu_handle_pg_stat_msg, pmu, &seq, ~0);
|
||||
|
||||
/* disallow ELPG initially
|
||||
PMU ucode requires a disallow cmd before allow cmd */
|
||||
/* set for wait_event PMU_ELPG_STAT_OFF */
|
||||
if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
|
||||
pmu->elpg_stat = PMU_ELPG_STAT_OFF;
|
||||
else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
|
||||
pmu->mscg_transition_state = PMU_ELPG_STAT_OFF;
|
||||
memset(&cmd, 0, sizeof(struct pmu_cmd));
|
||||
cmd.hdr.unit_id = PMU_UNIT_PG;
|
||||
cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd);
|
||||
cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
|
||||
cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
|
||||
cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_DISALLOW;
|
||||
|
||||
gk20a_dbg_pmu("cmd post PMU_PG_ELPG_CMD_DISALLOW");
|
||||
gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
|
||||
pmu_handle_pg_elpg_msg, pmu, &seq, ~0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int nvgpu_pmu_init_powergating(struct gk20a *g)
|
||||
{
|
||||
struct nvgpu_pmu *pmu = &g->pmu;
|
||||
u32 pg_engine_id;
|
||||
u32 pg_engine_id_list = 0;
|
||||
|
||||
gk20a_dbg_fn("");
|
||||
|
||||
if (g->ops.pmu.pmu_pg_supported_engines_list)
|
||||
pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
|
||||
|
||||
gk20a_gr_wait_initialized(g);
|
||||
|
||||
for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
|
||||
pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
|
||||
pg_engine_id++) {
|
||||
|
||||
if (BIT(pg_engine_id) & pg_engine_id_list) {
|
||||
pmu_pg_init_send(g, pg_engine_id);
|
||||
if (pmu->pmu_state == PMU_STATE_INIT_RECEIVED)
|
||||
nvgpu_pmu_state_change(g,
|
||||
PMU_STATE_ELPG_BOOTING, false);
|
||||
}
|
||||
}
|
||||
|
||||
if (g->ops.pmu.pmu_pg_param_post_init)
|
||||
g->ops.pmu.pmu_pg_param_post_init(g);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u8 get_perfmon_id(struct nvgpu_pmu *pmu)
|
||||
{
|
||||
struct gk20a *g = gk20a_from_pmu(pmu);
|
||||
@@ -1355,7 +1083,7 @@ int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void pmu_dump_elpg_stats(struct nvgpu_pmu *pmu)
|
||||
void pmu_dump_elpg_stats(struct nvgpu_pmu *pmu)
|
||||
{
|
||||
struct gk20a *g = gk20a_from_pmu(pmu);
|
||||
struct pmu_pg_stats stats;
|
||||
@@ -1631,238 +1359,6 @@ void gk20a_pmu_isr(struct gk20a *g)
|
||||
nvgpu_mutex_release(&pmu->isr_mutex);
|
||||
}
|
||||
|
||||
int gk20a_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg)
|
||||
{
|
||||
u32 status = 0;
|
||||
|
||||
if (enable_pg == true) {
|
||||
if (g->ops.pmu.pmu_pg_engines_feature_list &&
|
||||
g->ops.pmu.pmu_pg_engines_feature_list(g,
|
||||
PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
|
||||
PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
|
||||
if (g->ops.pmu.pmu_lpwr_enable_pg)
|
||||
status = g->ops.pmu.pmu_lpwr_enable_pg(g,
|
||||
true);
|
||||
} else if (g->support_pmu && g->can_elpg)
|
||||
status = gk20a_pmu_enable_elpg(g);
|
||||
} else if (enable_pg == false) {
|
||||
if (g->ops.pmu.pmu_pg_engines_feature_list &&
|
||||
g->ops.pmu.pmu_pg_engines_feature_list(g,
|
||||
PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
|
||||
PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
|
||||
if (g->ops.pmu.pmu_lpwr_disable_pg)
|
||||
status = g->ops.pmu.pmu_lpwr_disable_pg(g,
|
||||
true);
|
||||
} else if (g->support_pmu && g->can_elpg)
|
||||
status = gk20a_pmu_disable_elpg(g);
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
static int gk20a_pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id)
|
||||
{
|
||||
struct nvgpu_pmu *pmu = &g->pmu;
|
||||
struct pmu_cmd cmd;
|
||||
u32 seq, status;
|
||||
|
||||
gk20a_dbg_fn("");
|
||||
|
||||
memset(&cmd, 0, sizeof(struct pmu_cmd));
|
||||
cmd.hdr.unit_id = PMU_UNIT_PG;
|
||||
cmd.hdr.size = PMU_CMD_HDR_SIZE +
|
||||
sizeof(struct pmu_pg_cmd_elpg_cmd);
|
||||
cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
|
||||
cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
|
||||
cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_ALLOW;
|
||||
|
||||
/* no need to wait ack for ELPG enable but set
|
||||
* pending to sync with follow up ELPG disable
|
||||
*/
|
||||
if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
|
||||
pmu->elpg_stat = PMU_ELPG_STAT_ON_PENDING;
|
||||
|
||||
else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
|
||||
pmu->mscg_transition_state = PMU_ELPG_STAT_ON_PENDING;
|
||||
|
||||
gk20a_dbg_pmu("cmd post PMU_PG_ELPG_CMD_ALLOW");
|
||||
status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL,
|
||||
PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg,
|
||||
pmu, &seq, ~0);
|
||||
WARN_ON(status != 0);
|
||||
|
||||
gk20a_dbg_fn("done");
|
||||
return 0;
|
||||
}
|
||||
|
||||
int gk20a_pmu_enable_elpg(struct gk20a *g)
|
||||
{
|
||||
struct nvgpu_pmu *pmu = &g->pmu;
|
||||
struct gr_gk20a *gr = &g->gr;
|
||||
u32 pg_engine_id;
|
||||
u32 pg_engine_id_list = 0;
|
||||
|
||||
int ret = 0;
|
||||
|
||||
gk20a_dbg_fn("");
|
||||
|
||||
if (!g->support_pmu)
|
||||
return ret;
|
||||
|
||||
nvgpu_mutex_acquire(&pmu->elpg_mutex);
|
||||
|
||||
pmu->elpg_refcnt++;
|
||||
if (pmu->elpg_refcnt <= 0)
|
||||
goto exit_unlock;
|
||||
|
||||
/* something is not right if we end up in following code path */
|
||||
if (unlikely(pmu->elpg_refcnt > 1)) {
|
||||
nvgpu_warn(g,
|
||||
"%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
|
||||
__func__, pmu->elpg_refcnt);
|
||||
WARN_ON(1);
|
||||
}
|
||||
|
||||
/* do NOT enable elpg until golden ctx is created,
|
||||
which is related with the ctx that ELPG save and restore. */
|
||||
if (unlikely(!gr->ctx_vars.golden_image_initialized))
|
||||
goto exit_unlock;
|
||||
|
||||
/* return if ELPG is already on or on_pending or off_on_pending */
|
||||
if (pmu->elpg_stat != PMU_ELPG_STAT_OFF)
|
||||
goto exit_unlock;
|
||||
|
||||
if (g->ops.pmu.pmu_pg_supported_engines_list)
|
||||
pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
|
||||
|
||||
for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
|
||||
pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
|
||||
pg_engine_id++) {
|
||||
|
||||
if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
|
||||
ACCESS_ONCE(pmu->mscg_stat) == PMU_MSCG_DISABLED)
|
||||
continue;
|
||||
|
||||
if (BIT(pg_engine_id) & pg_engine_id_list)
|
||||
ret = gk20a_pmu_enable_elpg_locked(g, pg_engine_id);
|
||||
}
|
||||
|
||||
exit_unlock:
|
||||
nvgpu_mutex_release(&pmu->elpg_mutex);
|
||||
gk20a_dbg_fn("done");
|
||||
return ret;
|
||||
}
|
||||
|
||||
int gk20a_pmu_disable_elpg(struct gk20a *g)
|
||||
{
|
||||
struct nvgpu_pmu *pmu = &g->pmu;
|
||||
struct pmu_cmd cmd;
|
||||
u32 seq;
|
||||
int ret = 0;
|
||||
u32 pg_engine_id;
|
||||
u32 pg_engine_id_list = 0;
|
||||
u32 *ptr = NULL;
|
||||
|
||||
gk20a_dbg_fn("");
|
||||
|
||||
if (g->ops.pmu.pmu_pg_supported_engines_list)
|
||||
pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
|
||||
|
||||
if (!g->support_pmu)
|
||||
return ret;
|
||||
|
||||
nvgpu_mutex_acquire(&pmu->elpg_mutex);
|
||||
|
||||
pmu->elpg_refcnt--;
|
||||
if (pmu->elpg_refcnt > 0) {
|
||||
nvgpu_warn(g,
|
||||
"%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
|
||||
__func__, pmu->elpg_refcnt);
|
||||
WARN_ON(1);
|
||||
ret = 0;
|
||||
goto exit_unlock;
|
||||
}
|
||||
|
||||
/* cancel off_on_pending and return */
|
||||
if (pmu->elpg_stat == PMU_ELPG_STAT_OFF_ON_PENDING) {
|
||||
pmu->elpg_stat = PMU_ELPG_STAT_OFF;
|
||||
ret = 0;
|
||||
goto exit_reschedule;
|
||||
}
|
||||
/* wait if on_pending */
|
||||
else if (pmu->elpg_stat == PMU_ELPG_STAT_ON_PENDING) {
|
||||
|
||||
pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
|
||||
&pmu->elpg_stat, PMU_ELPG_STAT_ON);
|
||||
|
||||
if (pmu->elpg_stat != PMU_ELPG_STAT_ON) {
|
||||
nvgpu_err(g, "ELPG_ALLOW_ACK failed, elpg_stat=%d",
|
||||
pmu->elpg_stat);
|
||||
pmu_dump_elpg_stats(pmu);
|
||||
pmu_dump_falcon_stats(pmu);
|
||||
ret = -EBUSY;
|
||||
goto exit_unlock;
|
||||
}
|
||||
}
|
||||
/* return if ELPG is already off */
|
||||
else if (pmu->elpg_stat != PMU_ELPG_STAT_ON) {
|
||||
ret = 0;
|
||||
goto exit_reschedule;
|
||||
}
|
||||
|
||||
for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
|
||||
pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
|
||||
pg_engine_id++) {
|
||||
|
||||
if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
|
||||
ACCESS_ONCE(pmu->mscg_stat) == PMU_MSCG_DISABLED)
|
||||
continue;
|
||||
|
||||
if (BIT(pg_engine_id) & pg_engine_id_list) {
|
||||
memset(&cmd, 0, sizeof(struct pmu_cmd));
|
||||
cmd.hdr.unit_id = PMU_UNIT_PG;
|
||||
cmd.hdr.size = PMU_CMD_HDR_SIZE +
|
||||
sizeof(struct pmu_pg_cmd_elpg_cmd);
|
||||
cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
|
||||
cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
|
||||
cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_DISALLOW;
|
||||
|
||||
if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
|
||||
pmu->elpg_stat = PMU_ELPG_STAT_OFF_PENDING;
|
||||
else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
|
||||
pmu->mscg_transition_state =
|
||||
PMU_ELPG_STAT_OFF_PENDING;
|
||||
|
||||
if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
|
||||
ptr = &pmu->elpg_stat;
|
||||
else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
|
||||
ptr = &pmu->mscg_transition_state;
|
||||
|
||||
gk20a_dbg_pmu("cmd post PMU_PG_ELPG_CMD_DISALLOW");
|
||||
gk20a_pmu_cmd_post(g, &cmd, NULL, NULL,
|
||||
PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg,
|
||||
pmu, &seq, ~0);
|
||||
|
||||
pmu_wait_message_cond(pmu,
|
||||
gk20a_get_gr_idle_timeout(g),
|
||||
ptr, PMU_ELPG_STAT_OFF);
|
||||
if (*ptr != PMU_ELPG_STAT_OFF) {
|
||||
nvgpu_err(g, "ELPG_DISALLOW_ACK failed");
|
||||
pmu_dump_elpg_stats(pmu);
|
||||
pmu_dump_falcon_stats(pmu);
|
||||
ret = -EBUSY;
|
||||
goto exit_unlock;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
exit_reschedule:
|
||||
exit_unlock:
|
||||
nvgpu_mutex_release(&pmu->elpg_mutex);
|
||||
gk20a_dbg_fn("done");
|
||||
return ret;
|
||||
}
|
||||
|
||||
int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable)
|
||||
{
|
||||
struct nvgpu_pmu *pmu = &g->pmu;
|
||||
@@ -1947,173 +1443,3 @@ void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
|
||||
pg_stat_data->avg_entry_latency_us = stats.pg_avg_entry_time_us;
|
||||
pg_stat_data->avg_exit_latency_us = stats.pg_avg_exit_time_us;
|
||||
}
|
||||
|
||||
int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
|
||||
struct pmu_pg_stats_data *pg_stat_data)
|
||||
{
|
||||
struct nvgpu_pmu *pmu = &g->pmu;
|
||||
u32 pg_engine_id_list = 0;
|
||||
|
||||
if (!pmu->initialized) {
|
||||
pg_stat_data->ingating_time = 0;
|
||||
pg_stat_data->ungating_time = 0;
|
||||
pg_stat_data->gating_cnt = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (g->ops.pmu.pmu_pg_supported_engines_list)
|
||||
pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
|
||||
|
||||
if (BIT(pg_engine_id) & pg_engine_id_list)
|
||||
g->ops.pmu.pmu_elpg_statistics(g, pg_engine_id,
|
||||
pg_stat_data);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Send an Adaptive Power (AP) related command to PMU */
|
||||
int gk20a_pmu_ap_send_command(struct gk20a *g,
|
||||
union pmu_ap_cmd *p_ap_cmd, bool b_block)
|
||||
{
|
||||
struct nvgpu_pmu *pmu = &g->pmu;
|
||||
/* FIXME: where is the PG structure defined?? */
|
||||
u32 status = 0;
|
||||
struct pmu_cmd cmd;
|
||||
u32 seq;
|
||||
pmu_callback p_callback = NULL;
|
||||
|
||||
memset(&cmd, 0, sizeof(struct pmu_cmd));
|
||||
|
||||
/* Copy common members */
|
||||
cmd.hdr.unit_id = PMU_UNIT_PG;
|
||||
cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(union pmu_ap_cmd);
|
||||
|
||||
cmd.cmd.pg.ap_cmd.cmn.cmd_type = PMU_PG_CMD_ID_AP;
|
||||
cmd.cmd.pg.ap_cmd.cmn.cmd_id = p_ap_cmd->cmn.cmd_id;
|
||||
|
||||
/* Copy other members of command */
|
||||
switch (p_ap_cmd->cmn.cmd_id) {
|
||||
case PMU_AP_CMD_ID_INIT:
|
||||
gk20a_dbg_pmu("cmd post PMU_AP_CMD_ID_INIT");
|
||||
cmd.cmd.pg.ap_cmd.init.pg_sampling_period_us =
|
||||
p_ap_cmd->init.pg_sampling_period_us;
|
||||
break;
|
||||
|
||||
case PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL:
|
||||
gk20a_dbg_pmu("cmd post PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL");
|
||||
cmd.cmd.pg.ap_cmd.init_and_enable_ctrl.ctrl_id =
|
||||
p_ap_cmd->init_and_enable_ctrl.ctrl_id;
|
||||
memcpy(
|
||||
(void *)&(cmd.cmd.pg.ap_cmd.init_and_enable_ctrl.params),
|
||||
(void *)&(p_ap_cmd->init_and_enable_ctrl.params),
|
||||
sizeof(struct pmu_ap_ctrl_init_params));
|
||||
|
||||
p_callback = ap_callback_init_and_enable_ctrl;
|
||||
break;
|
||||
|
||||
case PMU_AP_CMD_ID_ENABLE_CTRL:
|
||||
gk20a_dbg_pmu("cmd post PMU_AP_CMD_ID_ENABLE_CTRL");
|
||||
cmd.cmd.pg.ap_cmd.enable_ctrl.ctrl_id =
|
||||
p_ap_cmd->enable_ctrl.ctrl_id;
|
||||
break;
|
||||
|
||||
case PMU_AP_CMD_ID_DISABLE_CTRL:
|
||||
gk20a_dbg_pmu("cmd post PMU_AP_CMD_ID_DISABLE_CTRL");
|
||||
cmd.cmd.pg.ap_cmd.disable_ctrl.ctrl_id =
|
||||
p_ap_cmd->disable_ctrl.ctrl_id;
|
||||
break;
|
||||
|
||||
case PMU_AP_CMD_ID_KICK_CTRL:
|
||||
gk20a_dbg_pmu("cmd post PMU_AP_CMD_ID_KICK_CTRL");
|
||||
cmd.cmd.pg.ap_cmd.kick_ctrl.ctrl_id =
|
||||
p_ap_cmd->kick_ctrl.ctrl_id;
|
||||
cmd.cmd.pg.ap_cmd.kick_ctrl.skip_count =
|
||||
p_ap_cmd->kick_ctrl.skip_count;
|
||||
break;
|
||||
|
||||
default:
|
||||
gk20a_dbg_pmu("%s: Invalid Adaptive Power command %d\n",
|
||||
__func__, p_ap_cmd->cmn.cmd_id);
|
||||
return 0x2f;
|
||||
}
|
||||
|
||||
status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
|
||||
p_callback, pmu, &seq, ~0);
|
||||
|
||||
if (status) {
|
||||
gk20a_dbg_pmu(
|
||||
"%s: Unable to submit Adaptive Power Command %d\n",
|
||||
__func__, p_ap_cmd->cmn.cmd_id);
|
||||
goto err_return;
|
||||
}
|
||||
|
||||
/* TODO: Implement blocking calls (b_block) */
|
||||
|
||||
err_return:
|
||||
return status;
|
||||
}

static void ap_callback_init_and_enable_ctrl(
		struct gk20a *g, struct pmu_msg *msg,
		void *param, u32 seq_desc, u32 status)
{
	/* Define p_ap (i.e pointer to pmu_ap structure) */
	WARN_ON(!msg);

	if (!status) {
		switch (msg->msg.pg.ap_msg.cmn.msg_id) {
		case PMU_AP_MSG_ID_INIT_ACK:
			gk20a_dbg_pmu("reply PMU_AP_CMD_ID_INIT");
			break;

		default:
			gk20a_dbg_pmu(
				"%s: Invalid Adaptive Power Message: %x\n",
				__func__, msg->msg.pg.ap_msg.cmn.msg_id);
			break;
		}
	}
}

int nvgpu_aelpg_init(struct gk20a *g)
{
	int status = 0;

	/* Remove reliance on app_ctrl field. */
	union pmu_ap_cmd ap_cmd;

	/* TODO: Check for elpg being ready? */
	ap_cmd.init.cmd_id = PMU_AP_CMD_ID_INIT;
	ap_cmd.init.pg_sampling_period_us = g->pmu.aelpg_param[0];

	status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
	return status;
}

int nvgpu_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id)
{
	int status = 0;
	union pmu_ap_cmd ap_cmd;

	/* TODO: Probably check if ELPG is ready? */
	ap_cmd.init_and_enable_ctrl.cmd_id = PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL;
	ap_cmd.init_and_enable_ctrl.ctrl_id = ctrl_id;
	ap_cmd.init_and_enable_ctrl.params.min_idle_filter_us =
		g->pmu.aelpg_param[1];
	ap_cmd.init_and_enable_ctrl.params.min_target_saving_us =
		g->pmu.aelpg_param[2];
	ap_cmd.init_and_enable_ctrl.params.power_break_even_us =
		g->pmu.aelpg_param[3];
	ap_cmd.init_and_enable_ctrl.params.cycles_per_sample_max =
		g->pmu.aelpg_param[4];

	switch (ctrl_id) {
	case PMU_AP_CTRL_ID_GRAPHICS:
		break;
	default:
		break;
	}

	status = nvgpu_pmu_ap_send_command(g, &ap_cmd, true);
	return status;
}
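
/* Editor's note: illustrative bring-up order only; the helper name is
 * an assumption, not part of this commit. INIT must reach the PMU
 * before a controller is initialized and enabled, mirroring the call
 * order used at ELPG setup time.
 */
static int my_aelpg_bringup(struct gk20a *g)
{
	int err;

	err = nvgpu_aelpg_init(g);	/* posts PMU_AP_CMD_ID_INIT */
	if (err)
		return err;

	/* init + enable the graphics controller in one command */
	return nvgpu_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);
}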

@@ -35,51 +35,9 @@ struct nvgpu_firmware;
#define FUSE_GCPLEX_CONFIG_FUSE_0 0x2C8
#endif

#define PMU_PGENG_GR_BUFFER_IDX_INIT (0)
#define PMU_PGENG_GR_BUFFER_IDX_ZBC (1)
#define PMU_PGENG_GR_BUFFER_IDX_FECS (2)

#define PMU_PG_IDLE_THRESHOLD_SIM 1000
#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD_SIM 4000000
/* TBD: QT or else ? */
#define PMU_PG_IDLE_THRESHOLD 15000
#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD 1000000

#define PMU_PG_LPWR_FEATURE_RPPG 0x0
#define PMU_PG_LPWR_FEATURE_MSCG 0x1

/* state transition :
   OFF => [OFF_ON_PENDING optional] => ON_PENDING => ON => OFF
   ON => OFF is always synchronized */
#define PMU_ELPG_STAT_OFF 0 /* elpg is off */
#define PMU_ELPG_STAT_ON 1 /* elpg is on */
#define PMU_ELPG_STAT_ON_PENDING 2 /* elpg is off, ALLOW cmd has been sent, wait for ack */
#define PMU_ELPG_STAT_OFF_PENDING 3 /* elpg is on, DISALLOW cmd has been sent, wait for ack */
#define PMU_ELPG_STAT_OFF_ON_PENDING 4 /* elpg is off, caller has requested on, but ALLOW
					  cmd hasn't been sent due to ENABLE_ALLOW delay */

#define PG_REQUEST_TYPE_GLOBAL 0x0
#define PG_REQUEST_TYPE_PSTATE 0x1

#define PMU_MSCG_DISABLED 0
#define PMU_MSCG_ENABLED 1

/* Default Sampling Period of AELPG */
#define APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US (1000000)

/* Default values of APCTRL parameters */
#define APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US (100)
#define APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US (10000)
#define APCTRL_POWER_BREAKEVEN_DEFAULT_US (2000)
#define APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT (200)

bool gk20a_pmu_is_interrupted(struct nvgpu_pmu *pmu);
void gk20a_pmu_isr(struct gk20a *g);

int gk20a_pmu_enable_elpg(struct gk20a *g);
int gk20a_pmu_disable_elpg(struct gk20a *g);
int gk20a_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg);

u32 gk20a_pmu_pg_engines_list(struct gk20a *g);
u32 gk20a_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id);

@@ -87,6 +45,8 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries);

int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable);

void gk20a_pmu_pg_idle_counter_config(struct gk20a *g, u32 pg_engine_id);

int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token);
int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token);

@@ -109,12 +69,10 @@ void pmu_copy_from_dmem(struct nvgpu_pmu *pmu,
		u32 src, u8 *dst, u32 size, u8 port);
int pmu_reset(struct nvgpu_pmu *pmu);
int pmu_bootstrap(struct nvgpu_pmu *pmu);

void pmu_dump_elpg_stats(struct nvgpu_pmu *pmu);
void pmu_dump_falcon_stats(struct nvgpu_pmu *pmu);

int gk20a_pmu_ap_send_command(struct gk20a *g,
		union pmu_ap_cmd *p_ap_cmd, bool b_block);
int gk20a_aelpg_init(struct gk20a *g);
int gk20a_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id);
void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable);
int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
		u32 *var, u32 val);

@@ -112,6 +112,28 @@ enum {
	PMU_SEQ_STATE_CANCELLED
};

/* PG defines used by nvgpu-pmu */
#define PMU_PG_IDLE_THRESHOLD_SIM 1000
#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD_SIM 4000000
/* TBD: QT or else ? */
#define PMU_PG_IDLE_THRESHOLD 15000
#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD 1000000

#define PMU_PG_LPWR_FEATURE_RPPG 0x0
#define PMU_PG_LPWR_FEATURE_MSCG 0x1

#define PMU_MSCG_DISABLED 0
#define PMU_MSCG_ENABLED 1

/* Default Sampling Period of AELPG */
#define APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US (1000000)

/* Default values of APCTRL parameters */
#define APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US (100)
#define APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US (10000)
#define APCTRL_POWER_BREAKEVEN_DEFAULT_US (2000)
#define APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT (200)

typedef void (*pmu_callback)(struct gk20a *, struct pmu_msg *, void *, u32,
	u32);
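
/* Editor's note: the aelpg_param[] slots consumed by the AELPG init
 * path earlier in this commit line up with the APCTRL defaults above.
 * A hypothetical seeding helper, illustrative only and not part of
 * this diff:
 */
static inline void my_seed_aelpg_params(struct nvgpu_pmu *pmu)
{
	pmu->aelpg_param[0] = APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US;
	pmu->aelpg_param[1] = APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US;
	pmu->aelpg_param[2] = APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US;
	pmu->aelpg_param[3] = APCTRL_POWER_BREAKEVEN_DEFAULT_US;
	pmu->aelpg_param[4] = APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT;
}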

@@ -384,14 +406,6 @@ int nvgpu_pmu_process_init_msg(struct nvgpu_pmu *pmu,
void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state,
		bool post_change_event);

/* PG */
int nvgpu_pmu_init_powergating(struct gk20a *g);
int nvgpu_pmu_init_bind_fecs(struct gk20a *g);
void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g);

int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
		struct pmu_pg_stats_data *pg_stat_data);

/* NVGPU-PMU MEM alloc */
void nvgpu_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem);
void nvgpu_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem,

@@ -405,4 +419,23 @@ int nvgpu_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
int nvgpu_init_pmu_fw_support(struct nvgpu_pmu *pmu);
int nvgpu_pmu_prepare_ns_ucode_blob(struct gk20a *g);

/* PG init */
int nvgpu_pmu_init_powergating(struct gk20a *g);
int nvgpu_pmu_init_bind_fecs(struct gk20a *g);
void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g);

/* PG enable/disable */
int nvgpu_pmu_enable_elpg(struct gk20a *g);
int nvgpu_pmu_disable_elpg(struct gk20a *g);
int nvgpu_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg);

int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
		struct pmu_pg_stats_data *pg_stat_data);

/* AELPG */
int nvgpu_aelpg_init(struct gk20a *g);
int nvgpu_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id);
int nvgpu_pmu_ap_send_command(struct gk20a *g,
		union pmu_ap_cmd *p_ap_cmd, bool b_block);

#endif /* __NVGPU_PMU_H__ */
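
With the rename, out-of-module callers pair the nvgpu_-prefixed entry points roughly as in this sketch (the helper name and the elided work are assumptions, not part of this commit):

static int my_run_without_elpg(struct gk20a *g)
{
	int err;

	err = nvgpu_pmu_disable_elpg(g);
	if (err)
		return err;

	/* ... work that must not be power-gated ... */

	return nvgpu_pmu_enable_elpg(g);
}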

@@ -363,7 +363,7 @@ int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock)
			present_pstate);
	if (is_rppg_supported) {
		if (g->support_pmu && g->can_elpg)
			status = gk20a_pmu_enable_elpg(g);
			status = nvgpu_pmu_enable_elpg(g);
	}

	nvgpu_mutex_release(&pmu->pg_mutex);

@@ -393,7 +393,7 @@ int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock)
			present_pstate);
	if (is_rppg_supported) {
		if (g->support_pmu && g->elpg_enabled) {
			status = gk20a_pmu_disable_elpg(g);
			status = nvgpu_pmu_disable_elpg(g);
			if (status)
				goto exit_unlock;
		}