gpu: nvgpu: PG init sequence update

- Currently the PG task is created for both iGPU and dGPU as part of the
  PMU init sequence, but the task is not required on dGPU and can be
  skipped on iGPU when ELPG is not supported. Create the PG task only
  when PG is supported and skip it otherwise, and make the functions
  that are needed only by the PG unit private to it.
- Allocate the PG instance and set its default properties only when PG
  support is enabled; otherwise skip the allocation.
- Update dependent files as required to reflect the above changes.

JIRA NVGPU-1972

Change-Id: I4efb7f1814a9ad48770acea2173e66f0a4c8a9c1
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2094840
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Mahantesh Kumbar, 2019-04-10 22:19:41 +05:30
Committed: mobile promotions
Commit:    ef524ee0d1 (parent c53c745b02)
15 changed files with 343 additions and 243 deletions
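The heart of the change is a single support check that every PG entry
point now consults: on dGPU, or on iGPU without ELPG, the check fails and
no PG instance or PG task is ever created. A condensed sketch of that
pattern, pieced together from the hunks below (not a verbatim copy of any
single file; the elided body is summarized in comments):

static bool is_pg_supported(struct gk20a *g, struct nvgpu_pmu_pg *pg)
{
        /* dGPU, or iGPU without ELPG: no PG instance and no PG task */
        if (!g->support_ls_pmu || !g->can_elpg || pg == NULL) {
                return false;
        }
        return true;
}

int nvgpu_pmu_enable_elpg(struct gk20a *g)
{
        int ret = 0;

        if (!is_pg_supported(g, g->pmu.pg)) {
                return ret; /* PG was never set up; nothing to do */
        }
        /* ... ELPG allow sequence, serialized by pg->elpg_mutex ... */
        return ret;
}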


@@ -98,8 +98,8 @@ int nvgpu_gr_falcon_bind_fecs_elpg(struct gk20a *g)
 		return err;
 	}
-	if (pmu->pmu_pg.pg_buf.cpu_va == NULL) {
-		err = nvgpu_dma_alloc_map_sys(vm, size, &pmu->pmu_pg.pg_buf);
+	if (pmu->pg->pg_buf.cpu_va == NULL) {
+		err = nvgpu_dma_alloc_map_sys(vm, size, &pmu->pg->pg_buf);
 		if (err != 0) {
 			nvgpu_err(g, "failed to allocate memory");
 			return -ENOMEM;
@@ -116,7 +116,7 @@ int nvgpu_gr_falcon_bind_fecs_elpg(struct gk20a *g)
 		return err;
 	}
-	data = u64_lo32(pmu->pmu_pg.pg_buf.gpu_va >> 8);
+	data = u64_lo32(pmu->pg->pg_buf.gpu_va >> 8);
 	err = g->ops.gr.falcon.ctrl_ctxsw(g,
 		NVGPU_GR_FALCON_METHOD_REGLIST_SET_VIRTUAL_ADDRESS, data, NULL);
 	if (err != 0) {


@@ -64,8 +64,10 @@ void nvgpu_pmu_fw_state_change(struct gk20a *g, struct nvgpu_pmu *pmu,
 	pmu->fw.state = pmu_state;
 	if (post_change_event) {
-		pmu->pmu_pg.pg_init.state_change = true;
-		nvgpu_cond_signal(&pmu->pmu_pg.pg_init.wq);
+		if (g->can_elpg) {
+			pmu->pg->pg_init.state_change = true;
+			nvgpu_cond_signal(&pmu->pg->pg_init.wq);
+		}
 	}
 }


@@ -379,15 +379,15 @@ int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock)
 	if (pstate_lock) {
 		nvgpu_clk_arb_pstate_change_lock(g, true);
 	}
-	nvgpu_mutex_acquire(&pmu->pmu_pg.pg_mutex);
+	nvgpu_mutex_acquire(&pmu->pg->pg_mutex);

 	present_pstate = nvgpu_clk_arb_get_current_pstate(g);

 	is_mscg_supported = nvgpu_lpwr_is_mscg_supported(g,
 			present_pstate);
 	if (is_mscg_supported && g->mscg_enabled) {
-		if (pmu->mscg_stat == 0U) {
-			pmu->mscg_stat = PMU_MSCG_ENABLED;
+		if (pmu->pg->mscg_stat == 0U) {
+			pmu->pg->mscg_stat = PMU_MSCG_ENABLED;
 		}
 	}
@@ -399,7 +399,7 @@ int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock)
 		}
 	}
-	nvgpu_mutex_release(&pmu->pmu_pg.pg_mutex);
+	nvgpu_mutex_release(&pmu->pg->pg_mutex);
 	if (pstate_lock) {
 		nvgpu_clk_arb_pstate_change_lock(g, false);
 	}
@@ -420,7 +420,7 @@ int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock)
 	if (pstate_lock) {
 		nvgpu_clk_arb_pstate_change_lock(g, true);
 	}
-	nvgpu_mutex_acquire(&pmu->pmu_pg.pg_mutex);
+	nvgpu_mutex_acquire(&pmu->pg->pg_mutex);

 	present_pstate = nvgpu_clk_arb_get_current_pstate(g);
@@ -438,13 +438,13 @@ int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock)
 	is_mscg_supported = nvgpu_lpwr_is_mscg_supported(g,
 			present_pstate);
 	if (is_mscg_supported && g->mscg_enabled) {
-		if (pmu->mscg_stat != 0U) {
-			pmu->mscg_stat = PMU_MSCG_DISABLED;
+		if (pmu->pg->mscg_stat != 0U) {
+			pmu->pg->mscg_stat = PMU_MSCG_DISABLED;
 		}
 	}

 exit_unlock:
-	nvgpu_mutex_release(&pmu->pmu_pg.pg_mutex);
+	nvgpu_mutex_release(&pmu->pg->pg_mutex);
 	if (pstate_lock) {
 		nvgpu_clk_arb_pstate_change_lock(g, false);
 	}


@@ -47,7 +47,7 @@ static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg,
 {
 	struct nvgpu_pmu *pmu = param;
 	nvgpu_pmu_dbg(g, "reply ZBC_TABLE_UPDATE");
-	pmu->pmu_pg.zbc_save_done = true;
+	pmu->pg->zbc_save_done = true;
 }

 void gm20b_pmu_save_zbc(struct gk20a *g, u32 entries)
@@ -58,7 +58,7 @@ void gm20b_pmu_save_zbc(struct gk20a *g, u32 entries)
 	int err = 0;

 	if (!nvgpu_pmu_get_fw_ready(g, pmu) ||
-		(entries == 0U) || !pmu->pmu_pg.zbc_ready) {
+		(entries == 0U) || !pmu->pg->zbc_ready) {
 		return;
 	}
@@ -70,7 +70,7 @@ void gm20b_pmu_save_zbc(struct gk20a *g, u32 entries)
 	cmd.cmd.zbc.cmd_type = g->pmu_ver_cmd_id_zbc_table_update;
 	cmd.cmd.zbc.entry_mask = ZBC_MASK(entries);

-	pmu->pmu_pg.zbc_save_done = false;
+	pmu->pg->zbc_save_done = false;

 	nvgpu_pmu_dbg(g, "cmd post ZBC_TABLE_UPDATE");
 	err = nvgpu_pmu_cmd_post(g, &cmd, NULL, PMU_COMMAND_QUEUE_HPQ,
@@ -80,8 +80,8 @@ void gm20b_pmu_save_zbc(struct gk20a *g, u32 entries)
 		return;
 	}

 	pmu_wait_message_cond(pmu, nvgpu_get_poll_timeout(g),
-		&pmu->pmu_pg.zbc_save_done, 1);
-	if (!pmu->pmu_pg.zbc_save_done) {
+		&pmu->pg->zbc_save_done, 1);
+	if (!pmu->pg->zbc_save_done) {
 		nvgpu_err(g, "ZBC save timeout");
 	}
 }
@@ -94,7 +94,7 @@ int gm20b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
 	int err;

 	err = nvgpu_falcon_copy_from_dmem(&pmu->flcn,
-		pmu->pmu_pg.stat_dmem_offset[pg_engine_id],
+		pmu->pg->stat_dmem_offset[pg_engine_id],
 		(u8 *)&stats, (u32)sizeof(struct pmu_pg_stats), 0);
 	if (err != 0) {
 		nvgpu_err(g, "PMU falcon DMEM copy failed");


@@ -106,7 +106,7 @@ int gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
 	int err;

 	err = nvgpu_falcon_copy_from_dmem(&pmu->flcn,
-		pmu->pmu_pg.stat_dmem_offset[pg_engine_id],
+		pmu->pg->stat_dmem_offset[pg_engine_id],
 		(u8 *)&stats, (u32)sizeof(struct pmu_pg_stats_v2), 0);
 	if (err != 0) {
 		nvgpu_err(g, "PMU falcon DMEM copy failed");


@@ -85,7 +85,7 @@ int gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
 	int err;

 	err = nvgpu_falcon_copy_from_dmem(&pmu->flcn,
-		pmu->pmu_pg.stat_dmem_offset[pg_engine_id],
+		pmu->pg->stat_dmem_offset[pg_engine_id],
 		(u8 *)&stats, (u32)sizeof(struct pmu_pg_stats_v1), 0);
 	if (err != 0) {
 		nvgpu_err(g, "PMU falcon DMEM copy failed");


@@ -35,7 +35,7 @@ int nvgpu_aelpg_init(struct gk20a *g)
 	union pmu_ap_cmd ap_cmd;

 	ap_cmd.init.cmd_id = PMU_AP_CMD_ID_INIT;
-	ap_cmd.init.pg_sampling_period_us = g->pmu.pmu_pg.aelpg_param[0];
+	ap_cmd.init.pg_sampling_period_us = g->pmu.pg->aelpg_param[0];

 	status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
 	return status;
@@ -43,19 +43,20 @@ int nvgpu_aelpg_init(struct gk20a *g)

 int nvgpu_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id)
 {
+	struct nvgpu_pmu *pmu = &g->pmu;
 	int status = 0;
 	union pmu_ap_cmd ap_cmd;

 	ap_cmd.init_and_enable_ctrl.cmd_id = PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL;
 	ap_cmd.init_and_enable_ctrl.ctrl_id = ctrl_id;
 	ap_cmd.init_and_enable_ctrl.params.min_idle_filter_us =
-		g->pmu.pmu_pg.aelpg_param[1];
+		pmu->pg->aelpg_param[1];
 	ap_cmd.init_and_enable_ctrl.params.min_target_saving_us =
-		g->pmu.pmu_pg.aelpg_param[2];
+		pmu->pg->aelpg_param[2];
 	ap_cmd.init_and_enable_ctrl.params.power_break_even_us =
-		g->pmu.pmu_pg.aelpg_param[3];
+		pmu->pg->aelpg_param[3];
 	ap_cmd.init_and_enable_ctrl.params.cycles_per_sample_max =
-		g->pmu.pmu_pg.aelpg_param[4];
+		pmu->pg->aelpg_param[4];

 	switch (ctrl_id) {
 	case PMU_AP_CTRL_ID_GRAPHICS:


@@ -54,19 +54,28 @@
 #define PMU_PGENG_GR_BUFFER_IDX_ZBC	(1)
 #define PMU_PGENG_GR_BUFFER_IDX_FECS	(2)

-static int pmu_setup_hw_enable_elpg(struct gk20a *g)
+static bool is_pg_supported(struct gk20a *g, struct nvgpu_pmu_pg *pg)
 {
-	struct nvgpu_pmu *pmu = &g->pmu;
-	int err = 0;
+	if (!g->support_ls_pmu || !g->can_elpg || pg == NULL) {
+		return false;
+	}
+	return true;
+}
+
+static int pmu_pg_setup_hw_enable_elpg(struct gk20a *g, struct nvgpu_pmu *pmu,
+	struct nvgpu_pmu_pg *pg)
+{
+	int err = 0;

 	nvgpu_log_fn(g, " ");

-	pmu->pmu_pg.initialized = true;
+	pg->initialized = true;
 	nvgpu_pmu_fw_state_change(g, pmu, PMU_FW_STATE_STARTED, false);

 	if (nvgpu_is_enabled(g, NVGPU_PMU_ZBC_SAVE)) {
 		/* Save zbc table after PMU is initialized. */
-		pmu->pmu_pg.zbc_ready = true;
+		pg->zbc_ready = true;
 		g->ops.pmu.save_zbc(g, 0xf);
 	}
@@ -126,9 +135,9 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
 		nvgpu_pmu_dbg(g, "ALLOW is ack from PMU, eng - %d",
 			elpg_msg->engine_id);
 		if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
-			pmu->mscg_transition_state = PMU_ELPG_STAT_ON;
+			pmu->pg->mscg_transition_state = PMU_ELPG_STAT_ON;
 		} else {
-			pmu->pmu_pg.elpg_stat = PMU_ELPG_STAT_ON;
+			pmu->pg->elpg_stat = PMU_ELPG_STAT_ON;
 		}
 		break;
 	case PMU_PG_ELPG_MSG_DISALLOW_ACK:
@@ -136,9 +145,9 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
 			elpg_msg->engine_id);
 		if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
-			pmu->mscg_transition_state = PMU_ELPG_STAT_OFF;
+			pmu->pg->mscg_transition_state = PMU_ELPG_STAT_OFF;
 		} else {
-			pmu->pmu_pg.elpg_stat = PMU_ELPG_STAT_OFF;
+			pmu->pg->elpg_stat = PMU_ELPG_STAT_OFF;
 		}
 		if (nvgpu_pmu_get_fw_state(g, pmu) ==
@@ -147,10 +156,10 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
 			g->ops.pmu.pmu_pg_engines_feature_list(g,
 				PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
 				NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) {
-				pmu->pmu_pg.initialized = true;
-				nvgpu_pmu_fw_state_change(g, pmu,
-					PMU_FW_STATE_STARTED, true);
-				WRITE_ONCE(pmu->mscg_stat, PMU_MSCG_DISABLED);
+				pmu->pg->initialized = true;
+				nvgpu_pmu_fw_state_change(g, pmu, PMU_FW_STATE_STARTED,
+					true);
+				WRITE_ONCE(pmu->pg->mscg_stat, PMU_MSCG_DISABLED);
 				/* make status visible */
 				nvgpu_smp_mb();
 			} else {
@@ -171,7 +180,7 @@ int nvgpu_pmu_pg_global_enable(struct gk20a *g, bool enable_pg)
 {
 	int status = 0;

-	if (!g->support_ls_pmu) {
+	if (!is_pg_supported(g, g->pmu.pg)) {
 		return status;
 	}
@@ -226,9 +235,9 @@ static int pmu_enable_elpg_locked(struct gk20a *g, u8 pg_engine_id)
	 * pending to sync with follow up ELPG disable
	 */
 	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
-		pmu->pmu_pg.elpg_stat = PMU_ELPG_STAT_ON_PENDING;
+		pmu->pg->elpg_stat = PMU_ELPG_STAT_ON_PENDING;
 	} else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
-		pmu->mscg_transition_state = PMU_ELPG_STAT_ON_PENDING;
+		pmu->pg->mscg_transition_state = PMU_ELPG_STAT_ON_PENDING;
 	}

 	nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_ALLOW");
@@ -256,34 +265,34 @@ int nvgpu_pmu_enable_elpg(struct gk20a *g)
 	nvgpu_log_fn(g, " ");

-	if (!g->support_ls_pmu) {
+	if (!is_pg_supported(g, g->pmu.pg)) {
 		return ret;
 	}

-	nvgpu_mutex_acquire(&pmu->pmu_pg.elpg_mutex);
+	nvgpu_mutex_acquire(&pmu->pg->elpg_mutex);

-	pmu->pmu_pg.elpg_refcnt++;
-	if (pmu->pmu_pg.elpg_refcnt <= 0) {
+	pmu->pg->elpg_refcnt++;
+	if (pmu->pg->elpg_refcnt <= 0) {
 		goto exit_unlock;
 	}

 	/* something is not right if we end up in following code path */
-	if (unlikely(pmu->pmu_pg.elpg_refcnt > 1)) {
+	if (unlikely(pmu->pg->elpg_refcnt > 1)) {
 		nvgpu_warn(g,
 			"%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
-			__func__, pmu->pmu_pg.elpg_refcnt);
+			__func__, pmu->pg->elpg_refcnt);
 		WARN_ON(true);
 	}

 	/* do NOT enable elpg until golden ctx is created,
	 * which is related with the ctx that ELPG save and restore.
	 */
-	if (unlikely(!pmu->pmu_pg.golden_image_initialized)) {
+	if (unlikely(!pmu->pg->golden_image_initialized)) {
 		goto exit_unlock;
 	}

 	/* return if ELPG is already on or on_pending or off_on_pending */
-	if (pmu->pmu_pg.elpg_stat != PMU_ELPG_STAT_OFF) {
+	if (pmu->pg->elpg_stat != PMU_ELPG_STAT_OFF) {
 		goto exit_unlock;
 	}
@@ -296,7 +305,7 @@ int nvgpu_pmu_enable_elpg(struct gk20a *g)
 			pg_engine_id++) {

 		if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
-			pmu->mscg_stat == PMU_MSCG_DISABLED) {
+			pmu->pg->mscg_stat == PMU_MSCG_DISABLED) {
 			continue;
 		}
@@ -306,7 +315,7 @@ int nvgpu_pmu_enable_elpg(struct gk20a *g)
 	}

 exit_unlock:
-	nvgpu_mutex_release(&pmu->pmu_pg.elpg_mutex);
+	nvgpu_mutex_release(&pmu->pg->elpg_mutex);
 	nvgpu_log_fn(g, "done");
 	return ret;
 }
@@ -315,10 +324,14 @@ static void pmu_dump_elpg_stats(struct nvgpu_pmu *pmu)
 {
 	struct gk20a *g = pmu->g;

+	if (!is_pg_supported(g, pmu->pg)) {
+		return;
+	}
+
 	/* Print PG stats */
 	nvgpu_err(g, "Print PG stats");
 	nvgpu_falcon_print_dmem(&pmu->flcn,
-		pmu->pmu_pg.stat_dmem_offset[PMU_PG_ELPG_ENGINE_ID_GRAPHICS],
+		pmu->pg->stat_dmem_offset[PMU_PG_ELPG_ENGINE_ID_GRAPHICS],
 		(u32)sizeof(struct pmu_pg_stats_v2));

 	/* Print ELPG stats */
@@ -337,7 +350,7 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
 	nvgpu_log_fn(g, " ");

-	if (!g->support_ls_pmu) {
+	if (!is_pg_supported(g, pmu->pg)) {
 		return ret;
 	}
@@ -345,33 +358,33 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
 		pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
 	}

-	nvgpu_mutex_acquire(&pmu->pmu_pg.elpg_mutex);
+	nvgpu_mutex_acquire(&pmu->pg->elpg_mutex);

-	pmu->pmu_pg.elpg_refcnt--;
-	if (pmu->pmu_pg.elpg_refcnt > 0) {
+	pmu->pg->elpg_refcnt--;
+	if (pmu->pg->elpg_refcnt > 0) {
 		nvgpu_warn(g,
 			"%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
-			__func__, pmu->pmu_pg.elpg_refcnt);
+			__func__, pmu->pg->elpg_refcnt);
 		WARN_ON(true);
 		ret = 0;
 		goto exit_unlock;
 	}

 	/* cancel off_on_pending and return */
-	if (pmu->pmu_pg.elpg_stat == PMU_ELPG_STAT_OFF_ON_PENDING) {
-		pmu->pmu_pg.elpg_stat = PMU_ELPG_STAT_OFF;
+	if (pmu->pg->elpg_stat == PMU_ELPG_STAT_OFF_ON_PENDING) {
+		pmu->pg->elpg_stat = PMU_ELPG_STAT_OFF;
 		ret = 0;
 		goto exit_reschedule;
 	}
 	/* wait if on_pending */
-	else if (pmu->pmu_pg.elpg_stat == PMU_ELPG_STAT_ON_PENDING) {
+	else if (pmu->pg->elpg_stat == PMU_ELPG_STAT_ON_PENDING) {
 		pmu_wait_message_cond(pmu, nvgpu_get_poll_timeout(g),
-			&pmu->pmu_pg.elpg_stat, PMU_ELPG_STAT_ON);
+			&pmu->pg->elpg_stat, PMU_ELPG_STAT_ON);

-		if (pmu->pmu_pg.elpg_stat != PMU_ELPG_STAT_ON) {
+		if (pmu->pg->elpg_stat != PMU_ELPG_STAT_ON) {
 			nvgpu_err(g, "ELPG_ALLOW_ACK failed, elpg_stat=%d",
-				pmu->pmu_pg.elpg_stat);
+				pmu->pg->elpg_stat);
 			pmu_dump_elpg_stats(pmu);
 			nvgpu_pmu_dump_falcon_stats(pmu);
 			ret = -EBUSY;
@@ -379,7 +392,7 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
 		}
 	}
 	/* return if ELPG is already off */
-	else if (pmu->pmu_pg.elpg_stat != PMU_ELPG_STAT_ON) {
+	else if (pmu->pg->elpg_stat != PMU_ELPG_STAT_ON) {
 		ret = 0;
 		goto exit_reschedule;
 	}
@@ -389,7 +402,7 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
 			pg_engine_id++) {

 		if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
-			pmu->mscg_stat == PMU_MSCG_DISABLED) {
+			pmu->pg->mscg_stat == PMU_MSCG_DISABLED) {
 			continue;
 		}
@@ -405,15 +418,15 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
 		cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_DISALLOW;

 		if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
-			pmu->pmu_pg.elpg_stat = PMU_ELPG_STAT_OFF_PENDING;
+			pmu->pg->elpg_stat = PMU_ELPG_STAT_OFF_PENDING;
 		} else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
-			pmu->mscg_transition_state =
+			pmu->pg->mscg_transition_state =
 				PMU_ELPG_STAT_OFF_PENDING;
 		}
 		if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
-			ptr = &pmu->pmu_pg.elpg_stat;
+			ptr = &pmu->pg->elpg_stat;
 		} else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
-			ptr = &pmu->mscg_transition_state;
+			ptr = &pmu->pg->mscg_transition_state;
 		}

 		nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_DISALLOW");
@@ -441,7 +454,7 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
 exit_reschedule:
 exit_unlock:
-	nvgpu_mutex_release(&pmu->pmu_pg.elpg_mutex);
+	nvgpu_mutex_release(&pmu->pg->elpg_mutex);
 	nvgpu_log_fn(g, "done");
 	return ret;
 }
@@ -462,7 +475,7 @@ static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg,
 	switch (msg->msg.pg.stat.sub_msg_id) {
 	case PMU_PG_STAT_MSG_RESP_DMEM_OFFSET:
 		nvgpu_pmu_dbg(g, "ALLOC_DMEM_OFFSET is acknowledged from PMU");
-		pmu->pmu_pg.stat_dmem_offset[msg->msg.pg.stat.engine_id] =
+		pmu->pg->stat_dmem_offset[msg->msg.pg.stat.engine_id] =
 			msg->msg.pg.stat.data;
 		break;
 	default:
@@ -510,7 +523,7 @@ static int pmu_pg_init_send(struct gk20a *g, u8 pg_engine_id)
 	}

 	/* alloc dmem for powergating state log */
-	pmu->pmu_pg.stat_dmem_offset[pg_engine_id] = 0;
+	pmu->pg->stat_dmem_offset[pg_engine_id] = 0;
 	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_PG;
 	tmp = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_stat);
@@ -534,9 +547,9 @@ static int pmu_pg_init_send(struct gk20a *g, u8 pg_engine_id)
	 */
 	/* set for wait_event PMU_ELPG_STAT_OFF */
 	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
-		pmu->pmu_pg.elpg_stat = PMU_ELPG_STAT_OFF;
+		pmu->pg->elpg_stat = PMU_ELPG_STAT_OFF;
 	} else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
-		pmu->mscg_transition_state = PMU_ELPG_STAT_OFF;
+		pmu->pg->mscg_transition_state = PMU_ELPG_STAT_OFF;
 	}

 	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_PG;
@@ -567,11 +580,11 @@ static int pmu_pg_init_send(struct gk20a *g, u8 pg_engine_id)
 	return err;
 }

-int nvgpu_pmu_init_powergating(struct gk20a *g)
+static int pmu_pg_init_powergating(struct gk20a *g, struct nvgpu_pmu *pmu,
+	struct nvgpu_pmu_pg *pg)
 {
 	u8 pg_engine_id;
 	u32 pg_engine_id_list = 0;
-	struct nvgpu_pmu *pmu = &g->pmu;
 	int err = 0;

 	nvgpu_log_fn(g, " ");
@@ -625,8 +638,8 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
 		return;
 	}

-	pmu->pmu_pg.buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED);
-	if ((!pmu->pmu_pg.buf_loaded) &&
+	pmu->pg->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED);
+	if ((!pmu->pg->buf_loaded) &&
 		(nvgpu_pmu_get_fw_state(g, pmu) ==
 			PMU_FW_STATE_LOADING_PG_BUF)) {
 		nvgpu_err(g, "failed to load PGENG buffer");
@@ -636,9 +649,9 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
 	}
 }

-int nvgpu_pmu_init_bind_fecs(struct gk20a *g)
+static int pmu_pg_init_bind_fecs(struct gk20a *g, struct nvgpu_pmu *pmu,
+	struct nvgpu_pmu_pg *pg)
 {
-	struct nvgpu_pmu *pmu = &g->pmu;
 	struct pmu_cmd cmd;
 	int err = 0;
 	u32 gr_engine_id;
@@ -659,16 +672,17 @@ int nvgpu_pmu_init_bind_fecs(struct gk20a *g)
 	pmu->fw.ops.pg_cmd_eng_buf_load_set_buf_idx(&cmd.cmd.pg,
 		PMU_PGENG_GR_BUFFER_IDX_FECS);
 	pmu->fw.ops.pg_cmd_eng_buf_load_set_buf_size(&cmd.cmd.pg,
-		pmu->pmu_pg.pg_buf.size);
+		pmu->pg->pg_buf.size);
 	pmu->fw.ops.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg,
-		u64_lo32(pmu->pmu_pg.pg_buf.gpu_va));
+		u64_lo32(pmu->pg->pg_buf.gpu_va));
 	pmu->fw.ops.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg,
-		(u8)(pmu->pmu_pg.pg_buf.gpu_va & 0xFFU));
+		(u8)(pmu->pg->pg_buf.gpu_va & 0xFFU));
 	pmu->fw.ops.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg,
 		PMU_DMAIDX_VIRT);

-	pmu->pmu_pg.buf_loaded = false;
-	nvgpu_pmu_dbg(g, "cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
+	pg->buf_loaded = false;
+	nvgpu_pmu_dbg(g,
+		"cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
 	nvgpu_pmu_fw_state_change(g, pmu, PMU_FW_STATE_LOADING_PG_BUF, false);
 	err = nvgpu_pmu_cmd_post(g, &cmd, NULL, PMU_COMMAND_QUEUE_LPQ,
 		pmu_handle_pg_buf_config_msg, pmu);
@@ -679,9 +693,9 @@ int nvgpu_pmu_init_bind_fecs(struct gk20a *g)
 	return err;
 }

-void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g)
+static void pmu_pg_setup_hw_load_zbc(struct gk20a *g, struct nvgpu_pmu *pmu,
+	struct nvgpu_pmu_pg *pg)
 {
-	struct nvgpu_pmu *pmu = &g->pmu;
 	struct pmu_cmd cmd;
 	u32 gr_engine_id;
 	int err = 0;
@@ -700,16 +714,17 @@ void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g)
 	pmu->fw.ops.pg_cmd_eng_buf_load_set_buf_idx(&cmd.cmd.pg,
 		PMU_PGENG_GR_BUFFER_IDX_ZBC);
 	pmu->fw.ops.pg_cmd_eng_buf_load_set_buf_size(&cmd.cmd.pg,
-		pmu->pmu_pg.seq_buf.size);
+		pmu->pg->seq_buf.size);
 	pmu->fw.ops.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg,
-		u64_lo32(pmu->pmu_pg.seq_buf.gpu_va));
+		u64_lo32(pmu->pg->seq_buf.gpu_va));
 	pmu->fw.ops.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg,
-		(u8)(pmu->pmu_pg.seq_buf.gpu_va & 0xFFU));
+		(u8)(pmu->pg->seq_buf.gpu_va & 0xFFU));
 	pmu->fw.ops.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg,
 		PMU_DMAIDX_VIRT);

-	pmu->pmu_pg.buf_loaded = false;
-	nvgpu_pmu_dbg(g, "cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_ZBC");
+	pg->buf_loaded = false;
+	nvgpu_pmu_dbg(g,
+		"cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_ZBC");
 	nvgpu_pmu_fw_state_change(g, pmu, PMU_FW_STATE_LOADING_ZBC, false);
 	err = nvgpu_pmu_cmd_post(g, &cmd, NULL, PMU_COMMAND_QUEUE_LPQ,
 		pmu_handle_pg_buf_config_msg, pmu);
@@ -726,7 +741,7 @@ int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
 	u32 pg_engine_id_list = 0;
 	int err = 0;

-	if (!pmu->pmu_pg.initialized) {
+	if (!is_pg_supported(g, pmu->pg) || !pmu->pg->initialized) {
 		pg_stat_data->ingating_time = 0;
 		pg_stat_data->ungating_time = 0;
 		pg_stat_data->gating_cnt = 0;
@@ -745,45 +760,21 @@
 	return err;
 }

-int nvgpu_init_task_pg_init(struct gk20a *g)
+/* PG state machine */
+static void pmu_pg_kill_task(struct gk20a *g, struct nvgpu_pmu *pmu,
+	struct nvgpu_pmu_pg *pg)
 {
-	struct nvgpu_pmu *pmu = &g->pmu;
-	char thread_name[64];
-	int err = 0;
-
-	nvgpu_log_fn(g, " ");
-
-	err = nvgpu_cond_init(&pmu->pmu_pg.pg_init.wq);
-	if (err != 0) {
-		nvgpu_err(g, "nvgpu_cond_init failed err=%d", err);
-		return err;
-	}
-
-	(void) snprintf(thread_name, sizeof(thread_name),
-		"nvgpu_pg_init_%s", g->name);
-
-	err = nvgpu_thread_create(&pmu->pmu_pg.pg_init.state_task, g,
-		nvgpu_pg_init_task, thread_name);
-	if (err != 0) {
-		nvgpu_err(g, "failed to start nvgpu_pg_init thread");
-	}
-
-	return err;
-}
-
-void nvgpu_kill_task_pg_init(struct gk20a *g)
-{
-	struct nvgpu_pmu *pmu = &g->pmu;
 	struct nvgpu_timeout timeout;
 	int err = 0;

 	/* make sure the pending operations are finished before we continue */
-	if (nvgpu_thread_is_running(&pmu->pmu_pg.pg_init.state_task)) {
+	if (nvgpu_thread_is_running(&pg->pg_init.state_task)) {

 		/* post PMU_FW_STATE_EXIT to exit PMU state machine loop */
 		nvgpu_pmu_fw_state_change(g, pmu, PMU_FW_STATE_EXIT, true);

 		/* Make thread stop*/
-		nvgpu_thread_stop(&pmu->pmu_pg.pg_init.state_task);
+		nvgpu_thread_stop(&pg->pg_init.state_task);

 		/* wait to confirm thread stopped */
 		err = nvgpu_timeout_init(g, &timeout, 1000,
@@ -793,22 +784,22 @@ void nvgpu_kill_task_pg_init(struct gk20a *g)
 			return;
 		}
 		do {
-			if (!nvgpu_thread_is_running(&pmu->pmu_pg.pg_init.state_task)) {
+			if (!nvgpu_thread_is_running(&pg->pg_init.state_task)) {
 				break;
 			}
 			nvgpu_udelay(2);
 		} while (nvgpu_timeout_expired_msg(&timeout,
 			"timeout - waiting PMU state machine thread stop") == 0);
 	} else {
-		nvgpu_thread_join(&pmu->pmu_pg.pg_init.state_task);
+		nvgpu_thread_join(&pg->pg_init.state_task);
 	}
 }

-int nvgpu_pg_init_task(void *arg)
+static int pmu_pg_task(void *arg)
 {
 	struct gk20a *g = (struct gk20a *)arg;
 	struct nvgpu_pmu *pmu = &g->pmu;
-	struct nvgpu_pg_init *pg_init = &pmu->pmu_pg.pg_init;
+	struct nvgpu_pg_init *pg_init = &pmu->pg->pg_init;
 	u32 pmu_state = 0;
 	int err = 0;
@@ -819,7 +810,7 @@ int nvgpu_pg_init_task(void *arg)
 		NVGPU_COND_WAIT_INTERRUPTIBLE(&pg_init->wq,
 			(pg_init->state_change == true), 0U);

-		pmu->pmu_pg.pg_init.state_change = false;
+		pmu->pg->pg_init.state_change = false;
 		pmu_state = nvgpu_pmu_get_fw_state(g, pmu);

 		if (pmu_state == PMU_FW_STATE_EXIT) {
@@ -831,20 +822,20 @@ int nvgpu_pg_init_task(void *arg)
 		case PMU_FW_STATE_INIT_RECEIVED:
 			nvgpu_pmu_dbg(g, "pmu starting");
 			if (g->can_elpg) {
-				err = nvgpu_pmu_init_powergating(g);
+				err = pmu_pg_init_powergating(g, pmu, pmu->pg);
 			}
 			break;
 		case PMU_FW_STATE_ELPG_BOOTED:
 			nvgpu_pmu_dbg(g, "elpg booted");
-			err = nvgpu_pmu_init_bind_fecs(g);
+			err = pmu_pg_init_bind_fecs(g, pmu, pmu->pg);
 			break;
 		case PMU_FW_STATE_LOADING_PG_BUF:
 			nvgpu_pmu_dbg(g, "loaded pg buf");
-			nvgpu_pmu_setup_hw_load_zbc(g);
+			pmu_pg_setup_hw_load_zbc(g, pmu, pmu->pg);
 			break;
 		case PMU_FW_STATE_LOADING_ZBC:
 			nvgpu_pmu_dbg(g, "loaded zbc");
-			err = pmu_setup_hw_enable_elpg(g);
+			err = pmu_pg_setup_hw_enable_elpg(g, pmu, pmu->pg);
 			nvgpu_pmu_dbg(g, "PMU booted, thread exiting");
 			return 0;
 		default:
@@ -871,36 +862,172 @@ int nvgpu_pg_init_task(void *arg)
 	return err;
 }

-int nvgpu_pmu_pg_init_seq_buf(struct nvgpu_pmu *pmu, struct vm_gk20a *vm)
+static int pmu_pg_task_init(struct gk20a *g, struct nvgpu_pmu_pg *pg)
 {
+	char thread_name[64];
+	int err = 0;
+
+	nvgpu_log_fn(g, " ");
+
+	nvgpu_cond_init(&pg->pg_init.wq);
+
+	(void) snprintf(thread_name, sizeof(thread_name),
+		"nvgpu_pg_init_%s", g->name);
+
+	err = nvgpu_thread_create(&pg->pg_init.state_task, g,
+		pmu_pg_task, thread_name);
+	if (err != 0) {
+		nvgpu_err(g, "failed to start nvgpu_pg_init thread");
+	}
+
+	return err;
+}
+
+static int pmu_pg_init_seq_buf(struct gk20a *g, struct nvgpu_pmu *pmu,
+	struct nvgpu_pmu_pg *pg)
+{
+	struct mm_gk20a *mm = &g->mm;
+	struct vm_gk20a *vm = mm->pmu.vm;
 	int err;
 	u8 *ptr;

 	err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_SEQ_BUF_SIZE,
-		&pmu->pmu_pg.seq_buf);
+		&pg->seq_buf);
 	if (err != 0) {
 		return err;
 	}

-	ptr = (u8 *)pmu->pmu_pg.seq_buf.cpu_va;
+	ptr = (u8 *)pg->seq_buf.cpu_va;

 	ptr[0] = 0x16; /* opcode EXIT */
 	ptr[1] = 0; ptr[2] = 1; ptr[3] = 0;
 	ptr[4] = 0; ptr[5] = 0; ptr[6] = 0; ptr[7] = 0;

-	pmu->pmu_pg.seq_buf.size = GK20A_PMU_SEQ_BUF_SIZE;
+	pg->seq_buf.size = GK20A_PMU_SEQ_BUF_SIZE;

 	return err;
 }

-void nvgpu_pmu_pg_free_seq_buf(struct nvgpu_pmu *pmu, struct vm_gk20a *vm)
+int nvgpu_pmu_pg_sw_setup(struct gk20a *g, struct nvgpu_pmu *pmu,
+	struct nvgpu_pmu_pg *pg)
 {
-	nvgpu_dma_unmap_free(vm, &pmu->pmu_pg.seq_buf);
+	int err;
+
+	if (!is_pg_supported(g, pg)) {
+		return 0;
+	}
+
+	/* start with elpg disabled until first enable call */
+	pg->elpg_refcnt = 0;
+
+	/* skip seq_buf alloc during unrailgate path */
+	if (!nvgpu_mem_is_valid(&pg->seq_buf)) {
+		err = pmu_pg_init_seq_buf(g, pmu, pg);
+		if (err != 0) {
+			nvgpu_err(g, "failed to allocate memory");
+			return err;
+		}
+	}
+
+	/* Create thread to handle PMU state machine */
+	return pmu_pg_task_init(g, pg);
+}
+
+void nvgpu_pmu_pg_destroy(struct gk20a *g, struct nvgpu_pmu *pmu,
+	struct nvgpu_pmu_pg *pg)
+{
+	struct pmu_pg_stats_data pg_stat_data = { 0 };
+
+	if (!is_pg_supported(g, pg)) {
+		return;
+	}
+
+	pmu_pg_kill_task(g, pmu, pg);
+
+	nvgpu_pmu_get_pg_stats(g,
+		PMU_PG_ELPG_ENGINE_ID_GRAPHICS, &pg_stat_data);
+
+	if (nvgpu_pmu_disable_elpg(g) != 0) {
+		nvgpu_err(g, "failed to set disable elpg");
+	}
+	pg->initialized = false;
+
+	/* update the s/w ELPG residency counters */
+	g->pg_ingating_time_us += (u64)pg_stat_data.ingating_time;
+	g->pg_ungating_time_us += (u64)pg_stat_data.ungating_time;
+	g->pg_gating_cnt += pg_stat_data.gating_cnt;
+
+	pg->zbc_ready = false;
+}
+
+int nvgpu_pmu_pg_init(struct gk20a *g, struct nvgpu_pmu *pmu,
+	struct nvgpu_pmu_pg **pg_p)
+{
+	struct nvgpu_pmu_pg *pg;
+	int err = 0;
+
+	if (*pg_p != NULL) {
+		/* skip alloc/reinit for unrailgate sequence */
+		nvgpu_pmu_dbg(g, "skip lsfm init for unrailgate sequence");
+		goto exit;
+	}
+
+	pg = (struct nvgpu_pmu_pg *)
+		nvgpu_kzalloc(g, sizeof(struct nvgpu_pmu_pg));
+	if (pg == NULL) {
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	/* set default values to aelpg parameters */
+	pg->aelpg_param[0] = APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US;
+	pg->aelpg_param[1] = APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US;
+	pg->aelpg_param[2] = APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US;
+	pg->aelpg_param[3] = APCTRL_POWER_BREAKEVEN_DEFAULT_US;
+	pg->aelpg_param[4] = APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT;
+
+	err = nvgpu_mutex_init(&pg->elpg_mutex);
+	if (err != 0) {
+		nvgpu_kfree(g, pg);
+		goto exit;
+	}
+
+	err = nvgpu_mutex_init(&pg->pg_mutex);
+	if (err != 0) {
+		nvgpu_mutex_destroy(&pg->elpg_mutex);
+		nvgpu_kfree(g, pg);
+		goto exit;
+	}
+
+	*pg_p = pg;
+
+exit:
+	return err;
+}
+
+void nvgpu_pmu_pg_deinit(struct gk20a *g, struct nvgpu_pmu *pmu,
+	struct nvgpu_pmu_pg *pg)
+{
+	struct mm_gk20a *mm = &g->mm;
+	struct vm_gk20a *vm = mm->pmu.vm;
+
+	if (!is_pg_supported(g, pg)) {
+		return;
+	}
+
+	nvgpu_dma_unmap_free(vm, &pg->seq_buf);
+	nvgpu_mutex_destroy(&pg->elpg_mutex);
+	nvgpu_mutex_destroy(&pg->pg_mutex);
+	nvgpu_kfree(g, pg);
 }

 void nvgpu_pmu_set_golden_image_initialized(struct gk20a *g, bool initialized)
 {
 	struct nvgpu_pmu *pmu = &g->pmu;

-	pmu->pmu_pg.golden_image_initialized = initialized;
+	if (!is_pg_supported(g, pmu->pg)) {
+		return;
+	}
+
+	pmu->pg->golden_image_initialized = initialized;
 }
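For orientation, the PG lifecycle that results from the hunks above,
summarized as a comment (function names as introduced in this file):

/*
 * nvgpu_pmu_pg_init()     - boot: allocate struct nvgpu_pmu_pg, apply
 *                           AELPG defaults, init elpg/pg mutexes; the
 *                           existing instance is reused on unrailgate.
 * nvgpu_pmu_pg_sw_setup() - every power-up: allocate seq_buf once,
 *                           then start the pmu_pg_task thread.
 * pmu_pg_task()           - state machine: INIT_RECEIVED ->
 *                           ELPG_BOOTED -> LOADING_PG_BUF ->
 *                           LOADING_ZBC, then enables ELPG and exits.
 * nvgpu_pmu_pg_destroy()  - power-down: stop the task, record ELPG
 *                           residency stats, disable ELPG.
 * nvgpu_pmu_pg_deinit()   - teardown: free seq_buf, destroy mutexes,
 *                           free the PG instance.
 */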


@@ -154,11 +154,13 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g)
 	nvgpu_log_fn(g, " ");

-	/* start with elpg disabled until first enable call */
-	pmu->pmu_pg.elpg_refcnt = 0;
+	if (g->can_elpg) {
+		err = nvgpu_pmu_pg_sw_setup(g, pmu, pmu->pg);
+		if (err != 0){
+			goto skip_init;
+		}
+	}

-	/* Create thread to handle PMU state machine */
-	nvgpu_init_task_pg_init(g);
 	if (pmu->sw_ready) {
 		nvgpu_pmu_mutexes_init(&pmu->mutexes);
 		nvgpu_pmu_sequences_init(&pmu->sequences);
@@ -181,8 +183,6 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g)
 	nvgpu_pmu_sequences_init(&pmu->sequences);

-	err = nvgpu_pmu_pg_init_seq_buf(pmu, vm);
 	if (err != 0) {
 		nvgpu_err(g, "failed to allocate memory");
 		goto err_free_seq;
@@ -192,7 +192,7 @@ static int nvgpu_init_pmu_setup_sw(struct gk20a *g)
 		err = nvgpu_pmu_super_surface_buf_alloc(g,
 			pmu, pmu->super_surface);
 		if (err != 0) {
-			goto err_free_seq_buf;
+			goto err_free_seq;
 		}
 	}
@@ -213,8 +213,6 @@ skip_init:
 		nvgpu_dma_unmap_free(vm, nvgpu_pmu_super_surface_mem(g,
 			pmu, pmu->super_surface));
 	}
-err_free_seq_buf:
-	nvgpu_pmu_pg_free_seq_buf(pmu, vm);
 err_free_seq:
 	nvgpu_pmu_sequences_free(g, &pmu->sequences);
 err_free_mutex:
@@ -231,10 +229,6 @@ int nvgpu_init_pmu_support(struct gk20a *g)
 	nvgpu_log_fn(g, " ");

-	if (pmu->pmu_pg.initialized) {
-		return 0;
-	}
-
 	if (!g->support_ls_pmu) {
 		goto exit;
 	}
@@ -303,27 +297,16 @@
 int nvgpu_pmu_destroy(struct gk20a *g)
 {
 	struct nvgpu_pmu *pmu = &g->pmu;
-	struct pmu_pg_stats_data pg_stat_data = { 0 };

 	nvgpu_log_fn(g, " ");

 	if (!g->support_ls_pmu) {
 		return 0;
 	}

-	nvgpu_kill_task_pg_init(g);
-
-	nvgpu_pmu_get_pg_stats(g,
-		PMU_PG_ELPG_ENGINE_ID_GRAPHICS, &pg_stat_data);
-
-	if (nvgpu_pmu_disable_elpg(g) != 0) {
-		nvgpu_err(g, "failed to set disable elpg");
+	if (g->can_elpg) {
+		nvgpu_pmu_pg_destroy(g, pmu, pmu->pg);
 	}
-	pmu->pmu_pg.initialized = false;
-
-	/* update the s/w ELPG residency counters */
-	g->pg_ingating_time_us += (u64)pg_stat_data.ingating_time;
-	g->pg_ungating_time_us += (u64)pg_stat_data.ungating_time;
-	g->pg_gating_cnt += pg_stat_data.gating_cnt;

 	nvgpu_mutex_acquire(&pmu->isr_mutex);
 	g->ops.pmu.pmu_enable_irq(pmu, false);
@@ -335,7 +318,7 @@ int nvgpu_pmu_destroy(struct gk20a *g)
 	nvgpu_pmu_fw_state_change(g, pmu, PMU_FW_STATE_OFF, false);
 	nvgpu_pmu_set_fw_ready(g, pmu, false);
 	pmu->pmu_perfmon->perfmon_ready = false;
-	pmu->pmu_pg.zbc_ready = false;
 	nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);

 	nvgpu_log_fn(g, "done");
@@ -345,8 +328,6 @@
 static void nvgpu_remove_pmu_support(struct nvgpu_pmu *pmu)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
-	struct mm_gk20a *mm = &g->mm;
-	struct vm_gk20a *vm = mm->pmu.vm;
 	struct boardobj *pboardobj, *pboardobj_tmp;
 	struct boardobjgrp *pboardobjgrp, *pboardobjgrp_tmp;
@@ -371,18 +352,13 @@ static void nvgpu_remove_pmu_support(struct nvgpu_pmu *pmu)
 	nvgpu_pmu_fw_release(g, pmu);

-	if (nvgpu_mem_is_valid(&pmu->pmu_pg.seq_buf)) {
-		nvgpu_pmu_pg_free_seq_buf(pmu, vm);
-	}
-
 	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_PMU_SUPER_SURFACE)) {
 		nvgpu_pmu_super_surface_deinit(g, pmu, pmu->super_surface);
 	}

 	nvgpu_pmu_lsfm_deinit(g, pmu, pmu->lsfm);
+	nvgpu_pmu_pg_deinit(g, pmu, pmu->pg);

-	nvgpu_mutex_destroy(&pmu->pmu_pg.elpg_mutex);
-	nvgpu_mutex_destroy(&pmu->pmu_pg.pg_mutex);
 	nvgpu_mutex_destroy(&pmu->isr_mutex);
 	nvgpu_pmu_sequences_free(g, &pmu->sequences);
 	nvgpu_pmu_mutexes_free(g, &pmu->mutexes);
@@ -411,16 +387,6 @@ int nvgpu_early_init_pmu_sw(struct gk20a *g, struct nvgpu_pmu *pmu)
 		goto exit;
 	}

-	err = nvgpu_mutex_init(&pmu->pmu_pg.elpg_mutex);
-	if (err != 0) {
-		return err;
-	}
-
-	err = nvgpu_mutex_init(&pmu->pmu_pg.pg_mutex);
-	if (err != 0) {
-		goto init_failed;
-	}
-
 	err = nvgpu_mutex_init(&pmu->isr_mutex);
 	if (err != 0) {
 		goto init_failed;
 	}
@@ -436,6 +402,12 @@ int nvgpu_early_init_pmu_sw(struct gk20a *g, struct nvgpu_pmu *pmu)
 	if (err != 0) {
 		goto init_failed;
 	}
+
+	if (g->can_elpg) {
+		err = nvgpu_pmu_pg_init(g, pmu, &pmu->pg);
+		if (err != 0) {
+			goto init_failed;
+		}
+	}

 	err = nvgpu_pmu_lsfm_init(g, &pmu->lsfm);
 	if (err != 0) {
@@ -480,7 +452,11 @@ int nvgpu_pmu_lock_acquire(struct gk20a *g, struct nvgpu_pmu *pmu,
 		return 0;
 	}

-	if (!pmu->pmu_pg.initialized) {
+	if (!g->can_elpg) {
+		return 0;
+	}
+
+	if (!pmu->pg->initialized) {
 		return -EINVAL;
 	}
@@ -494,7 +470,11 @@ int nvgpu_pmu_lock_release(struct gk20a *g, struct nvgpu_pmu *pmu,
 		return 0;
 	}

-	if (!pmu->pmu_pg.initialized) {
+	if (!g->can_elpg) {
+		return 0;
+	}
+
+	if (!pmu->pg->initialized) {
 		return -EINVAL;
 	}


@@ -106,7 +106,7 @@ void nvgpu_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu)
 	print_pmu_trace(pmu);

 	nvgpu_err(g, "pmu state: %d", nvgpu_pmu_get_fw_state(g, pmu));
-	nvgpu_err(g, "elpg state: %d", pmu->pmu_pg.elpg_stat);
+	nvgpu_err(g, "elpg state: %d", pmu->pg->elpg_stat);

 	/* PMU may crash due to FECS crash. Dump FECS status */
 	g->ops.gr.falcon.dump_stats(g);


@@ -78,28 +78,6 @@
 #define ACR_BOOT_TIMEDOUT	11U
 #define ACR_BOOT_FAILED		12U

-/*PG defines used by nvpgu-pmu*/
-#define PMU_PG_IDLE_THRESHOLD_SIM		1000U
-#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD_SIM	4000000U
-/* TBD: QT or else ? */
-#define PMU_PG_IDLE_THRESHOLD			15000U
-#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD	1000000U
-
-#define PMU_PG_LPWR_FEATURE_RPPG 0x0U
-#define PMU_PG_LPWR_FEATURE_MSCG 0x1U
-
-#define PMU_MSCG_DISABLED 0U
-#define PMU_MSCG_ENABLED 1U
-
-/* Default Sampling Period of AELPG */
-#define APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US	(1000000U)
-
-/* Default values of APCTRL parameters */
-#define APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US	(100U)
-#define APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US	(10000U)
-#define APCTRL_POWER_BREAKEVEN_DEFAULT_US	(2000U)
-#define APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT	(200U)
-
 /* pmu load const defines */
 #define PMU_BUSY_CYCLES_NORM_MAX	(1000U)
@@ -178,10 +156,7 @@ struct nvgpu_pmu {
 	struct nvgpu_allocator dmem;

-	u32 mscg_stat;
-	u32 mscg_transition_state;
-
-	struct nvgpu_pmu_pg pmu_pg;
+	struct nvgpu_pmu_pg *pg;

 	struct nvgpu_pmu_perfmon *pmu_perfmon;
 	void (*remove_support)(struct nvgpu_pmu *pmu);
@@ -222,7 +197,6 @@ int nvgpu_pmu_reset(struct gk20a *g);
 /* PMU debug */
 void nvgpu_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu);
-void nvgpu_pmu_dump_elpg_stats(struct nvgpu_pmu *pmu);
 bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos);

 struct gk20a *gk20a_from_pmu(struct nvgpu_pmu *pmu);


@@ -36,6 +36,28 @@
 struct nvgpu_pmu;
 struct vm_gk20a;

+/*PG defines used by nvpgu-pmu*/
+#define PMU_PG_IDLE_THRESHOLD_SIM		1000U
+#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD_SIM	4000000U
+/* TBD: QT or else ? */
+#define PMU_PG_IDLE_THRESHOLD			15000U
+#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD	1000000U
+
+#define PMU_PG_LPWR_FEATURE_RPPG 0x0U
+#define PMU_PG_LPWR_FEATURE_MSCG 0x1U
+
+#define PMU_MSCG_DISABLED 0U
+#define PMU_MSCG_ENABLED 1U
+
+/* Default Sampling Period of AELPG */
+#define APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US	(1000000U)
+
+/* Default values of APCTRL parameters */
+#define APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US	(100U)
+#define APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US	(10000U)
+#define APCTRL_POWER_BREAKEVEN_DEFAULT_US	(2000U)
+#define APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT	(200U)
+
 struct nvgpu_pg_init {
 	bool state_change;
 	struct nvgpu_cond wq;
@@ -59,6 +81,8 @@ struct nvgpu_pmu_pg {
 	u32 stat_dmem_offset[PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE];
 	struct nvgpu_mem seq_buf;
 	bool golden_image_initialized;
+	u32 mscg_stat;
+	u32 mscg_transition_state;
 };

 /*PG defines used by nvpgu-pmu*/
@@ -71,11 +95,14 @@ struct pmu_pg_stats_data {
 };

 /* PG init*/
-int nvgpu_init_task_pg_init(struct gk20a *g);
-int nvgpu_pg_init_task(void *arg);
-int nvgpu_pmu_init_powergating(struct gk20a *g);
-int nvgpu_pmu_init_bind_fecs(struct gk20a *g);
-void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g);
+int nvgpu_pmu_pg_init(struct gk20a *g, struct nvgpu_pmu *pmu,
+	struct nvgpu_pmu_pg **pg);
+void nvgpu_pmu_pg_deinit(struct gk20a *g, struct nvgpu_pmu *pmu,
+	struct nvgpu_pmu_pg *pg);
+int nvgpu_pmu_pg_sw_setup(struct gk20a *g, struct nvgpu_pmu *pmu,
+	struct nvgpu_pmu_pg *pg);
+void nvgpu_pmu_pg_destroy(struct gk20a *g, struct nvgpu_pmu *pmu,
+	struct nvgpu_pmu_pg *pg);

 /* PG enable/disable */
 int nvgpu_pmu_enable_elpg(struct gk20a *g);
@@ -85,15 +112,11 @@ int nvgpu_pmu_pg_global_enable(struct gk20a *g, bool enable_pg);
 int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
 	struct pmu_pg_stats_data *pg_stat_data);
-void nvgpu_kill_task_pg_init(struct gk20a *g);

 /* AELPG */
 int nvgpu_aelpg_init(struct gk20a *g);
 int nvgpu_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id);
 int nvgpu_pmu_ap_send_command(struct gk20a *g,
 	union pmu_ap_cmd *p_ap_cmd, bool b_block);
-int nvgpu_pmu_pg_init_seq_buf(struct nvgpu_pmu *pmu, struct vm_gk20a *vm);
-void nvgpu_pmu_pg_free_seq_buf(struct nvgpu_pmu *pmu, struct vm_gk20a *vm);

 void nvgpu_pmu_set_golden_image_initialized(struct gk20a *g, bool initialized);


@@ -38,16 +38,16 @@ static int lpwr_debug_show(struct seq_file *s, void *data)
 			"MSCG pstate state: %u\n"
 			"MSCG transition state: %u\n",
 			g->ops.clk_arb.get_current_pstate(g),
-			g->elpg_enabled, g->pmu.pmu_pg.elpg_refcnt,
-			g->pmu.pmu_pg.elpg_stat, g->mscg_enabled,
-			g->pmu.mscg_stat, g->pmu.mscg_transition_state);
+			g->elpg_enabled, g->pmu.pg->elpg_refcnt,
+			g->pmu.pg->elpg_stat, g->mscg_enabled,
+			g->pmu.pg->mscg_stat, g->pmu.pg->mscg_transition_state);
 	} else
 		seq_printf(s, "ELPG Enabled: %u\n"
 			"ELPG ref count: %u\n"
 			"ELPG state: %u\n",
-			g->elpg_enabled, g->pmu.pmu_pg.elpg_refcnt,
-			g->pmu.pmu_pg.elpg_stat);
+			g->elpg_enabled, g->pmu.pg->elpg_refcnt,
+			g->pmu.pg->elpg_stat);

 	return 0;


@@ -197,13 +197,6 @@ static void nvgpu_init_pm_vars(struct gk20a *g)
 		nvgpu_platform_is_silicon(g) ? platform->can_elpg_init : false;
 	nvgpu_set_enabled(g, NVGPU_PMU_PERFMON, platform->enable_perfmon);

-	/* set default values to aelpg parameters */
-	g->pmu.pmu_pg.aelpg_param[0] = APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US;
-	g->pmu.pmu_pg.aelpg_param[1] = APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US;
-	g->pmu.pmu_pg.aelpg_param[2] = APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US;
-	g->pmu.pmu_pg.aelpg_param[3] = APCTRL_POWER_BREAKEVEN_DEFAULT_US;
-	g->pmu.pmu_pg.aelpg_param[4] = APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT;
 	}

 	nvgpu_set_enabled(g, NVGPU_SUPPORT_ASPM, !platform->disable_aspm);


@@ -538,8 +538,8 @@ static ssize_t mscg_enable_store(struct device *dev,
 		g->mscg_enabled = true;
 		if (g->ops.pmu.pmu_is_lpwr_feature_supported(g,
 			PMU_PG_LPWR_FEATURE_MSCG)) {
-			if (!ACCESS_ONCE(pmu->mscg_stat)) {
-				WRITE_ONCE(pmu->mscg_stat,
+			if (!ACCESS_ONCE(pmu->pg->mscg_stat)) {
+				WRITE_ONCE(pmu->pg->mscg_stat,
 					PMU_MSCG_ENABLED);
 				/* make status visible */
 				smp_mb();
@@ -550,7 +550,7 @@ static ssize_t mscg_enable_store(struct device *dev,
 		if (g->ops.pmu.pmu_is_lpwr_feature_supported(g,
 			PMU_PG_LPWR_FEATURE_MSCG)) {
 			nvgpu_pmu_pg_global_enable(g, false);
-			WRITE_ONCE(pmu->mscg_stat, PMU_MSCG_DISABLED);
+			WRITE_ONCE(pmu->pg->mscg_stat, PMU_MSCG_DISABLED);
 			/* make status visible */
 			smp_mb();
 			g->mscg_enabled = false;
@@ -584,7 +584,7 @@ static ssize_t aelpg_param_store(struct device *dev,
 	struct gk20a *g = get_gk20a(dev);
 	int status = 0;
 	union pmu_ap_cmd ap_cmd;
-	int *paramlist = (int *)g->pmu.pmu_pg.aelpg_param;
+	int *paramlist = (int *)g->pmu.pg->aelpg_param;
 	u32 defaultparam[5] = {
 		APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US,
 		APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US,
@@ -627,9 +627,9 @@ static ssize_t aelpg_param_read(struct device *dev,
 	struct gk20a *g = get_gk20a(dev);

 	return snprintf(buf, PAGE_SIZE,
-		"%d %d %d %d %d\n", g->pmu.pmu_pg.aelpg_param[0],
-		g->pmu.pmu_pg.aelpg_param[1], g->pmu.pmu_pg.aelpg_param[2],
-		g->pmu.pmu_pg.aelpg_param[3], g->pmu.pmu_pg.aelpg_param[4]);
+		"%d %d %d %d %d\n", g->pmu.pg->aelpg_param[0],
+		g->pmu.pg->aelpg_param[1], g->pmu.pg->aelpg_param[2],
+		g->pmu.pg->aelpg_param[3], g->pmu.pg->aelpg_param[4]);
 }

 static DEVICE_ATTR(aelpg_param, ROOTRW,