gpu: nvgpu: reorganize PMU init

- Moved PMU init code from pmu_gk20a.c to
  drivers/gpu/nvgpu/common/pmu/pmu.c
- Moved the following related methods:
  SW/HW init,
  init message handler,
  deinit/destroy,
  PMU state machine
- Created HAL methods to read the message queue
  tail and the supported mutex count; see the
  sketch below.
- Prepended nvgpu_ to the global PMU init methods.
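
A minimal sketch of the resulting HAL usage, built only from names visible
in the diff below; the wrapper function itself is illustrative and not part
of this change:

#include <nvgpu/pmu.h>

#include "gk20a/gk20a.h"

/* Illustrative only: common code now reaches chip-specific PMU registers
 * through gpu_ops hooks instead of reading pwr_pmu_msgq_tail_r() or
 * pwr_pmu_mutex__size_1_v() directly.
 */
static void pmu_hal_usage_sketch(struct gk20a *g)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	u32 tail = 0;

	/* read (QUEUE_GET) or write (QUEUE_SET) the message queue tail */
	g->ops.pmu.pmu_msgq_tail(pmu, &tail, QUEUE_GET);

	/* number of HW mutexes supported by this chip */
	pmu->mutex_cnt = g->ops.pmu.pmu_mutex_size();
}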

JIRA NVGPU-56
JIRA NVGPU-92

Change-Id: Iea9efc194fefa74fb5641d2b2f4633577d2c3a47
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: http://git-master/r/1480002
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Author: Mahantesh Kumbar <mkumbar@nvidia.com>
Date: 2017-06-06 15:26:32 +05:30
Committed by: mobile promotions
Parent: 914bb78a7d
Commit: 69dee6a648

11 changed files with 492 additions and 440 deletions

@@ -58,6 +58,7 @@ nvgpu-y := \
common/rbtree.o \
common/vbios/bios.o \
common/falcon/falcon.o \
+	common/pmu/pmu.o \
common/pmu/pmu_ipc.o \
gk20a/gk20a.o \
gk20a/bus_gk20a.o \

@@ -75,7 +75,7 @@ static int mscg_stat_show(struct seq_file *s, void *data)
if (err)
return err;
-		gk20a_pmu_get_pg_stats(g,
+		nvgpu_pmu_get_pg_stats(g,
PMU_PG_ELPG_ENGINE_ID_MS, &pg_stat_data);
gk20a_idle(g);
}
@@ -133,7 +133,7 @@ static int mscg_transitions_show(struct seq_file *s, void *data)
if (err)
return err;
-		gk20a_pmu_get_pg_stats(g,
+		nvgpu_pmu_get_pg_stats(g,
PMU_PG_ELPG_ENGINE_ID_MS, &pg_stat_data);
gk20a_idle(g);
}
@@ -169,7 +169,7 @@ static int elpg_stat_show(struct seq_file *s, void *data)
if (err)
return err;
-		gk20a_pmu_get_pg_stats(g,
+		nvgpu_pmu_get_pg_stats(g,
PMU_PG_ELPG_ENGINE_ID_GRAPHICS, &pg_stat_data);
gk20a_idle(g);
}
@@ -226,7 +226,7 @@ static int elpg_transitions_show(struct seq_file *s, void *data)
if (err)
return err;
-		gk20a_pmu_get_pg_stats(g,
+		nvgpu_pmu_get_pg_stats(g,
PMU_PG_ELPG_ENGINE_ID_GRAPHICS, &pg_stat_data);
gk20a_idle(g);
}

@@ -0,0 +1,425 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <nvgpu/pmu.h>
#include <nvgpu/dma.h>
#include <nvgpu/log.h>
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
#include "gk20a/gk20a.h"
static int nvgpu_pg_init_task(void *arg);
static int nvgpu_init_task_pg_init(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
char thread_name[64];
int err = 0;
nvgpu_log_fn(g, " ");
nvgpu_cond_init(&pmu->pg_init.wq);
snprintf(thread_name, sizeof(thread_name),
"nvgpu_pg_init_%s", g->name);
err = nvgpu_thread_create(&pmu->pg_init.state_task, g,
nvgpu_pg_init_task, thread_name);
if (err)
nvgpu_err(g, "failed to start nvgpu_pg_init thread");
return err;
}
static int nvgpu_init_pmu_setup_sw(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
struct mm_gk20a *mm = &g->mm;
struct vm_gk20a *vm = mm->pmu.vm;
unsigned int i;
int err = 0;
u8 *ptr;
nvgpu_log_fn(g, " ");
/* start with elpg disabled until first enable call */
pmu->elpg_refcnt = 0;
/* Create thread to handle PMU state machine */
nvgpu_init_task_pg_init(g);
if (pmu->sw_ready) {
for (i = 0; i < pmu->mutex_cnt; i++) {
pmu->mutex[i].id = i;
pmu->mutex[i].index = i;
}
nvgpu_pmu_seq_init(pmu);
nvgpu_log_fn(g, "skip init");
goto skip_init;
}
/* no infoRom script from vbios? */
/* TBD: sysmon subtask */
if (IS_ENABLED(CONFIG_TEGRA_GK20A_PERFMON))
pmu->perfmon_sampling_enabled = true;
pmu->mutex_cnt = g->ops.pmu.pmu_mutex_size();
pmu->mutex = nvgpu_kzalloc(g, pmu->mutex_cnt *
sizeof(struct pmu_mutex));
if (!pmu->mutex) {
err = -ENOMEM;
goto err;
}
for (i = 0; i < pmu->mutex_cnt; i++) {
pmu->mutex[i].id = i;
pmu->mutex[i].index = i;
}
pmu->seq = nvgpu_kzalloc(g, PMU_MAX_NUM_SEQUENCES *
sizeof(struct pmu_sequence));
if (!pmu->seq) {
err = -ENOMEM;
goto err_free_mutex;
}
nvgpu_pmu_seq_init(pmu);
err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_SEQ_BUF_SIZE,
&pmu->seq_buf);
if (err) {
nvgpu_err(g, "failed to allocate memory");
goto err_free_seq;
}
ptr = (u8 *)pmu->seq_buf.cpu_va;
/* TBD: remove this if ZBC save/restore is handled by PMU;
* send an empty ZBC sequence for now
*/
ptr[0] = 0x16; /* opcode EXIT */
ptr[1] = 0; ptr[2] = 1; ptr[3] = 0;
ptr[4] = 0; ptr[5] = 0; ptr[6] = 0; ptr[7] = 0;
pmu->seq_buf.size = GK20A_PMU_SEQ_BUF_SIZE;
err = nvgpu_dma_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE,
&pmu->trace_buf);
if (err) {
nvgpu_err(g, "failed to allocate pmu trace buffer\n");
goto err_free_seq_buf;
}
pmu->sw_ready = true;
skip_init:
nvgpu_log_fn(g, "done");
return 0;
err_free_seq_buf:
nvgpu_dma_unmap_free(vm, &pmu->seq_buf);
err_free_seq:
nvgpu_kfree(g, pmu->seq);
err_free_mutex:
nvgpu_kfree(g, pmu->mutex);
err:
nvgpu_log_fn(g, "fail");
return err;
}
static int nvgpu_init_pmu_reset_enable_hw(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
nvgpu_log_fn(g, " ");
pmu_enable_hw(pmu, true);
return 0;
}
int nvgpu_init_pmu_support(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
int err;
nvgpu_log_fn(g, " ");
if (pmu->initialized)
return 0;
err = nvgpu_init_pmu_reset_enable_hw(g);
if (err)
return err;
if (g->support_pmu) {
err = nvgpu_init_pmu_setup_sw(g);
if (err)
return err;
err = g->ops.pmu.pmu_setup_hw_and_bootstrap(g);
if (err)
return err;
nvgpu_pmu_state_change(g, PMU_STATE_STARTING, false);
}
return err;
}
int nvgpu_pmu_process_init_msg(struct nvgpu_pmu *pmu,
struct pmu_msg *msg)
{
struct gk20a *g = gk20a_from_pmu(pmu);
struct pmu_v *pv = &g->ops.pmu_ver;
union pmu_init_msg_pmu *init;
struct pmu_sha1_gid_data gid_data;
u32 i, tail = 0;
nvgpu_log_fn(g, " ");
nvgpu_pmu_dbg(g, "init received\n");
g->ops.pmu.pmu_msgq_tail(pmu, &tail, QUEUE_GET);
pmu_copy_from_dmem(pmu, tail,
(u8 *)&msg->hdr, PMU_MSG_HDR_SIZE, 0);
if (msg->hdr.unit_id != PMU_UNIT_INIT) {
nvgpu_err(g, "expecting init msg");
return -EINVAL;
}
pmu_copy_from_dmem(pmu, tail + PMU_MSG_HDR_SIZE,
(u8 *)&msg->msg, msg->hdr.size - PMU_MSG_HDR_SIZE, 0);
if (msg->msg.init.msg_type != PMU_INIT_MSG_TYPE_PMU_INIT) {
nvgpu_err(g, "expecting init msg");
return -EINVAL;
}
tail += ALIGN(msg->hdr.size, PMU_DMEM_ALIGNMENT);
g->ops.pmu.pmu_msgq_tail(pmu, &tail, QUEUE_SET);
init = pv->get_pmu_msg_pmu_init_msg_ptr(&(msg->msg.init));
if (!pmu->gid_info.valid) {
pmu_copy_from_dmem(pmu,
pv->get_pmu_init_msg_pmu_sw_mg_off(init),
(u8 *)&gid_data,
sizeof(struct pmu_sha1_gid_data), 0);
pmu->gid_info.valid =
(*(u32 *)gid_data.signature == PMU_SHA1_GID_SIGNATURE);
if (pmu->gid_info.valid) {
BUG_ON(sizeof(pmu->gid_info.gid) !=
sizeof(gid_data.gid));
memcpy(pmu->gid_info.gid, gid_data.gid,
sizeof(pmu->gid_info.gid));
}
}
for (i = 0; i < PMU_QUEUE_COUNT; i++)
nvgpu_pmu_queue_init(pmu, i, init);
if (!nvgpu_alloc_initialized(&pmu->dmem)) {
/* Align start and end addresses */
u32 start = ALIGN(pv->get_pmu_init_msg_pmu_sw_mg_off(init),
PMU_DMEM_ALLOC_ALIGNMENT);
u32 end = (pv->get_pmu_init_msg_pmu_sw_mg_off(init) +
pv->get_pmu_init_msg_pmu_sw_mg_size(init)) &
~(PMU_DMEM_ALLOC_ALIGNMENT - 1);
u32 size = end - start;
nvgpu_bitmap_allocator_init(g, &pmu->dmem, "gk20a_pmu_dmem",
start, size, PMU_DMEM_ALLOC_ALIGNMENT, 0);
}
pmu->pmu_ready = true;
nvgpu_pmu_state_change(g, PMU_STATE_INIT_RECEIVED, true);
nvgpu_pmu_dbg(g, "init received end\n");
return 0;
}
static void pmu_setup_hw_enable_elpg(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
nvgpu_log_fn(g, " ");
pmu->initialized = true;
nvgpu_pmu_state_change(g, PMU_STATE_STARTED, false);
if (g->ops.pmu_ver.is_pmu_zbc_save_supported) {
/* Save zbc table after PMU is initialized. */
pmu->zbc_ready = true;
gk20a_pmu_save_zbc(g, 0xf);
}
if (g->elpg_enabled) {
/* Init reg with prod values*/
if (g->ops.pmu.pmu_setup_elpg)
g->ops.pmu.pmu_setup_elpg(g);
gk20a_pmu_enable_elpg(g);
}
nvgpu_udelay(50);
/* Enable AELPG */
if (g->aelpg_enabled) {
gk20a_aelpg_init(g);
gk20a_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);
}
}
void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state,
bool post_change_event)
{
struct nvgpu_pmu *pmu = &g->pmu;
nvgpu_pmu_dbg(g, "pmu_state - %d", pmu_state);
pmu->pmu_state = pmu_state;
if (post_change_event) {
pmu->pg_init.state_change = true;
nvgpu_cond_signal(&pmu->pg_init.wq);
}
/* make status visible */
smp_mb();
}
static int nvgpu_pg_init_task(void *arg)
{
struct gk20a *g = (struct gk20a *)arg;
struct nvgpu_pmu *pmu = &g->pmu;
struct nvgpu_pg_init *pg_init = &pmu->pg_init;
u32 pmu_state = 0;
nvgpu_log_fn(g, "thread start");
while (true) {
NVGPU_COND_WAIT(&pg_init->wq,
(pg_init->state_change == true), 0);
pmu->pg_init.state_change = false;
pmu_state = ACCESS_ONCE(pmu->pmu_state);
if (pmu_state == PMU_STATE_EXIT) {
nvgpu_pmu_dbg(g, "pmu state exit");
break;
}
switch (pmu_state) {
case PMU_STATE_INIT_RECEIVED:
nvgpu_pmu_dbg(g, "pmu starting");
if (g->can_elpg)
nvgpu_pmu_init_powergating(g);
break;
case PMU_STATE_ELPG_BOOTED:
nvgpu_pmu_dbg(g, "elpg booted");
nvgpu_pmu_init_bind_fecs(g);
break;
case PMU_STATE_LOADING_PG_BUF:
nvgpu_pmu_dbg(g, "loaded pg buf");
nvgpu_pmu_setup_hw_load_zbc(g);
break;
case PMU_STATE_LOADING_ZBC:
nvgpu_pmu_dbg(g, "loaded zbc");
pmu_setup_hw_enable_elpg(g);
break;
case PMU_STATE_STARTED:
nvgpu_pmu_dbg(g, "PMU booted");
break;
default:
nvgpu_pmu_dbg(g, "invalid state");
break;
}
}
while (!nvgpu_thread_should_stop(&pg_init->state_task))
nvgpu_msleep(5);
nvgpu_log_fn(g, "thread exit");
return 0;
}
int nvgpu_pmu_destroy(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
struct pmu_pg_stats_data pg_stat_data = { 0 };
struct nvgpu_timeout timeout;
int i;
nvgpu_log_fn(g, " ");
if (!g->support_pmu)
return 0;
/* make sure the pending operations are finished before we continue */
if (nvgpu_thread_is_running(&pmu->pg_init.state_task)) {
/* post PMU_STATE_EXIT to exit PMU state machine loop */
nvgpu_pmu_state_change(g, PMU_STATE_EXIT, true);
/* make the thread stop */
nvgpu_thread_stop(&pmu->pg_init.state_task);
/* wait to confirm thread stopped */
nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
do {
if (!nvgpu_thread_is_running(&pmu->pg_init.state_task))
break;
nvgpu_udelay(2);
} while (!nvgpu_timeout_expired_msg(&timeout,
"timeout - waiting PMU state machine thread stop"));
}
nvgpu_pmu_get_pg_stats(g,
PMU_PG_ELPG_ENGINE_ID_GRAPHICS, &pg_stat_data);
gk20a_pmu_disable_elpg(g);
pmu->initialized = false;
/* update the s/w ELPG residency counters */
g->pg_ingating_time_us += (u64)pg_stat_data.ingating_time;
g->pg_ungating_time_us += (u64)pg_stat_data.ungating_time;
g->pg_gating_cnt += pg_stat_data.gating_cnt;
nvgpu_mutex_acquire(&pmu->isr_mutex);
pmu->isr_enabled = false;
nvgpu_mutex_release(&pmu->isr_mutex);
for (i = 0; i < PMU_QUEUE_COUNT; i++)
nvgpu_mutex_destroy(&pmu->queue[i].mutex);
nvgpu_pmu_state_change(g, PMU_STATE_OFF, false);
pmu->pmu_ready = false;
pmu->perfmon_ready = false;
pmu->zbc_ready = false;
g->ops.pmu.lspmuwprinitdone = false;
g->ops.pmu.fecsbootstrapdone = false;
nvgpu_log_fn(g, "done");
return 0;
}

@@ -24,6 +24,7 @@
#include <nvgpu/timers.h>
#include <nvgpu/soc.h>
#include <nvgpu/enabled.h>
+#include <nvgpu/pmu.h>
#include <trace/events/gk20a.h>
@@ -130,7 +131,7 @@ int gk20a_prepare_poweroff(struct gk20a *g)
/* disable elpg before gr or fifo suspend */
if (g->ops.pmu.is_pmu_supported(g))
-		ret |= gk20a_pmu_destroy(g);
+		ret |= nvgpu_pmu_destroy(g);
ret |= gk20a_gr_suspend(g);
ret |= gk20a_mm_suspend(g);
@@ -259,7 +260,7 @@ int gk20a_finalize_poweron(struct gk20a *g)
#endif
if (g->ops.pmu.is_pmu_supported(g)) {
-		err = gk20a_init_pmu_support(g);
+		err = nvgpu_init_pmu_support(g);
if (err) {
nvgpu_err(g, "failed to init gk20a pmu");
goto done;

@@ -741,6 +741,9 @@ struct gpu_ops {
struct pmu_queue *queue, u32 *head, bool set);
int (*pmu_queue_tail)(struct nvgpu_pmu *pmu,
struct pmu_queue *queue, u32 *tail, bool set);
+		void (*pmu_msgq_tail)(struct nvgpu_pmu *pmu,
+			u32 *tail, bool set);
+		u32 (*pmu_mutex_size)(void);
int (*pmu_mutex_acquire)(struct nvgpu_pmu *pmu,
u32 id, u32 *token);
int (*pmu_mutex_release)(struct nvgpu_pmu *pmu,

@@ -43,19 +43,10 @@
#define gk20a_dbg_pmu(fmt, arg...) \
gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
int gk20a_pmu_get_pg_stats(struct gk20a *g,
u32 pg_engine_id,
struct pmu_pg_stats_data *pg_stat_data);
static void ap_callback_init_and_enable_ctrl(
struct gk20a *g, struct pmu_msg *msg,
void *param, u32 seq_desc, u32 status);
static int nvgpu_init_task_pg_init(struct gk20a *g);
static void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state,
bool post_change_event);
static int pmu_init_powergating(struct gk20a *g);
static u32 pmu_perfmon_cntr_sz_v0(struct nvgpu_pmu *pmu)
{
return sizeof(struct pmu_perfmon_counter_v0);
@@ -2783,6 +2774,25 @@ int gk20a_pmu_queue_tail(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
return 0;
}
+void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set)
+{
+	struct gk20a *g = gk20a_from_pmu(pmu);
+	u32 queue_tail_size = 0;
+
+	if (g->ops.pmu.pmu_get_queue_tail_size)
+		queue_tail_size = g->ops.pmu.pmu_get_queue_tail_size();
+
+	BUG_ON(!tail || !queue_tail_size);
+
+	if (!set)
+		*tail = pwr_pmu_msgq_tail_val_v(
+			gk20a_readl(g, pwr_pmu_msgq_tail_r()));
+	else
+		gk20a_writel(g,
+			pwr_pmu_msgq_tail_r(),
+			pwr_pmu_msgq_tail_val_f(*tail));
+}
void gk20a_remove_pmu_support(struct nvgpu_pmu *pmu)
{
struct gk20a *g = gk20a_from_pmu(pmu);
@@ -2801,17 +2811,6 @@ void gk20a_remove_pmu_support(struct nvgpu_pmu *pmu)
nvgpu_mutex_destroy(&pmu->pmu_seq_lock);
}
static int gk20a_init_pmu_reset_enable_hw(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
gk20a_dbg_fn("");
pmu_enable_hw(pmu, true);
return 0;
}
static int gk20a_prepare_ucode(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
@@ -2851,103 +2850,6 @@ static int gk20a_prepare_ucode(struct gk20a *g)
return err;
}
static int gk20a_init_pmu_setup_sw(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
struct mm_gk20a *mm = &g->mm;
struct vm_gk20a *vm = mm->pmu.vm;
unsigned int i;
int err = 0;
u8 *ptr;
gk20a_dbg_fn("");
/* start with elpg disabled until first enable call */
pmu->elpg_refcnt = 0;
/* Create thread to handle PMU state machine */
nvgpu_init_task_pg_init(g);
if (pmu->sw_ready) {
for (i = 0; i < pmu->mutex_cnt; i++) {
pmu->mutex[i].id = i;
pmu->mutex[i].index = i;
}
nvgpu_pmu_seq_init(pmu);
gk20a_dbg_fn("skip init");
goto skip_init;
}
/* no infoRom script from vbios? */
/* TBD: sysmon subtask */
if (IS_ENABLED(CONFIG_TEGRA_GK20A_PERFMON))
pmu->perfmon_sampling_enabled = true;
pmu->mutex_cnt = pwr_pmu_mutex__size_1_v();
pmu->mutex = nvgpu_kzalloc(g, pmu->mutex_cnt *
sizeof(struct pmu_mutex));
if (!pmu->mutex) {
err = -ENOMEM;
goto err;
}
for (i = 0; i < pmu->mutex_cnt; i++) {
pmu->mutex[i].id = i;
pmu->mutex[i].index = i;
}
pmu->seq = nvgpu_kzalloc(g, PMU_MAX_NUM_SEQUENCES *
sizeof(struct pmu_sequence));
if (!pmu->seq) {
err = -ENOMEM;
goto err_free_mutex;
}
nvgpu_pmu_seq_init(pmu);
err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_SEQ_BUF_SIZE,
&pmu->seq_buf);
if (err) {
nvgpu_err(g, "failed to allocate memory");
goto err_free_seq;
}
ptr = (u8 *)pmu->seq_buf.cpu_va;
/* TBD: remove this if ZBC save/restore is handled by PMU
* end an empty ZBC sequence for now */
ptr[0] = 0x16; /* opcode EXIT */
ptr[1] = 0; ptr[2] = 1; ptr[3] = 0;
ptr[4] = 0; ptr[5] = 0; ptr[6] = 0; ptr[7] = 0;
pmu->seq_buf.size = GK20A_PMU_SEQ_BUF_SIZE;
err = nvgpu_dma_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE,
&pmu->trace_buf);
if (err) {
nvgpu_err(g, "failed to allocate pmu trace buffer");
goto err_free_seq_buf;
}
pmu->sw_ready = true;
skip_init:
gk20a_dbg_fn("done");
return 0;
err_free_seq_buf:
nvgpu_dma_unmap_free(vm, &pmu->seq_buf);
err_free_seq:
nvgpu_kfree(g, pmu->seq);
err_free_mutex:
nvgpu_kfree(g, pmu->mutex);
err:
gk20a_dbg_fn("fail");
return err;
}
static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
void *param, u32 handle, u32 status)
{
@@ -3006,99 +2908,7 @@ static int gk20a_init_pmu_setup_hw1(struct gk20a *g)
}
static void pmu_setup_hw_load_zbc(struct gk20a *g);
static void pmu_setup_hw_enable_elpg(struct gk20a *g);
static void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state,
bool post_change_event)
{
struct nvgpu_pmu *pmu = &g->pmu;
pmu->pmu_state = pmu_state;
if (post_change_event) {
pmu->pg_init.state_change = true;
nvgpu_cond_signal(&pmu->pg_init.wq);
}
/* make status visible */
smp_mb();
}
static int nvgpu_pg_init_task(void *arg)
{
struct gk20a *g = (struct gk20a *)arg;
struct nvgpu_pmu *pmu = &g->pmu;
struct nvgpu_pg_init *pg_init = &pmu->pg_init;
u32 pmu_state = 0;
while (true) {
NVGPU_COND_WAIT(&pg_init->wq,
(pg_init->state_change == true), 0);
pmu->pg_init.state_change = false;
pmu_state = ACCESS_ONCE(pmu->pmu_state);
if (pmu_state == PMU_STATE_EXIT) {
gk20a_dbg_pmu("pmu state exit");
break;
}
switch (pmu_state) {
case PMU_STATE_INIT_RECEIVED:
gk20a_dbg_pmu("pmu starting");
if (g->can_elpg)
pmu_init_powergating(g);
break;
case PMU_STATE_ELPG_BOOTED:
gk20a_dbg_pmu("elpg booted");
gk20a_init_pmu_bind_fecs(g);
break;
case PMU_STATE_LOADING_PG_BUF:
gk20a_dbg_pmu("loaded pg buf");
pmu_setup_hw_load_zbc(g);
break;
case PMU_STATE_LOADING_ZBC:
gk20a_dbg_pmu("loaded zbc");
pmu_setup_hw_enable_elpg(g);
break;
case PMU_STATE_STARTED:
gk20a_dbg_pmu("PMU booted");
break;
default:
gk20a_dbg_pmu("invalid state");
break;
}
}
while (!nvgpu_thread_should_stop(&pg_init->state_task))
nvgpu_msleep(5);
return 0;
}
static int nvgpu_init_task_pg_init(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
char thread_name[64];
int err = 0;
nvgpu_cond_init(&pmu->pg_init.wq);
snprintf(thread_name, sizeof(thread_name),
"nvgpu_pg_init_%s", g->name);
err = nvgpu_thread_create(&pmu->pg_init.state_task, g,
nvgpu_pg_init_task, thread_name);
if (err)
nvgpu_err(g, "failed to start nvgpu_pg_init thread");
return err;
}
-int gk20a_init_pmu_bind_fecs(struct gk20a *g)
+int nvgpu_pmu_init_bind_fecs(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
struct pmu_cmd cmd;
@@ -3137,7 +2947,7 @@ int gk20a_init_pmu_bind_fecs(struct gk20a *g)
return err;
}
-static void pmu_setup_hw_load_zbc(struct gk20a *g)
+void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
struct pmu_cmd cmd;
@@ -3172,43 +2982,6 @@ static void pmu_setup_hw_load_zbc(struct gk20a *g)
nvgpu_pmu_state_change(g, PMU_STATE_LOADING_ZBC, false);
}
static void pmu_setup_hw_enable_elpg(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
/*
* FIXME: To enable ELPG, we increase the PMU ext2priv timeout unit to
* 7. This prevents PMU stalling on Host register accesses. Once the
* cause for this hang is discovered and fixed, this WAR should be
* removed.
*/
gk20a_writel(g, 0x10a164, 0x109ff);
pmu->initialized = true;
nvgpu_pmu_state_change(g, PMU_STATE_STARTED, false);
if (g->ops.pmu_ver.is_pmu_zbc_save_supported) {
/* Save zbc table after PMU is initialized. */
pmu->zbc_ready = true;
gk20a_pmu_save_zbc(g, 0xf);
}
if (g->elpg_enabled) {
/* Init reg with prod values*/
if (g->ops.pmu.pmu_setup_elpg)
g->ops.pmu.pmu_setup_elpg(g);
gk20a_pmu_enable_elpg(g);
}
nvgpu_udelay(50);
/* Enable AELPG */
if (g->aelpg_enabled) {
gk20a_aelpg_init(g);
gk20a_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);
}
}
static void gk20a_write_dmatrfbase(struct gk20a *g, u32 addr)
{
gk20a_writel(g, pwr_falcon_dmatrfbase_r(), addr);
@@ -3254,6 +3027,8 @@ void gk20a_init_pmu_ops(struct gpu_ops *gops)
gops->pmu.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v;
gops->pmu.pmu_queue_head = gk20a_pmu_queue_head;
gops->pmu.pmu_queue_tail = gk20a_pmu_queue_tail;
+	gops->pmu.pmu_msgq_tail = gk20a_pmu_msgq_tail;
+	gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
gops->pmu.pmu_setup_elpg = NULL;
@@ -3279,34 +3054,6 @@ void gk20a_init_pmu_ops(struct gpu_ops *gops)
gops->pmu.reset = gk20a_pmu_reset;
}
int gk20a_init_pmu_support(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
u32 err;
gk20a_dbg_fn("");
if (pmu->initialized)
return 0;
err = gk20a_init_pmu_reset_enable_hw(g);
if (err)
return err;
if (g->support_pmu) {
err = gk20a_init_pmu_setup_sw(g);
if (err)
return err;
err = g->ops.pmu.pmu_setup_hw_and_bootstrap(g);
if (err)
return err;
nvgpu_pmu_state_change(g, PMU_STATE_STARTING, false);
}
return err;
}
static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
void *param, u32 handle, u32 status)
{
@@ -3453,7 +3200,8 @@ static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
return 0;
}
-static int pmu_init_powergating(struct gk20a *g)
+int nvgpu_pmu_init_powergating(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
u32 pg_engine_id;
@@ -3613,84 +3361,6 @@ int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu)
return 0;
}
int nvgpu_pmu_process_init_msg(struct nvgpu_pmu *pmu,
struct pmu_msg *msg)
{
struct gk20a *g = gk20a_from_pmu(pmu);
struct pmu_v *pv = &g->ops.pmu_ver;
union pmu_init_msg_pmu *init;
struct pmu_sha1_gid_data gid_data;
u32 i, tail = 0;
gk20a_dbg_pmu("init received\n");
tail = pwr_pmu_msgq_tail_val_v(
gk20a_readl(g, pwr_pmu_msgq_tail_r()));
pmu_copy_from_dmem(pmu, tail,
(u8 *)&msg->hdr, PMU_MSG_HDR_SIZE, 0);
if (msg->hdr.unit_id != PMU_UNIT_INIT) {
nvgpu_err(g, "expecting init msg");
return -EINVAL;
}
pmu_copy_from_dmem(pmu, tail + PMU_MSG_HDR_SIZE,
(u8 *)&msg->msg, msg->hdr.size - PMU_MSG_HDR_SIZE, 0);
if (msg->msg.init.msg_type != PMU_INIT_MSG_TYPE_PMU_INIT) {
nvgpu_err(g, "expecting init msg");
return -EINVAL;
}
tail += ALIGN(msg->hdr.size, PMU_DMEM_ALIGNMENT);
gk20a_writel(g, pwr_pmu_msgq_tail_r(),
pwr_pmu_msgq_tail_val_f(tail));
init = pv->get_pmu_msg_pmu_init_msg_ptr(&(msg->msg.init));
if (!pmu->gid_info.valid) {
pmu_copy_from_dmem(pmu,
pv->get_pmu_init_msg_pmu_sw_mg_off(init),
(u8 *)&gid_data,
sizeof(struct pmu_sha1_gid_data), 0);
pmu->gid_info.valid =
(*(u32 *)gid_data.signature == PMU_SHA1_GID_SIGNATURE);
if (pmu->gid_info.valid) {
BUG_ON(sizeof(pmu->gid_info.gid) !=
sizeof(gid_data.gid));
memcpy(pmu->gid_info.gid, gid_data.gid,
sizeof(pmu->gid_info.gid));
}
}
for (i = 0; i < PMU_QUEUE_COUNT; i++)
nvgpu_pmu_queue_init(pmu, i, init);
if (!nvgpu_alloc_initialized(&pmu->dmem)) {
/* Align start and end addresses */
u32 start = ALIGN(pv->get_pmu_init_msg_pmu_sw_mg_off(init),
PMU_DMEM_ALLOC_ALIGNMENT);
u32 end = (pv->get_pmu_init_msg_pmu_sw_mg_off(init) +
pv->get_pmu_init_msg_pmu_sw_mg_size(init)) &
~(PMU_DMEM_ALLOC_ALIGNMENT - 1);
u32 size = end - start;
nvgpu_bitmap_allocator_init(g, &pmu->dmem, "gk20a_pmu_dmem",
start, size,
PMU_DMEM_ALLOC_ALIGNMENT, 0);
}
pmu->pmu_ready = true;
nvgpu_pmu_state_change(g, PMU_STATE_INIT_RECEIVED, true);
gk20a_dbg_pmu("init received end\n");
return 0;
}
static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg,
void *param, u32 handle, u32 status)
{
@@ -4413,66 +4083,6 @@ int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable)
return err;
}
int gk20a_pmu_destroy(struct gk20a *g)
{
struct nvgpu_pmu *pmu = &g->pmu;
struct pmu_pg_stats_data pg_stat_data = { 0 };
struct nvgpu_timeout timeout;
int i;
gk20a_dbg_fn("");
if (!g->support_pmu)
return 0;
/* make sure the pending operations are finished before we continue */
if (nvgpu_thread_is_running(&pmu->pg_init.state_task)) {
/* post PMU_STATE_EXIT to exit PMU state machine loop */
nvgpu_pmu_state_change(g, PMU_STATE_EXIT, true);
/* Make thread stop*/
nvgpu_thread_stop(&pmu->pg_init.state_task);
/* wait to confirm thread stopped */
nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
do {
if (!nvgpu_thread_is_running(&pmu->pg_init.state_task))
break;
nvgpu_udelay(2);
} while (!nvgpu_timeout_expired_msg(&timeout,
"timeout - waiting PMU state machine thread stop"));
}
gk20a_pmu_get_pg_stats(g,
PMU_PG_ELPG_ENGINE_ID_GRAPHICS, &pg_stat_data);
gk20a_pmu_disable_elpg(g);
pmu->initialized = false;
/* update the s/w ELPG residency counters */
g->pg_ingating_time_us += (u64)pg_stat_data.ingating_time;
g->pg_ungating_time_us += (u64)pg_stat_data.ungating_time;
g->pg_gating_cnt += pg_stat_data.gating_cnt;
nvgpu_mutex_acquire(&pmu->isr_mutex);
pmu->isr_enabled = false;
nvgpu_mutex_release(&pmu->isr_mutex);
for (i = 0; i < PMU_QUEUE_COUNT; i++)
nvgpu_mutex_destroy(&pmu->queue[i].mutex);
nvgpu_pmu_state_change(g, PMU_STATE_OFF, false);
pmu->pmu_ready = false;
pmu->perfmon_ready = false;
pmu->zbc_ready = false;
g->ops.pmu.lspmuwprinitdone = false;
g->ops.pmu.fecsbootstrapdone = false;
gk20a_dbg_fn("done");
return 0;
}
int gk20a_pmu_load_norm(struct gk20a *g, u32 *load)
{
*load = g->pmu.load_shadow;
@@ -4543,8 +4153,7 @@ void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
pg_stat_data->avg_exit_latency_us = stats.pg_avg_exit_time_us;
}
-int gk20a_pmu_get_pg_stats(struct gk20a *g,
-		u32 pg_engine_id,
+int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
struct pmu_pg_stats_data *pg_stat_data)
{
struct nvgpu_pmu *pmu = &g->pmu;

@@ -63,15 +63,6 @@ struct pmu_surface {
struct flcn_mem_desc_v0 params;
};
/*PG defines used by nvpgu-pmu*/
struct pmu_pg_stats_data {
u32 gating_cnt;
u32 ingating_time;
u32 ungating_time;
u32 avg_entry_latency_us;
u32 avg_exit_latency_us;
};
#define PMU_PG_IDLE_THRESHOLD_SIM 1000
#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD_SIM 4000000
/* TBD: QT or else ? */
@@ -105,10 +96,6 @@ struct pmu_pg_stats_data {
#define APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US (10000)
#define APCTRL_POWER_BREAKEVEN_DEFAULT_US (2000)
#define APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT (200)
/*PG defines used by nvpgu-pmu*/
int gk20a_init_pmu_support(struct gk20a *g);
int gk20a_init_pmu_bind_fecs(struct gk20a *g);
bool gk20a_pmu_is_interrupted(struct nvgpu_pmu *pmu);
void gk20a_pmu_isr(struct gk20a *g);
@@ -131,8 +118,8 @@ int gk20a_pmu_queue_head(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
u32 *head, bool set);
int gk20a_pmu_queue_tail(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
u32 *tail, bool set);
+void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set);
-int gk20a_pmu_destroy(struct gk20a *g);
int gk20a_pmu_load_norm(struct gk20a *g, u32 *load);
int gk20a_pmu_load_update(struct gk20a *g);
void gk20a_pmu_reset_load_counters(struct gk20a *g);
@@ -174,8 +161,6 @@ int gk20a_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
u32 size);
int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
u32 size);
int gk20a_pmu_get_pg_stats(struct gk20a *g,
u32 pg_engine_id, struct pmu_pg_stats_data *pg_stat_data);
bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos);
int nvgpu_pmu_perfmon_start_sampling(struct nvgpu_pmu *pmu);

@@ -294,6 +294,8 @@ void gm20b_init_pmu_ops(struct gpu_ops *gops)
gops->pmu.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v;
gops->pmu.pmu_queue_head = gk20a_pmu_queue_head;
gops->pmu.pmu_queue_tail = gk20a_pmu_queue_tail;
+	gops->pmu.pmu_msgq_tail = gk20a_pmu_msgq_tail;
+	gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
gops->pmu.lspmuwprinitdone = 0;

@@ -424,6 +424,8 @@ void gp106_init_pmu_ops(struct gpu_ops *gops)
gops->pmu.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v;
gops->pmu.pmu_queue_head = gk20a_pmu_queue_head;
gops->pmu.pmu_queue_tail = gk20a_pmu_queue_tail;
+	gops->pmu.pmu_msgq_tail = gk20a_pmu_msgq_tail;
+	gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
gops->pmu.lspmuwprinitdone = 0;

@@ -415,6 +415,8 @@ void gp10b_init_pmu_ops(struct gpu_ops *gops)
gops->pmu.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v;
gops->pmu.pmu_queue_head = gk20a_pmu_queue_head;
gops->pmu.pmu_queue_tail = gk20a_pmu_queue_tail;
+	gops->pmu.pmu_msgq_tail = gk20a_pmu_msgq_tail;
+	gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
gops->pmu.pmu_mutex_acquire = gk20a_pmu_mutex_acquire;
gops->pmu.pmu_mutex_release = gk20a_pmu_mutex_release;
gops->pmu.lspmuwprinitdone = false;

@@ -335,6 +335,15 @@ struct nvgpu_pmu {
struct nvgpu_firmware *fw;
};
/* PG defines used by nvgpu-pmu */
struct pmu_pg_stats_data {
u32 gating_cnt;
u32 ingating_time;
u32 ungating_time;
u32 avg_entry_latency_us;
u32 avg_exit_latency_us;
};
/* PMU IPC Methods */
void nvgpu_pmu_seq_init(struct nvgpu_pmu *pmu);
@@ -361,7 +370,20 @@ int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu,
struct nv_pmu_therm_msg *msg);
/* PMU init */
int nvgpu_init_pmu_support(struct gk20a *g);
int nvgpu_pmu_destroy(struct gk20a *g);
int nvgpu_pmu_process_init_msg(struct nvgpu_pmu *pmu,
-			struct pmu_msg *msg);
+		struct pmu_msg *msg);
void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state,
bool post_change_event);
/* PG */
int nvgpu_pmu_init_powergating(struct gk20a *g);
int nvgpu_pmu_init_bind_fecs(struct gk20a *g);
void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g);
int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
struct pmu_pg_stats_data *pg_stat_data);
#endif /* __NVGPU_PMU_H__ */
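
For reference, a condensed sketch of the driver call flow after this
reorganization, based on the gk20a.c hunk above; the function name and the
trimmed error handling are illustrative:

/* Illustrative condensation of the gk20a.c changes above. */
static int example_pmu_power_cycle(struct gk20a *g)
{
	int err = 0;

	if (g->ops.pmu.is_pmu_supported(g)) {
		/* boot the PMU; the pg_init thread then walks the state
		 * machine: INIT_RECEIVED -> ELPG_BOOTED -> LOADING_PG_BUF
		 * -> LOADING_ZBC -> STARTED
		 */
		err = nvgpu_init_pmu_support(g);
		if (err)
			return err;
	}

	/* ... normal operation ... */

	/* on poweroff, nvgpu_pmu_destroy() posts PMU_STATE_EXIT to stop
	 * the state machine thread and tears the PMU state down
	 */
	if (g->ops.pmu.is_pmu_supported(g))
		err = nvgpu_pmu_destroy(g);

	return err;
}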