Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: update golden image flag for RG seq
The flag pmu->pg->golden_image_initialized is set to true during initial GPU
context creation and is not cleared when the GPU goes into pm_suspend during
railgate. When the GPU resumes after un-railgate it therefore retains the
previous value, which can cause ELPG to be enabled immediately. As a result,
with both ELPG and railgating enabled, an IDLE_SNAP error is seen on a read of
the gr_gpc0_tpc0_sm_arch_r register.

To resolve this, if the golden image is ready, move
pmu->pg->golden_image_initialized to the suspend state during railgate so that
the early enable of ELPG is delayed.

Add a new pmu_restore_golden_img_state HAL to the NVGPU_INIT_TABLE_ENTRY list.
It is called after all GR accesses are done and the GPU has completely resumed
after un-railgate. If the golden_image_initialized flag is in the suspend
state, this HAL sets it back to the ready state and re-enables ELPG.

Bug 3431798

Change-Id: I1fee83e66e09b6b78d385bbe60529d0724f79e79
Signed-off-by: Divya <dsinghatwari@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2639188
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
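For orientation before the diff, the three-state flow this change introduces can be modeled outside the driver. The sketch below is a minimal user-space model using C11 atomics: the enum values mirror the ones added to pmu_pg.h, while the helpers enable_elpg(), railgate(), and restore_golden_img_state() are simplified, hypothetical stand-ins for nvgpu_pmu_enable_elpg(), the railgate path in nvgpu_pmu_pg_destroy(), and the new nvgpu_pmu_restore_golden_img_state() HAL; it is not the driver code itself.

#include <stdatomic.h>
#include <stdio.h>

/* Same three states the patch adds to pmu_pg.h. */
enum { GOLDEN_IMG_NOT_READY = 0, GOLDEN_IMG_SUSPEND, GOLDEN_IMG_READY };

/* Stand-in for pmu->pg->golden_image_initialized (nvgpu_atomic_t in the driver). */
static atomic_int golden_image_initialized = GOLDEN_IMG_NOT_READY;

/* ELPG may only be enabled once the golden context exists (READY state). */
static int enable_elpg(void)
{
        if (atomic_load(&golden_image_initialized) != GOLDEN_IMG_READY) {
                return -1;      /* too early: skip, as the driver's check does */
        }
        printf("ELPG enabled\n");
        return 0;
}

/* Railgate path: park a valid golden image in SUSPEND so ELPG stays off. */
static void railgate(void)
{
        if (atomic_load(&golden_image_initialized) == GOLDEN_IMG_READY) {
                atomic_store(&golden_image_initialized, GOLDEN_IMG_SUSPEND);
        }
}

/* Un-railgate path: after GR access is done, restore READY and re-enable ELPG. */
static int restore_golden_img_state(void)
{
        if (atomic_load(&golden_image_initialized) == GOLDEN_IMG_SUSPEND) {
                atomic_store(&golden_image_initialized, GOLDEN_IMG_READY);
                return enable_elpg();
        }
        return 0;
}

int main(void)
{
        enable_elpg();                                  /* skipped: NOT_READY */
        atomic_store(&golden_image_initialized,
                     GOLDEN_IMG_READY);                 /* golden ctx created */
        railgate();                                     /* READY -> SUSPEND */
        enable_elpg();                                  /* still skipped: SUSPEND */
        return restore_golden_img_state();              /* SUSPEND -> READY, ELPG on */
}

In this model "ELPG enabled" is printed only once, after the un-railgate step, which reflects the ordering the patch aims for: the new HAL entry sits in the init table after the GR-related entries, so ELPG is not re-enabled while the resume path is still touching GR registers.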
@@ -750,7 +750,7 @@ int nvgpu_gr_obj_ctx_alloc_golden_ctx_image(struct gk20a *g,
 
         golden_image->ready = true;
 #ifdef CONFIG_NVGPU_POWER_PG
-        nvgpu_pmu_set_golden_image_initialized(g, true);
+        nvgpu_pmu_set_golden_image_initialized(g, GOLDEN_IMG_READY);
 #endif
         g->ops.gr.falcon.set_current_ctx_invalid(g);
 
@@ -984,7 +984,7 @@ void nvgpu_gr_obj_ctx_deinit(struct gk20a *g,
                 golden_image->local_golden_image = NULL;
         }
 #ifdef CONFIG_NVGPU_POWER_PG
-        nvgpu_pmu_set_golden_image_initialized(g, false);
+        nvgpu_pmu_set_golden_image_initialized(g, GOLDEN_IMG_NOT_READY);
 #endif
         golden_image->ready = false;
         nvgpu_kfree(g, golden_image);
@@ -58,6 +58,10 @@
 #include <nvgpu/pmu/pmu_pstate.h>
 #endif
 
+#ifdef CONFIG_NVGPU_POWER_PG
+#include <nvgpu/pmu/pmu_pg.h>
+#endif
+
 bool is_nvgpu_gpu_state_valid(struct gk20a *g)
 {
         u32 boot_0 = g->ops.mc.get_chip_details(g, NULL, NULL, NULL);
@@ -916,6 +920,10 @@ int nvgpu_finalize_poweron(struct gk20a *g)
                 NVGPU_INIT_TABLE_ENTRY(&nvgpu_init_syncpt_mem, NO_FLAG),
 #ifdef CONFIG_NVGPU_PROFILER
                 NVGPU_INIT_TABLE_ENTRY(&nvgpu_pm_reservation_init, NO_FLAG),
+#endif
+#ifdef CONFIG_NVGPU_POWER_PG
+                NVGPU_INIT_TABLE_ENTRY(g->ops.pmu.pmu_restore_golden_img_state,
+                        NO_FLAG),
 #endif
                 NVGPU_INIT_TABLE_ENTRY(g->ops.channel.resume_all_serviceable_ch,
                         NO_FLAG),
@@ -34,6 +34,7 @@
 #include <nvgpu/pmu/fw.h>
 #include <nvgpu/pmu/debug.h>
 #include <nvgpu/pmu/pmu_pg.h>
+#include <nvgpu/atomic.h>
 
 #include "pg_sw_gm20b.h"
 #include "pg_sw_gv11b.h"
@@ -273,7 +274,8 @@ int nvgpu_pmu_enable_elpg(struct gk20a *g)
         /* do NOT enable elpg until golden ctx is created,
          * which is related with the ctx that ELPG save and restore.
          */
-        if (unlikely(!pmu->pg->golden_image_initialized)) {
+        if (unlikely((nvgpu_atomic_read(&pmu->pg->golden_image_initialized)) !=
+                        GOLDEN_IMG_READY)) {
                 goto exit_unlock;
         }
 
@@ -612,7 +614,8 @@ int nvgpu_pmu_enable_elpg_ms(struct gk20a *g)
         }
 
         /* do NOT enable elpg_ms until golden ctx is created */
-        if (unlikely(!pmu->pg->golden_image_initialized)) {
+        if (unlikely((nvgpu_atomic_read(&pmu->pg->golden_image_initialized)) !=
+                        GOLDEN_IMG_READY)) {
                 goto exit_unlock;
         }
 
@@ -962,13 +965,25 @@ int nvgpu_pmu_pg_sw_setup(struct gk20a *g, struct nvgpu_pmu *pmu,
         pg->elpg_refcnt = 0;
         pg->elpg_ms_refcnt = 0;
 
-        /* skip seq_buf alloc during unrailgate path */
+        /* During un-railgate path, skip seq_buf alloc
+         * and do not update golden_image_initialized flag
+         * in un-railgate path.
+         */
         if (!nvgpu_mem_is_valid(&pg->seq_buf)) {
                 err = pmu_pg_init_seq_buf(g, pmu, pg);
                 if (err != 0) {
                         nvgpu_err(g, "failed to allocate memory");
                         return err;
                 }
+
+                /*
+                 * During first boot set golden_image_intialized
+                 * to not_ready.
+                 * This will set to ready state after golden
+                 * ctx is created.
+                 */
+                nvgpu_atomic_set(&pg->golden_image_initialized,
+                                GOLDEN_IMG_NOT_READY);
         }
 
         if (nvgpu_thread_is_running(&pg->pg_init.state_task)) {
@@ -1001,6 +1016,17 @@ void nvgpu_pmu_pg_destroy(struct gk20a *g, struct nvgpu_pmu *pmu,
                 g->pg_ingating_time_us += (u64)pg_stat_data.ingating_time;
                 g->pg_ungating_time_us += (u64)pg_stat_data.ungating_time;
                 g->pg_gating_cnt += pg_stat_data.gating_cnt;
+                /*
+                 * if golden image is ready then set the
+                 * golden_image_initialized to suspended state as
+                 * part of railgate sequence. This will be set to
+                 * ready in un-railgate sequence.
+                 */
+                if (nvgpu_atomic_read(&pg->golden_image_initialized) ==
+                                GOLDEN_IMG_READY) {
+                        nvgpu_atomic_set(&pg->golden_image_initialized,
+                                        GOLDEN_IMG_SUSPEND);
+                }
 
                 pg->zbc_ready = false;
         }
@@ -1097,7 +1123,7 @@ void nvgpu_pmu_pg_deinit(struct gk20a *g, struct nvgpu_pmu *pmu,
         nvgpu_kfree(g, pg);
 }
 
-void nvgpu_pmu_set_golden_image_initialized(struct gk20a *g, bool initialized)
+void nvgpu_pmu_set_golden_image_initialized(struct gk20a *g, u8 state)
 {
         struct nvgpu_pmu *pmu = g->pmu;
 
@@ -1105,7 +1131,7 @@ void nvgpu_pmu_set_golden_image_initialized(struct gk20a *g, bool initialized)
                 return;
         }
 
-        pmu->pg->golden_image_initialized = initialized;
+        nvgpu_atomic_set(&pmu->pg->golden_image_initialized, state);
 }
 
 int nvgpu_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
@@ -1168,3 +1194,31 @@ void *nvgpu_pmu_pg_buf_get_cpu_va(struct gk20a *g, struct nvgpu_pmu *pmu)
 
         return pmu->pg->pg_buf.cpu_va;
 }
+
+int nvgpu_pmu_restore_golden_img_state(struct gk20a *g)
+{
+        struct nvgpu_pmu *pmu = g->pmu;
+        int err = 0;
+
+        if (!is_pg_supported(g, pmu->pg)) {
+                goto out;
+        }
+
+        if (nvgpu_atomic_read(&pmu->pg->golden_image_initialized) ==
+                        GOLDEN_IMG_SUSPEND) {
+                /*
+                 * this becomes part of un-railgate sequence.
+                 * set the golden_image_initialized to ready state
+                 * and re-enable elpg.
+                 */
+                nvgpu_atomic_set(&pmu->pg->golden_image_initialized,
+                                GOLDEN_IMG_READY);
+                err = nvgpu_pmu_reenable_elpg(g);
+                if (err != 0) {
+                        nvgpu_err(g, "fail to re-enable elpg");
+                        goto out;
+                }
+        }
+out:
+        return err;
+}
@@ -69,7 +69,9 @@
 #include <nvgpu/nvhost.h>
 #include <nvgpu/clk_mon.h>
 #include <nvgpu/profiler.h>
-
+#ifdef CONFIG_NVGPU_POWER_PG
+#include <nvgpu/pmu/pmu_pg.h>
+#endif
 #include "hal/mm/mm_gm20b.h"
 #include "hal/mm/mm_gp10b.h"
 #include "hal/mm/mm_gv11b.h"
@@ -1278,6 +1280,9 @@ static const struct gops_pmu ga100_ops_pmu = {
         .ecc_free = gv11b_pmu_ecc_free,
         /* Init */
         .pmu_early_init = nvgpu_pmu_early_init,
+#ifdef CONFIG_NVGPU_POWER_PG
+        .pmu_restore_golden_img_state = nvgpu_pmu_restore_golden_img_state,
+#endif
         .pmu_rtos_init = nvgpu_pmu_rtos_init,
         .pmu_pstate_sw_setup = nvgpu_pmu_pstate_sw_setup,
         .pmu_pstate_pmu_setup = nvgpu_pmu_pstate_pmu_setup,
@@ -47,6 +47,9 @@
 #include <nvgpu/pmu/pmu_perfmon.h>
 #endif
 #include <nvgpu/profiler.h>
+#ifdef CONFIG_NVGPU_POWER_PG
+#include <nvgpu/pmu/pmu_pg.h>
+#endif
 
 #include "hal/mm/mm_gp10b.h"
 #include "hal/mm/mm_gv11b.h"
@@ -1303,6 +1306,9 @@ static const struct gops_pmu ga10b_ops_pmu = {
         */
         /* Basic init ops */
         .pmu_early_init = nvgpu_pmu_early_init,
+#ifdef CONFIG_NVGPU_POWER_PG
+        .pmu_restore_golden_img_state = nvgpu_pmu_restore_golden_img_state,
+#endif
         .is_pmu_supported = ga10b_is_pmu_supported,
         .falcon_base_addr = gv11b_pmu_falcon_base_addr,
         .falcon2_base_addr = ga10b_pmu_falcon2_base_addr,
@@ -56,6 +56,9 @@
 #include <nvgpu/therm.h>
 #include <nvgpu/clk_arb.h>
 #include <nvgpu/grmgr.h>
+#ifdef CONFIG_NVGPU_POWER_PG
+#include <nvgpu/pmu/pmu_pg.h>
+#endif
 
 #include "hal/mm/mm_gk20a.h"
 #include "hal/mm/mm_gm20b.h"
@@ -816,6 +819,9 @@ static const struct gops_pmu gm20b_ops_pmu = {
         .pmu_pstate_pmu_setup = nvgpu_pmu_pstate_pmu_setup,
         .pmu_destroy = nvgpu_pmu_destroy,
         .pmu_early_init = nvgpu_pmu_early_init,
+#ifdef CONFIG_NVGPU_POWER_PG
+        .pmu_restore_golden_img_state = nvgpu_pmu_restore_golden_img_state,
+#endif
         .pmu_rtos_init = nvgpu_pmu_rtos_init,
         .is_pmu_supported = gm20b_is_pmu_supported,
         .falcon_base_addr = gk20a_pmu_falcon_base_addr,
@@ -43,6 +43,9 @@
 #include <nvgpu/pmu/pmu_perfmon.h>
 #endif
 #include <nvgpu/profiler.h>
+#ifdef CONFIG_NVGPU_POWER_PG
+#include <nvgpu/pmu/pmu_pg.h>
+#endif
 
 #include "hal/mm/mm_gp10b.h"
 #include "hal/mm/mm_gv11b.h"
@@ -1115,6 +1118,9 @@ static const struct gops_pmu gv11b_ops_pmu = {
         */
         /* Basic init ops */
         .pmu_early_init = nvgpu_pmu_early_init,
+#ifdef CONFIG_NVGPU_POWER_PG
+        .pmu_restore_golden_img_state = nvgpu_pmu_restore_golden_img_state,
+#endif
         .is_pmu_supported = gv11b_is_pmu_supported,
         .falcon_base_addr = gv11b_pmu_falcon_base_addr,
         .pmu_reset = nvgpu_pmu_reset,
@@ -104,6 +104,9 @@ struct gops_pmu {
         */
        int (*pmu_early_init)(struct gk20a *g);
 
+#ifdef CONFIG_NVGPU_POWER_PG
+       int (*pmu_restore_golden_img_state)(struct gk20a *g);
+#endif
        /** @cond DOXYGEN_SHOULD_SKIP_THIS */
 
 #ifdef CONFIG_NVGPU_LS_PMU
@@ -63,6 +63,13 @@ struct rpc_handler_payload;
 #define APCTRL_POWER_BREAKEVEN_DEFAULT_US (2000U)
 #define APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT (200U)
 
+/* State of golden image */
+enum {
+        GOLDEN_IMG_NOT_READY = 0,
+        GOLDEN_IMG_SUSPEND,
+        GOLDEN_IMG_READY,
+};
+
 struct nvgpu_pg_init {
         bool state_change;
         bool state_destroy;
@@ -90,7 +97,7 @@ struct nvgpu_pmu_pg {
         bool initialized;
         u32 stat_dmem_offset[PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE];
         struct nvgpu_mem seq_buf;
-        bool golden_image_initialized;
+        nvgpu_atomic_t golden_image_initialized;
         u32 mscg_stat;
         u32 mscg_transition_state;
         int (*elpg_statistics)(struct gk20a *g, u32 pg_engine_id,
@@ -142,6 +149,7 @@ int nvgpu_pmu_pg_sw_setup(struct gk20a *g, struct nvgpu_pmu *pmu,
                 struct nvgpu_pmu_pg *pg);
 void nvgpu_pmu_pg_destroy(struct gk20a *g, struct nvgpu_pmu *pmu,
                 struct nvgpu_pmu_pg *pg);
+int nvgpu_pmu_restore_golden_img_state(struct gk20a *g);
 
 /* PG enable/disable */
 int nvgpu_pmu_reenable_elpg(struct gk20a *g);
@@ -161,7 +169,7 @@ int nvgpu_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id);
 int nvgpu_pmu_ap_send_command(struct gk20a *g,
                 union pmu_ap_cmd *p_ap_cmd, bool b_block);
 
-void nvgpu_pmu_set_golden_image_initialized(struct gk20a *g, bool initialized);
+void nvgpu_pmu_set_golden_image_initialized(struct gk20a *g, u8 state);
 
 /* PG ops*/
 int nvgpu_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,