Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: add wrapper for MS_LTC disallow/allow
- add separate wrapper function for sending ALLOW and DISALLOW
  RPCs for MS_LTC engine
- add separate SW blocker function for MS_LTC

Bug 200763448

Change-Id: I80b6c59f6acaec03ab9fcd2e1ce82817f55124b2
Signed-off-by: Divya <dsinghatwari@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2603122
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
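At its core, each new wrapper posts a PG RPC to the PMU and tracks the result through a new elpg_ms SW state machine. A condensed sketch of the disable path, using only names that appear in the diff below (flow illustration, not extra code in the commit):

    /* Condensed flow of nvgpu_pmu_disable_elpg_ms(); names are from the diff below. */
    pmu->pg->elpg_ms_stat = PMU_ELPG_MS_STAT_OFF_PENDING;
    ret = pmu->pg->disallow(g, pmu, PMU_PG_ELPG_ENGINE_ID_MS_LTC); /* post DISALLOW RPC */
    /* ga10b_pg_rpc_handler() sets elpg_ms_stat to OFF when the ack arrives */
    pmu_wait_message_cond(pmu, nvgpu_get_poll_timeout(g),
            &pmu->pg->elpg_ms_stat, PMU_ELPG_MS_STAT_OFF);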
@@ -585,7 +585,7 @@ static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
     case PMU_UNIT_PG_LOADING:
     case PMU_UNIT_PG:
         if (pmu->pg->rpc_handler != NULL) {
-            pmu->pg->rpc_handler(g, pmu, &rpc);
+            pmu->pg->rpc_handler(g, pmu, &rpc, rpc_payload);
         }
         break;
     default:
@@ -292,10 +292,12 @@ static int ga10b_pmu_pg_load_buff(struct gk20a *g, struct nvgpu_pmu *pmu)
 }
 
 static void ga10b_pg_rpc_handler(struct gk20a *g, struct nvgpu_pmu *pmu,
-        struct nv_pmu_rpc_header *rpc)
+        struct nv_pmu_rpc_header *rpc, struct rpc_handler_payload *rpc_payload)
 {
-    nvgpu_log_fn(g, " ");
+    struct pmu_rpc_struct_lpwr_pg_ctrl_allow *rpc_allow;
+    struct pmu_rpc_struct_lpwr_pg_ctrl_disallow *rpc_disallow;
+
+    nvgpu_log_fn(g, " ");
     switch (rpc->function) {
     case NV_PMU_RPC_ID_PG_LOADING_PRE_INIT:
         nvgpu_pmu_dbg(g, "Reply to PG_PRE_INIT");
@@ -320,11 +322,25 @@ static void ga10b_pg_rpc_handler(struct gk20a *g, struct nvgpu_pmu *pmu,
         break;
     case NV_PMU_RPC_ID_PG_ALLOW:
         nvgpu_pmu_dbg(g, "Reply to PG_ALLOW");
+        rpc_allow = (struct pmu_rpc_struct_lpwr_pg_ctrl_allow *)rpc_payload->rpc_buff;
+        if (rpc_allow->ctrl_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
+            pmu->pg->elpg_stat = PMU_ELPG_STAT_ON;
+        } else if (rpc_allow->ctrl_id == PMU_PG_ELPG_ENGINE_ID_MS_LTC) {
+            pmu->pg->elpg_ms_stat = PMU_ELPG_MS_STAT_ON;
+        } else {
+            nvgpu_err(g, "Invalid pg_engine_id");
+        }
         break;
     case NV_PMU_RPC_ID_PG_DISALLOW:
         nvgpu_pmu_dbg(g, "Reply to PG_DISALLOW");
+        rpc_disallow = (struct pmu_rpc_struct_lpwr_pg_ctrl_disallow *)(void *)rpc_payload->rpc_buff;
+        if (rpc_disallow->ctrl_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
+            pmu->pg->elpg_stat = PMU_ELPG_STAT_OFF;
+        } else if (rpc_disallow->ctrl_id == PMU_PG_ELPG_ENGINE_ID_MS_LTC) {
+            pmu->pg->elpg_ms_stat = PMU_ELPG_MS_STAT_OFF;
+        } else {
+            nvgpu_err(g, "Invalid pg_engine_id");
+        }
         break;
     default:
         nvgpu_err(g,
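The new rpc_payload argument is what lets one handler ack both engines: the reply's ctrl_id picks which SW status word to update. A stand-alone sketch of that dispatch, with stub types and a hypothetical MS_LTC id value (illustration only, not code from the commit):

    /* Stand-alone sketch of the ctrl_id dispatch above; types and values are stubs. */
    #include <stdio.h>

    #define ENGINE_ID_GRAPHICS 0u   /* stand-in for PMU_PG_ELPG_ENGINE_ID_GRAPHICS */
    #define ENGINE_ID_MS_LTC   5u   /* hypothetical stand-in value */

    struct ack { unsigned int ctrl_id; };

    /* Mirrors the switch arms: flip the status word for whichever engine acked. */
    static void dispatch_ack(const struct ack *a, unsigned int *gr_stat,
                             unsigned int *ms_stat, unsigned int new_state)
    {
        if (a->ctrl_id == ENGINE_ID_GRAPHICS) {
            *gr_stat = new_state;
        } else if (a->ctrl_id == ENGINE_ID_MS_LTC) {
            *ms_stat = new_state;
        } else {
            fprintf(stderr, "Invalid pg_engine_id\n");
        }
    }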
@@ -294,12 +294,17 @@ int nvgpu_pmu_enable_elpg(struct gk20a *g)
             pmu->pg->mscg_stat == PMU_MSCG_DISABLED) {
             continue;
         }
 
         if ((BIT32(pg_engine_id) & pg_engine_id_list) != 0U) {
+            if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
                 ret = pmu_enable_elpg_locked(g, pg_engine_id);
+            } else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS_LTC) {
+                ret = nvgpu_pmu_enable_elpg_ms(g);
+            } else {
+                ret = -EINVAL;
+                nvgpu_err(g, "Invalid pg_engine_id");
+            }
         }
     }
 
 exit_unlock:
     nvgpu_mutex_release(&pmu->pg->elpg_mutex);
     nvgpu_log_fn(g, "done");
@@ -399,6 +404,12 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
             pmu->pg->mscg_transition_state =
                 PMU_ELPG_STAT_OFF_PENDING;
         }
+
+        if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS_LTC) {
+            ret = nvgpu_pmu_disable_elpg_ms(g);
+            continue;
+        }
+
         if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
             ptr = &pmu->pg->elpg_stat;
         } else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
@@ -468,6 +479,148 @@ exit:
     return ret;
 }
 
+int nvgpu_pmu_disable_elpg_ms(struct gk20a *g)
+{
+    struct nvgpu_pmu *pmu = g->pmu;
+    int ret = 0;
+    u32 *ptr = NULL;
+
+    nvgpu_log_fn(g, " ");
+
+    if (!is_pg_supported(g, pmu->pg)) {
+        return ret;
+    }
+
+    nvgpu_mutex_acquire(&pmu->pg->elpg_ms_mutex);
+
+    pmu->pg->elpg_ms_refcnt = nvgpu_safe_sub_s32(
+            pmu->pg->elpg_ms_refcnt, 1);
+    if (pmu->pg->elpg_ms_refcnt > 0) {
+        nvgpu_warn(g,
+            "%s(): possible elpg_ms refcnt mismatch. elpg_ms refcnt=%d",
+            __func__, pmu->pg->elpg_ms_refcnt);
+        WARN_ON(true);
+        ret = 0;
+        goto exit_unlock;
+    }
+
+    /* cancel off_on_pending and return */
+    if (pmu->pg->elpg_ms_stat == PMU_ELPG_MS_STAT_OFF_ON_PENDING) {
+        pmu->pg->elpg_ms_stat = PMU_ELPG_MS_STAT_OFF;
+        ret = 0;
+        goto exit_unlock;
+    } else if (pmu->pg->elpg_ms_stat == PMU_ELPG_MS_STAT_ON_PENDING) {
+        /* wait if on_pending */
+        pmu_wait_message_cond(pmu, nvgpu_get_poll_timeout(g),
+            &pmu->pg->elpg_ms_stat, PMU_ELPG_MS_STAT_ON);
+
+        if (pmu->pg->elpg_ms_stat != PMU_ELPG_MS_STAT_ON) {
+            nvgpu_err(g, "ELPG_MS_ALLOW_ACK failed, elpg_ms_stat=%d",
+                pmu->pg->elpg_ms_stat);
+            pmu_dump_elpg_stats(pmu);
+            nvgpu_pmu_dump_falcon_stats(pmu);
+            ret = -EBUSY;
+            goto exit_unlock;
+        }
+    } else if (pmu->pg->elpg_ms_stat != PMU_ELPG_MS_STAT_ON) {
+        /* return if ELPG_MS is already off */
+        ret = 0;
+        nvgpu_err(g, "ELPG_MS already disabled");
+        goto exit_unlock;
+    } else {
+        pmu->pg->elpg_ms_stat = PMU_ELPG_MS_STAT_OFF_PENDING;
+        ptr = &pmu->pg->elpg_ms_stat;
+
+        nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_DISALLOW for MS_LTC");
+        if (pmu->pg->disallow == NULL) {
+            nvgpu_err(g,
+                "PG disallow function not assigned");
+            ret = -EINVAL;
+            goto exit_unlock;
+        }
+        ret = pmu->pg->disallow(g, pmu, PMU_PG_ELPG_ENGINE_ID_MS_LTC);
+        if (ret != 0) {
+            nvgpu_err(g, "PMU_PG_ELPG_CMD_DISALLOW "
+                "cmd post for MS_LTC failed");
+            goto exit_unlock;
+        }
+
+        pmu_wait_message_cond(pmu,
+            nvgpu_get_poll_timeout(g),
+            ptr, PMU_ELPG_MS_STAT_OFF);
+        if (*ptr != PMU_ELPG_MS_STAT_OFF) {
+            nvgpu_err(g, "ELPG_MS_DISALLOW_ACK failed");
+            pmu_dump_elpg_stats(pmu);
+            nvgpu_pmu_dump_falcon_stats(pmu);
+            ret = -EBUSY;
+            goto exit_unlock;
+        }
+    }
+exit_unlock:
+    nvgpu_mutex_release(&pmu->pg->elpg_ms_mutex);
+    nvgpu_log_fn(g, "done");
+    return ret;
+}
+
+int nvgpu_pmu_enable_elpg_ms(struct gk20a *g)
+{
+    struct nvgpu_pmu *pmu = g->pmu;
+    int status = 0;
+
+    nvgpu_log_fn(g, " ");
+
+    if (!is_pg_supported(g, g->pmu->pg)) {
+        return status;
+    }
+
+    nvgpu_mutex_acquire(&pmu->pg->elpg_ms_mutex);
+
+    pmu->pg->elpg_ms_refcnt = nvgpu_safe_add_s32(
+            pmu->pg->elpg_ms_refcnt, 1);
+    if (pmu->pg->elpg_ms_refcnt <= 0) {
+        goto exit_unlock;
+    }
+
+    /* something is not right if we end up in following code path */
+    if (unlikely(pmu->pg->elpg_ms_refcnt > 1)) {
+        nvgpu_warn(g,
+            "%s(): possible elpg_ms_refcnt mismatch. elpg_ms refcnt=%d",
+            __func__, pmu->pg->elpg_ms_refcnt);
+        WARN_ON(true);
+    }
+
+    /* do NOT enable elpg_ms until golden ctx is created */
+    if (unlikely(!pmu->pg->golden_image_initialized)) {
+        goto exit_unlock;
+    }
+
+    if (pmu->pg->elpg_ms_stat != PMU_ELPG_MS_STAT_OFF) {
+        nvgpu_err(g, "ELPG_MS already enabled");
+    }
+
+    pmu->pg->elpg_ms_stat = PMU_ELPG_MS_STAT_ON_PENDING;
+
+    nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_ALLOW for MS_LTC");
+    if (pmu->pg->allow == NULL) {
+        nvgpu_err(g, "PG allow function not assigned");
+        status = -EINVAL;
+        goto exit_unlock;
+    }
+    status = pmu->pg->allow(g, pmu, PMU_PG_ELPG_ENGINE_ID_MS_LTC);
+
+    if (status != 0) {
+        nvgpu_log_fn(g, "PG allow for MS_LTC FAILED err=%d",
+            status);
+    } else {
+        nvgpu_log_fn(g, "done");
+    }
+
+exit_unlock:
+    nvgpu_mutex_release(&pmu->pg->elpg_ms_mutex);
+    nvgpu_log_fn(g, "done");
+    return status;
+}
+
 /* PG init */
 void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg,
         void *param, u32 status)
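Both wrappers are refcounted: disable_elpg_ms decrements and returns early while other references remain, and enable_elpg_ms increments and warns if it sees more than one reference, so calls must stay strictly paired. A hedged usage sketch (the caller context is hypothetical):

    /* Hypothetical caller: keep MS_LTC power gating off around LTC-sensitive work. */
    int err = nvgpu_pmu_disable_elpg_ms(g);    /* posts DISALLOW and waits for the ack */
    if (err == 0) {
        /* ... touch LTC state safely here ... */
        err = nvgpu_pmu_enable_elpg_ms(g);     /* posts ALLOW; does not wait for the ack */
    }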
@@ -789,8 +942,9 @@ int nvgpu_pmu_pg_sw_setup(struct gk20a *g, struct nvgpu_pmu *pmu,
         return 0;
     }
 
-    /* start with elpg disabled until first enable call */
+    /* start with elpg and elpg_ms disabled until first enable call */
     pg->elpg_refcnt = 0;
+    pg->elpg_ms_refcnt = 0;
 
     /* skip seq_buf alloc during unrailgate path */
     if (!nvgpu_mem_is_valid(&pg->seq_buf)) {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -40,6 +40,19 @@
  */
 #define PMU_ELPG_STAT_OFF_ON_PENDING	4U
 
+/* elpg_ms is off */
+#define PMU_ELPG_MS_STAT_OFF		0U
+/* elpg_ms is on */
+#define PMU_ELPG_MS_STAT_ON		1U
+/* elpg_ms is off, ALLOW cmd has been sent, wait for ack */
+#define PMU_ELPG_MS_STAT_ON_PENDING	2U
+/* elpg_ms is on, DISALLOW cmd has been sent, wait for ack */
+#define PMU_ELPG_MS_STAT_OFF_PENDING	3U
+/* elpg_ms is off, caller has requested on, but ALLOW
+ * cmd hasn't been sent due to ENABLE_ALLOW delay
+ */
+#define PMU_ELPG_MS_STAT_OFF_ON_PENDING	4U
+
 #define PMU_PGENG_GR_BUFFER_IDX_INIT	0U
 #define PMU_PGENG_GR_BUFFER_IDX_ZBC	1U
 #define PMU_PGENG_GR_BUFFER_IDX_FECS	2U
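Together with the PMU_ELPG_STAT_* values above, these defines describe a small state machine driven by the ALLOW/DISALLOW posts and their acks. A summary plus a hypothetical helper (not part of the commit) that callers could use to detect an outstanding ack:

    /*
     * Informal transitions, derived from the comments above:
     *   OFF --ALLOW posted-->    ON_PENDING  --ack--> ON
     *   ON  --DISALLOW posted--> OFF_PENDING --ack--> OFF
     *   OFF --on requested, ALLOW delayed--> OFF_ON_PENDING
     */

    /* Hypothetical helper, not in the commit: true while an ack is outstanding. */
    static inline bool pmu_elpg_ms_in_transition(u32 stat)
    {
        return (stat == PMU_ELPG_MS_STAT_ON_PENDING) ||
               (stat == PMU_ELPG_MS_STAT_OFF_PENDING);
    }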
@@ -210,6 +210,7 @@ int nvgpu_pmu_early_init(struct gk20a *g)
     g->can_elpg = false;
     g->elpg_enabled = false;
     g->aelpg_enabled = false;
+    g->elpg_ms_enabled = false;
     nvgpu_set_enabled(g, NVGPU_PMU_PERFMON, false);
+    nvgpu_set_enabled(g, NVGPU_ELPG_MS_ENABLED, false);
 #ifdef CONFIG_NVGPU_DGPU
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -116,3 +116,50 @@ done:
     nvgpu_mutex_release(&g->cg_pg_lock);
     return err;
 }
+
+int nvgpu_pg_elpg_ms_enable(struct gk20a *g)
+{
+    int err = 0;
+#ifdef CONFIG_NVGPU_LS_PMU
+    nvgpu_log_fn(g, " ");
+
+    if (!g->can_elpg) {
+        return 0;
+    }
+
+    if (g->pmu->pg->initialized) {
+        g->ops.gr.init.wait_initialized(g);
+
+        nvgpu_mutex_acquire(&g->cg_pg_lock);
+        if ((g->elpg_enabled) && (g->elpg_ms_enabled)) {
+            err = nvgpu_pmu_enable_elpg_ms(g);
+        }
+        nvgpu_mutex_release(&g->cg_pg_lock);
+    }
+#endif
+    return err;
+}
+
+int nvgpu_pg_elpg_ms_disable(struct gk20a *g)
+{
+    int err = 0;
+#ifdef CONFIG_NVGPU_LS_PMU
+    nvgpu_log_fn(g, " ");
+
+    if (!g->can_elpg) {
+        return 0;
+    }
+
+    if (g->pmu->pg->initialized) {
+        g->ops.gr.init.wait_initialized(g);
+
+        nvgpu_mutex_acquire(&g->cg_pg_lock);
+        if ((g->elpg_enabled) && (g->elpg_ms_enabled)) {
+            err = nvgpu_pmu_disable_elpg_ms(g);
+        }
+        nvgpu_mutex_release(&g->cg_pg_lock);
+    }
+#endif
+    return err;
+}
+
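These SW blockers are deliberately conservative: they silently succeed unless ELPG is usable at all and both the elpg and elpg_ms platform switches are set. A condensed restatement of that gate (illustrative only; the helper name is made up):

    /* Illustrative predicate: when nvgpu_pg_elpg_ms_enable/disable actually act. */
    static bool elpg_ms_blocker_applies(struct gk20a *g)
    {
        return g->can_elpg && g->pmu->pg->initialized &&
               g->elpg_enabled && g->elpg_ms_enabled;
    }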
@@ -556,6 +556,7 @@ struct gk20a {
     bool elcg_enabled;
     /** @cond DOXYGEN_SHOULD_SKIP_THIS */
     bool elpg_enabled;
+    bool elpg_ms_enabled;
     bool aelpg_enabled;
     bool can_elpg;
     bool mscg_enabled;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -32,10 +32,12 @@
 #include <nvgpu/pmu/pmuif/pg.h>
 #include <nvgpu/timers.h>
 #include <nvgpu/nvgpu_mem.h>
+#include <include/nvgpu/pmu.h>
 
 struct nvgpu_pmu;
 struct vm_gk20a;
 struct pmu_pg_stats_data;
+struct rpc_handler_payload;
 
 /* PG defines used by nvgpu-pmu */
 #define PMU_PG_SEQ_BUF_SIZE 4096U
@@ -70,12 +72,15 @@ struct nvgpu_pg_init {
 
 struct nvgpu_pmu_pg {
     u32 elpg_stat;
+    u32 elpg_ms_stat;
 #define PMU_ELPG_ENABLE_ALLOW_DELAY_MSEC	1U /* msec */
     struct nvgpu_pg_init pg_init;
     struct nvgpu_mutex pg_mutex; /* protect pg-RPPG/MSCG enable/disable */
     struct nvgpu_mutex elpg_mutex; /* protect elpg enable/disable */
+    struct nvgpu_mutex elpg_ms_mutex; /* protect elpg_ms enable/disable */
     /* disable -1, enable +1, <=0 elpg disabled, > 0 elpg enabled */
     int elpg_refcnt;
+    int elpg_ms_refcnt;
     u32 aelpg_param[5];
     bool zbc_ready;
     bool zbc_save_done;
@@ -113,7 +118,7 @@ struct nvgpu_pmu_pg {
     int (*load_buff)(struct gk20a *g, struct nvgpu_pmu *pmu);
     int (*hw_load_zbc)(struct gk20a *g, struct nvgpu_pmu *pmu);
     void (*rpc_handler)(struct gk20a *g, struct nvgpu_pmu *pmu,
-            struct nv_pmu_rpc_header *rpc);
+            struct nv_pmu_rpc_header *rpc, struct rpc_handler_payload *rpc_payload);
     int (*init_send)(struct gk20a *g, struct nvgpu_pmu *pmu, u8 pg_engine_id);
 };
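With the function-pointer signature widened, every rpc_handler implementation and assignment must use the new prototype; the ga10b handler in this change is one such implementation. A sketch of the wiring (the assignment site is hypothetical; only the handler name comes from the diff above):

    /* Hypothetical HAL wiring site; only the handler name is from this commit. */
    pmu->pg->rpc_handler = ga10b_pg_rpc_handler;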
@@ -140,6 +145,9 @@ void nvgpu_pmu_pg_destroy(struct gk20a *g, struct nvgpu_pmu *pmu,
 int nvgpu_pmu_reenable_elpg(struct gk20a *g);
 int nvgpu_pmu_enable_elpg(struct gk20a *g);
 int nvgpu_pmu_disable_elpg(struct gk20a *g);
+int nvgpu_pmu_enable_elpg_ms(struct gk20a *g);
+int nvgpu_pmu_disable_elpg_ms(struct gk20a *g);
+
 int nvgpu_pmu_pg_global_enable(struct gk20a *g, bool enable_pg);
 
 int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
@@ -42,12 +42,29 @@ struct gk20a;
     } \
     err; \
     })
+
+#define nvgpu_pg_elpg_ms_protected_call(g, func) \
+    ({ \
+        int status = 0; \
+        status = nvgpu_pg_elpg_ms_disable(g);\
+        if (status != 0) {\
+            (void)nvgpu_pg_elpg_ms_enable(g);\
+        } \
+        if (status == 0) { \
+            status = (func); \
+            (void)nvgpu_pg_elpg_ms_enable(g);\
+        } \
+        status; \
+    })
 #else
 #define nvgpu_pg_elpg_protected_call(g, func) func
+#define nvgpu_pg_elpg_ms_protected_call(g, func) func
 #endif
 
 int nvgpu_pg_elpg_disable(struct gk20a *g);
 int nvgpu_pg_elpg_enable(struct gk20a *g);
+int nvgpu_pg_elpg_ms_disable(struct gk20a *g);
+int nvgpu_pg_elpg_ms_enable(struct gk20a *g);
 bool nvgpu_pg_elpg_is_enabled(struct gk20a *g);
 int nvgpu_pg_elpg_set_elpg_enabled(struct gk20a *g, bool enable);
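Mirroring nvgpu_pg_elpg_protected_call(), the new macro evaluates an expression with MS_LTC power gating blocked and yields the combined status; note that it re-enables even when the disable fails, so the refcount stays balanced either way. Hypothetical usage (ltc_sensitive_op is a stand-in):

    /* Hypothetical usage; ltc_sensitive_op() stands in for any LTC-touching call. */
    err = nvgpu_pg_elpg_ms_protected_call(g, ltc_sensitive_op(g));
    if (err != 0) {
        nvgpu_err(g, "op under MS_LTC blocker failed: %d", err);
    }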
@@ -223,6 +223,7 @@ static void nvgpu_init_pm_vars(struct gk20a *g)
     if (g->elpg_enabled) {
         nvgpu_set_enabled(g, NVGPU_ELPG_MS_ENABLED,
             platform->enable_elpg_ms);
+        g->elpg_ms_enabled = platform->enable_elpg_ms;
     }
 }