Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: pmu: add check before enabling elpg.
Do not enable/disable elpg if platform->can_elpg is false.

Bug 1870556

Change-Id: I82d1fc4efdccc518827a6150fd3c17f6112e2f4a
Signed-off-by: Deepak Goyal <dgoyal@nvidia.com>
Reviewed-on: http://git-master/r/1465816
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: d0955cad82
Commit: 5a4926f291
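The same guard is applied at every call site that enables or disables ELPG: fetch the platform data for the device and only touch ELPG when both g->support_pmu and platform->can_elpg are set. A minimal sketch of that pattern is shown below; the helper name elpg_allowed is hypothetical and used only for illustration, while the fields and calls it uses (gk20a_platform, can_elpg, dev_get_drvdata, gk20a_pmu_enable_elpg) are the ones appearing in the hunks that follow.

/* Sketch only, not part of the commit: illustrates the guard this change
 * applies at each ELPG call site. */
static bool elpg_allowed(struct gk20a *g)
{
	struct gk20a_platform *platform = dev_get_drvdata(g->dev);

	/* ELPG is toggled only when the PMU is supported and the
	 * platform declares ELPG as usable. */
	return g->support_pmu && platform->can_elpg;
}

/* Usage at a call site:
 *
 *	if (elpg_allowed(g))
 *		gk20a_pmu_enable_elpg(g);
 */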
@@ -1206,6 +1206,7 @@ static inline void get_exception_mmu_fault_info(
 void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
 {
 	struct fifo_gk20a *f = NULL;
+	struct gk20a_platform *platform;
 	u32 engine_enum = ENGINE_INVAL_GK20A;
 	u32 inst_id = 0;
 	struct fifo_engine_info_gk20a *engine_info;
@@ -1216,6 +1217,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
 		return;
 
 	f = &g->fifo;
+	platform = dev_get_drvdata(g->dev);
 
 	engine_info = gk20a_fifo_get_engine_info(g, engine_id);
 
@@ -1228,7 +1230,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
 		nvgpu_err(g, "unsupported engine_id %d", engine_id);
 
 	if (engine_enum == ENGINE_GR_GK20A) {
-		if (g->support_pmu && g->elpg_enabled)
+		if (g->support_pmu && platform->can_elpg)
 			gk20a_pmu_disable_elpg(g);
 		/* resetting engine will alter read/write index.
 		 * need to flush circular buffer before re-enabling FECS.
@@ -1241,7 +1243,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
 		/* resetting engine using mc_enable_r() is not
 		enough, we do full init sequence */
 		gk20a_gr_reset(g);
-		if (g->support_pmu && g->elpg_enabled)
+		if (g->support_pmu && platform->can_elpg)
 			gk20a_pmu_enable_elpg(g);
 	}
 	if ((engine_enum == ENGINE_GRCE_GK20A) ||
@@ -1466,6 +1468,7 @@ static bool gk20a_fifo_handle_mmu_fault(
 		bool id_is_tsg)
 {
 	bool fake_fault;
+	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
 	unsigned long fault_id;
 	unsigned long engine_mmu_fault_id;
 	bool verbose = true;
@@ -1476,7 +1479,7 @@ static bool gk20a_fifo_handle_mmu_fault(
 	g->fifo.deferred_reset_pending = false;
 
 	/* Disable power management */
-	if (g->support_pmu && g->elpg_enabled)
+	if (g->support_pmu && platform->can_elpg)
 		gk20a_pmu_disable_elpg(g);
 	if (g->ops.clock_gating.slcg_gr_load_gating_prod)
 		g->ops.clock_gating.slcg_gr_load_gating_prod(g,
@@ -1675,8 +1678,9 @@ static bool gk20a_fifo_handle_mmu_fault(
 			gr_gpfifo_ctl_semaphore_access_enabled_f());
 
 	/* It is safe to enable ELPG again. */
-	if (g->support_pmu && g->elpg_enabled)
+	if (g->support_pmu && platform->can_elpg)
 		gk20a_pmu_enable_elpg(g);
 
 	return verbose;
 }
 
@@ -3126,6 +3126,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
 			struct nvgpu_alloc_obj_ctx_args *args)
 {
 	struct gk20a *g = c->g;
+	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
 	struct fifo_gk20a *f = &g->fifo;
 	struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx;
 	struct tsg_gk20a *tsg = NULL;
@@ -3280,7 +3281,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
 
 		args->flags |= NVGPU_ALLOC_OBJ_FLAGS_LOCKBOOST_ZERO;
 
-		if (g->support_pmu)
+		if (g->support_pmu && platform->can_elpg)
 			gk20a_pmu_enable_elpg(g);
 	}
 
@@ -4936,6 +4936,7 @@ clean_up:
 int gk20a_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg)
 {
 	u32 status = 0;
+	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
 
 	if (enable_pg == true) {
 		if (g->ops.pmu.pmu_pg_engines_feature_list &&
@@ -4945,7 +4946,7 @@ int gk20a_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg)
 			if (g->ops.pmu.pmu_lpwr_enable_pg)
 				status = g->ops.pmu.pmu_lpwr_enable_pg(g,
 					true);
-		} else if (g->support_pmu)
+		} else if (g->support_pmu && platform->can_elpg)
 			status = gk20a_pmu_enable_elpg(g);
 	} else if (enable_pg == false) {
 		if (g->ops.pmu.pmu_pg_engines_feature_list &&
@@ -4955,7 +4956,7 @@ int gk20a_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg)
 			if (g->ops.pmu.pmu_lpwr_disable_pg)
 				status = g->ops.pmu.pmu_lpwr_disable_pg(g,
 					true);
-		} else if (g->support_pmu)
+		} else if (g->support_pmu && platform->can_elpg)
 			status = gk20a_pmu_disable_elpg(g);
 	}
 
@@ -337,6 +337,7 @@ u32 nvgpu_lpwr_is_rppg_supported(struct gk20a *g, u32 pstate_num)
 int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock)
 {
 	struct pmu_gk20a *pmu = &g->pmu;
+	struct gk20a_platform *platform = dev_get_drvdata(g->dev);
 	u32 status = 0;
 	u32 is_mscg_supported = 0;
 	u32 is_rppg_supported = 0;
@@ -363,7 +364,7 @@ int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock)
 		is_rppg_supported = nvgpu_lpwr_is_rppg_supported(g,
 				present_pstate);
 		if (is_rppg_supported) {
-			if (g->support_pmu && g->elpg_enabled)
+			if (g->support_pmu && platform->can_elpg)
 				status = gk20a_pmu_enable_elpg(g);
 		}
 