Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: update compiling out cg changes
The nvgpu_cg_pg_enable|disable functions are non-safe, hence compile out
power_features.c. The corresponding functions in cg.c are also not compiled,
e.g. nvgpu_cg_elcg_enable|disable, nvgpu_cg_blcg_mode_enable|disable,
nvgpu_cg_slcg_gr_perf_ltc_load_enable|disable and
nvgpu_cg_elcg_set_elcg|blcg|slcg_enabled. The BLCG handling in
nvgpu_cg_set_mode is non-safe, hence compile it out as well.

JIRA NVGPU-2175

Change-Id: I9940cc418d84eb30979dd50a2ed4a132473312fe
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2168957
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 3444d729fd
parent 3e288dd836
committed by Alex Waterman
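For context, a minimal standalone sketch of the compile-out pattern this change applies (illustrative only, not nvgpu source; the trimmed-down struct gk20a and the demo_cg_pg_disable helper are invented for the example): a non-safe helper is built only when CONFIG_NVGPU_NON_FUSA is defined, and every call site carries the same #ifdef, so safety builds contain neither the definition nor the call.

/*
 * Standalone sketch, not nvgpu code: the non-safe helper exists only when
 * CONFIG_NVGPU_NON_FUSA is defined, and its caller is guarded the same way.
 * "struct gk20a" here is a stand-in and demo_cg_pg_disable is a hypothetical
 * name.
 */
#include <stdio.h>

struct gk20a {
        int elcg_enabled;
};

#ifdef CONFIG_NVGPU_NON_FUSA
/* Only present in non-FuSa (non functional-safety) builds. */
static int demo_cg_pg_disable(struct gk20a *g)
{
        printf("disable clock/power gating, elcg_enabled=%d\n",
               g->elcg_enabled);
        return 0;
}
#endif

int main(void)
{
        struct gk20a g = { .elcg_enabled = 1 };
        int err = 0;

#ifdef CONFIG_NVGPU_NON_FUSA
        err = demo_cg_pg_disable(&g);
#else
        /* Safety build: the helper and its call are both compiled out. */
        (void)g;
#endif
        return err;
}

Building with cc -DCONFIG_NVGPU_NON_FUSA sketch.c includes the helper; omitting the define removes both the helper and the call, which mirrors how the Makefile change below adds power_features.c to srcs only when CONFIG_NVGPU_NON_FUSA is 1.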
@@ -945,11 +945,10 @@ sync:
  sources: [ common/sync/channel_sync_semaphore.c,
             include/nvgpu/channel_sync_semaphore.h ]
power_features:
  safe: no
  owner: Seema K
  children:
    power_features:
      safe: yes
      safe: no
      sources: [ common/power_features/power_features.c,
                 include/nvgpu/power_features/power_features.h ]
    cg:
@@ -165,6 +165,7 @@ CONFIG_NVGPU_HAL_NON_FUSA := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_HAL_NON_FUSA

# Enable non FUSA common code for normal build
CONFIG_NVGPU_NON_FUSA := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_NON_FUSA

CONFIG_NVGPU_CLK_ARB := 1
@@ -129,7 +129,6 @@ srcs += common/utils/assert.c \
        common/ptimer/ptimer.c \
        common/sync/channel_sync.c \
        common/sync/channel_sync_syncpt.c \
        common/power_features/power_features.c \
        common/power_features/cg/cg.c \
        common/fifo/preempt.c \
        common/fifo/channel.c \
@@ -642,3 +641,7 @@ srcs += common/sim/sim.c \
        common/sim/sim_pci.c \
        common/sim/sim_netlist.c
endif

ifeq ($(CONFIG_NVGPU_NON_FUSA),1)
srcs += common/power_features/power_features.c
endif
@@ -207,8 +207,9 @@ int nvgpu_dbg_set_powergate(struct dbg_session_gk20a *dbg_s, bool disable_powerg
                        return err;
                }

#ifdef CONFIG_NVGPU_NON_FUSA
                err = nvgpu_cg_pg_disable(g);

#endif
                if (err == 0) {
                        dbg_s->is_pg_disabled = true;
                        nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn,
@@ -219,17 +220,18 @@ int nvgpu_dbg_set_powergate(struct dbg_session_gk20a *dbg_s, bool disable_powerg
                /* release pending exceptions to fault/be handled as usual */
                /*TBD: ordering of these? */

#ifdef CONFIG_NVGPU_NON_FUSA
                err = nvgpu_cg_pg_enable(g);

                nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn, "module idle");

                gk20a_idle(g);

#endif
                if (err == 0) {
                        dbg_s->is_pg_disabled = false;
                        nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn,
                                        "pg enabled");
                }

                nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn, "module idle");

                gk20a_idle(g);
        }

        nvgpu_log(g, gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s powergate mode = %s done",
@@ -29,13 +29,17 @@ static void nvgpu_cg_set_mode(struct gk20a *g, u32 cgmode, u32 mode_config)
{
        u32 engine_idx;
        u32 active_engine_id = 0;
#ifdef CONFIG_NVGPU_NON_FUSA
        struct nvgpu_engine_info *engine_info = NULL;
#endif
        struct nvgpu_fifo *f = &g->fifo;

        nvgpu_log_fn(g, " ");

        for (engine_idx = 0; engine_idx < f->num_engines; ++engine_idx) {
                active_engine_id = f->active_engines_list[engine_idx];

#ifdef CONFIG_NVGPU_NON_FUSA
                engine_info = &f->engine_info[active_engine_id];

                /* gr_engine supports both BLCG and ELCG */
@@ -44,7 +48,9 @@ static void nvgpu_cg_set_mode(struct gk20a *g, u32 cgmode, u32 mode_config)
                        g->ops.therm.init_blcg_mode(g, (u32)mode_config,
                                                active_engine_id);
                        break;
                } else if (cgmode == ELCG_MODE) {
                } else
#endif
                if (cgmode == ELCG_MODE) {
                        g->ops.therm.init_elcg_mode(g, (u32)mode_config,
                                                active_engine_id);
                } else {
@@ -86,78 +92,6 @@ void nvgpu_cg_elcg_disable_no_wait(struct gk20a *g)
        nvgpu_mutex_release(&g->cg_pg_lock);
}

void nvgpu_cg_elcg_enable(struct gk20a *g)
{
        nvgpu_log_fn(g, " ");

        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG)) {
                return;
        }

        g->ops.gr.init.wait_initialized(g);

        nvgpu_mutex_acquire(&g->cg_pg_lock);
        if (g->elcg_enabled) {
                nvgpu_cg_set_mode(g, ELCG_MODE, ELCG_AUTO);
        }
        nvgpu_mutex_release(&g->cg_pg_lock);
}

void nvgpu_cg_elcg_disable(struct gk20a *g)
{
        nvgpu_log_fn(g, " ");

        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG)) {
                return;
        }

        g->ops.gr.init.wait_initialized(g);

        nvgpu_mutex_acquire(&g->cg_pg_lock);
        if (g->elcg_enabled) {
                nvgpu_cg_set_mode(g, ELCG_MODE, ELCG_RUN);
        }
        nvgpu_mutex_release(&g->cg_pg_lock);

}

void nvgpu_cg_blcg_mode_enable(struct gk20a *g)
{
        nvgpu_log_fn(g, " ");

        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
                return;
        }

        g->ops.gr.init.wait_initialized(g);

        nvgpu_mutex_acquire(&g->cg_pg_lock);
        if (g->blcg_enabled) {
                nvgpu_cg_set_mode(g, BLCG_MODE, BLCG_AUTO);
        }
        nvgpu_mutex_release(&g->cg_pg_lock);

}

void nvgpu_cg_blcg_mode_disable(struct gk20a *g)
{
        nvgpu_log_fn(g, " ");

        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
                return;
        }

        g->ops.gr.init.wait_initialized(g);

        nvgpu_mutex_acquire(&g->cg_pg_lock);
        if (g->blcg_enabled) {
                nvgpu_cg_set_mode(g, BLCG_MODE, BLCG_RUN);
        }
        nvgpu_mutex_release(&g->cg_pg_lock);


}

void nvgpu_cg_blcg_fb_ltc_load_enable(struct gk20a *g)
{
        nvgpu_log_fn(g, " ");
@@ -297,60 +231,6 @@ done:
        nvgpu_mutex_release(&g->cg_pg_lock);
}

void nvgpu_cg_slcg_gr_perf_ltc_load_enable(struct gk20a *g)
{
        nvgpu_log_fn(g, " ");

        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
                return;
        }

        g->ops.gr.init.wait_initialized(g);

        nvgpu_mutex_acquire(&g->cg_pg_lock);
        if (!g->slcg_enabled) {
                goto done;
        }
        if (g->ops.cg.slcg_ltc_load_gating_prod != NULL) {
                g->ops.cg.slcg_ltc_load_gating_prod(g, true);
        }
        if (g->ops.cg.slcg_perf_load_gating_prod != NULL) {
                g->ops.cg.slcg_perf_load_gating_prod(g, true);
        }
        if (g->ops.cg.slcg_gr_load_gating_prod != NULL) {
                g->ops.cg.slcg_gr_load_gating_prod(g, true);
        }
done:
        nvgpu_mutex_release(&g->cg_pg_lock);
}

void nvgpu_cg_slcg_gr_perf_ltc_load_disable(struct gk20a *g)
{
        nvgpu_log_fn(g, " ");

        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
                return;
        }

        g->ops.gr.init.wait_initialized(g);

        nvgpu_mutex_acquire(&g->cg_pg_lock);
        if (!g->slcg_enabled) {
                goto done;
        }
        if (g->ops.cg.slcg_gr_load_gating_prod != NULL) {
                g->ops.cg.slcg_gr_load_gating_prod(g, false);
        }
        if (g->ops.cg.slcg_perf_load_gating_prod != NULL) {
                g->ops.cg.slcg_perf_load_gating_prod(g, false);
        }
        if (g->ops.cg.slcg_ltc_load_gating_prod != NULL) {
                g->ops.cg.slcg_ltc_load_gating_prod(g, false);
        }
done:
        nvgpu_mutex_release(&g->cg_pg_lock);
}

void nvgpu_cg_slcg_fifo_load_enable(struct gk20a *g)
{
        nvgpu_log_fn(g, " ");
@@ -474,6 +354,133 @@ pg_gr_load:
        nvgpu_mutex_release(&g->cg_pg_lock);
}

#ifdef CONFIG_NVGPU_NON_FUSA
void nvgpu_cg_elcg_enable(struct gk20a *g)
{
        nvgpu_log_fn(g, " ");

        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG)) {
                return;
        }

        g->ops.gr.init.wait_initialized(g);

        nvgpu_mutex_acquire(&g->cg_pg_lock);
        if (g->elcg_enabled) {
                nvgpu_cg_set_mode(g, ELCG_MODE, ELCG_AUTO);
        }
        nvgpu_mutex_release(&g->cg_pg_lock);
}

void nvgpu_cg_elcg_disable(struct gk20a *g)
{
        nvgpu_log_fn(g, " ");

        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG)) {
                return;
        }

        g->ops.gr.init.wait_initialized(g);

        nvgpu_mutex_acquire(&g->cg_pg_lock);
        if (g->elcg_enabled) {
                nvgpu_cg_set_mode(g, ELCG_MODE, ELCG_RUN);
        }
        nvgpu_mutex_release(&g->cg_pg_lock);

}

void nvgpu_cg_blcg_mode_enable(struct gk20a *g)
{
        nvgpu_log_fn(g, " ");

        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
                return;
        }

        g->ops.gr.init.wait_initialized(g);

        nvgpu_mutex_acquire(&g->cg_pg_lock);
        if (g->blcg_enabled) {
                nvgpu_cg_set_mode(g, BLCG_MODE, BLCG_AUTO);
        }
        nvgpu_mutex_release(&g->cg_pg_lock);

}

void nvgpu_cg_blcg_mode_disable(struct gk20a *g)
{
        nvgpu_log_fn(g, " ");

        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
                return;
        }

        g->ops.gr.init.wait_initialized(g);

        nvgpu_mutex_acquire(&g->cg_pg_lock);
        if (g->blcg_enabled) {
                nvgpu_cg_set_mode(g, BLCG_MODE, BLCG_RUN);
        }
        nvgpu_mutex_release(&g->cg_pg_lock);


}

void nvgpu_cg_slcg_gr_perf_ltc_load_enable(struct gk20a *g)
{
        nvgpu_log_fn(g, " ");

        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
                return;
        }

        g->ops.gr.init.wait_initialized(g);

        nvgpu_mutex_acquire(&g->cg_pg_lock);
        if (!g->slcg_enabled) {
                goto done;
        }
        if (g->ops.cg.slcg_ltc_load_gating_prod != NULL) {
                g->ops.cg.slcg_ltc_load_gating_prod(g, true);
        }
        if (g->ops.cg.slcg_perf_load_gating_prod != NULL) {
                g->ops.cg.slcg_perf_load_gating_prod(g, true);
        }
        if (g->ops.cg.slcg_gr_load_gating_prod != NULL) {
                g->ops.cg.slcg_gr_load_gating_prod(g, true);
        }
done:
        nvgpu_mutex_release(&g->cg_pg_lock);
}

void nvgpu_cg_slcg_gr_perf_ltc_load_disable(struct gk20a *g)
{
        nvgpu_log_fn(g, " ");

        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
                return;
        }

        g->ops.gr.init.wait_initialized(g);

        nvgpu_mutex_acquire(&g->cg_pg_lock);
        if (!g->slcg_enabled) {
                goto done;
        }
        if (g->ops.cg.slcg_gr_load_gating_prod != NULL) {
                g->ops.cg.slcg_gr_load_gating_prod(g, false);
        }
        if (g->ops.cg.slcg_perf_load_gating_prod != NULL) {
                g->ops.cg.slcg_perf_load_gating_prod(g, false);
        }
        if (g->ops.cg.slcg_ltc_load_gating_prod != NULL) {
                g->ops.cg.slcg_ltc_load_gating_prod(g, false);
        }
done:
        nvgpu_mutex_release(&g->cg_pg_lock);
}

void nvgpu_cg_elcg_set_elcg_enabled(struct gk20a *g, bool enable)
{
        nvgpu_log_fn(g, " ");
@@ -637,3 +644,4 @@ void nvgpu_cg_slcg_set_slcg_enabled(struct gk20a *g, bool enable)
done:
        nvgpu_mutex_release(&g->cg_pg_lock);
}
#endif
@@ -191,9 +191,11 @@ void gv11b_fifo_recover(struct gk20a *g, u32 act_eng_bitmask,
        /* Disable runlist scheduler */
        nvgpu_runlist_set_state(g, runlists_mask, RUNLIST_DISABLED);

#ifdef CONFIG_NVGPU_NON_FUSA
        if (nvgpu_cg_pg_disable(g) != 0) {
                nvgpu_warn(g, "fail to disable power mgmt");
        }
#endif

        if (rc_type == RC_TYPE_MMU_FAULT) {
                gk20a_debug_dump(g);
@@ -298,9 +300,11 @@ void gv11b_fifo_recover(struct gk20a *g, u32 act_eng_bitmask,

        nvgpu_runlist_set_state(g, runlists_mask, RUNLIST_ENABLED);

#ifdef CONFIG_NVGPU_NON_FUSA
        if (nvgpu_cg_pg_enable(g) != 0) {
                nvgpu_warn(g, "fail to enable power mgmt");
        }
#endif

        g->ops.fifo.intr_unset_recover_mask(g);
@@ -49,28 +49,34 @@ struct gk20a;
struct nvgpu_fifo;

void nvgpu_cg_init_gr_load_gating_prod(struct gk20a *g);
void nvgpu_cg_elcg_enable(struct gk20a *g);
void nvgpu_cg_elcg_disable(struct gk20a *g);
void nvgpu_cg_elcg_enable_no_wait(struct gk20a *g);
void nvgpu_cg_elcg_disable_no_wait(struct gk20a *g);
void nvgpu_cg_elcg_set_elcg_enabled(struct gk20a *g, bool enable);

void nvgpu_cg_blcg_mode_enable(struct gk20a *g);
void nvgpu_cg_blcg_mode_disable(struct gk20a *g);
void nvgpu_cg_blcg_fb_ltc_load_enable(struct gk20a *g);
void nvgpu_cg_blcg_fifo_load_enable(struct gk20a *g);
void nvgpu_cg_blcg_pmu_load_enable(struct gk20a *g);
void nvgpu_cg_blcg_ce_load_enable(struct gk20a *g);
void nvgpu_cg_blcg_gr_load_enable(struct gk20a *g);
void nvgpu_cg_blcg_set_blcg_enabled(struct gk20a *g, bool enable);

void nvgpu_cg_slcg_gr_perf_ltc_load_enable(struct gk20a *g);
void nvgpu_cg_slcg_gr_perf_ltc_load_disable(struct gk20a *g);
void nvgpu_cg_slcg_fb_ltc_load_enable(struct gk20a *g);
void nvgpu_cg_slcg_priring_load_enable(struct gk20a *g);
void nvgpu_cg_slcg_fifo_load_enable(struct gk20a *g);
void nvgpu_cg_slcg_pmu_load_enable(struct gk20a *g);
void nvgpu_cg_slcg_ce2_load_enable(struct gk20a *g);

#ifdef CONFIG_NVGPU_NON_FUSA

void nvgpu_cg_elcg_enable(struct gk20a *g);
void nvgpu_cg_elcg_disable(struct gk20a *g);
void nvgpu_cg_elcg_set_elcg_enabled(struct gk20a *g, bool enable);

void nvgpu_cg_blcg_mode_enable(struct gk20a *g);
void nvgpu_cg_blcg_mode_disable(struct gk20a *g);
void nvgpu_cg_blcg_set_blcg_enabled(struct gk20a *g, bool enable);

void nvgpu_cg_slcg_gr_perf_ltc_load_enable(struct gk20a *g);
void nvgpu_cg_slcg_gr_perf_ltc_load_disable(struct gk20a *g);
void nvgpu_cg_slcg_set_slcg_enabled(struct gk20a *g, bool enable);

#endif
#endif /*NVGPU_POWER_FEATURES_CG_H*/
@@ -24,6 +24,8 @@
#ifndef NVGPU_POWER_FEATURES_H
#define NVGPU_POWER_FEATURES_H

#ifdef CONFIG_NVGPU_NON_FUSA

#include <nvgpu/types.h>

struct gk20a;

@@ -31,4 +33,5 @@ struct gk20a;
int nvgpu_cg_pg_disable(struct gk20a *g);
int nvgpu_cg_pg_enable(struct gk20a *g);

#endif
#endif /*NVGPU_POWER_FEATURES_H*/