diff --git a/arch/nvgpu-common.yaml b/arch/nvgpu-common.yaml
index f99f7bb53..dc78c3011 100644
--- a/arch/nvgpu-common.yaml
+++ b/arch/nvgpu-common.yaml
@@ -945,11 +945,10 @@ sync:
       sources: [ common/sync/channel_sync_semaphore.c,
                  include/nvgpu/channel_sync_semaphore.h ]
 power_features:
-  safe: no
   owner: Seema K
   children:
     power_features:
-      safe: yes
+      safe: no
       sources: [ common/power_features/power_features.c,
                  include/nvgpu/power_features/power_features.h ]
     cg:
diff --git a/drivers/gpu/nvgpu/Makefile.shared.configs b/drivers/gpu/nvgpu/Makefile.shared.configs
index f4848ea5e..191ce253d 100644
--- a/drivers/gpu/nvgpu/Makefile.shared.configs
+++ b/drivers/gpu/nvgpu/Makefile.shared.configs
@@ -165,6 +165,7 @@ CONFIG_NVGPU_HAL_NON_FUSA := 1
 NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_HAL_NON_FUSA
 
 # Enable non FUSA common code for normal build
+CONFIG_NVGPU_NON_FUSA := 1
 NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_NON_FUSA
 
 CONFIG_NVGPU_CLK_ARB := 1
diff --git a/drivers/gpu/nvgpu/Makefile.sources b/drivers/gpu/nvgpu/Makefile.sources
index ccf2e8fe6..fa64723e7 100644
--- a/drivers/gpu/nvgpu/Makefile.sources
+++ b/drivers/gpu/nvgpu/Makefile.sources
@@ -129,7 +129,6 @@ srcs += common/utils/assert.c \
 	common/ptimer/ptimer.c \
 	common/sync/channel_sync.c \
 	common/sync/channel_sync_syncpt.c \
-	common/power_features/power_features.c \
 	common/power_features/cg/cg.c \
 	common/fifo/preempt.c \
 	common/fifo/channel.c \
@@ -642,3 +641,7 @@ srcs += common/sim/sim.c \
 	common/sim/sim_pci.c \
 	common/sim/sim_netlist.c
 endif
+
+ifeq ($(CONFIG_NVGPU_NON_FUSA),1)
+srcs += common/power_features/power_features.c
+endif
diff --git a/drivers/gpu/nvgpu/common/debugger.c b/drivers/gpu/nvgpu/common/debugger.c
index bcd04a98a..f71ced7f3 100644
--- a/drivers/gpu/nvgpu/common/debugger.c
+++ b/drivers/gpu/nvgpu/common/debugger.c
@@ -207,8 +207,9 @@ int nvgpu_dbg_set_powergate(struct dbg_session_gk20a *dbg_s, bool disable_powerg
 			return err;
 		}
 
+#ifdef CONFIG_NVGPU_NON_FUSA
 		err = nvgpu_cg_pg_disable(g);
-
+#endif
 		if (err == 0) {
 			dbg_s->is_pg_disabled = true;
 			nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn,
@@ -219,17 +220,18 @@ int nvgpu_dbg_set_powergate(struct dbg_session_gk20a *dbg_s, bool disable_powerg
 		/* release pending exceptions to fault/be handled as usual */
 		/*TBD: ordering of these? */
 
+#ifdef CONFIG_NVGPU_NON_FUSA
 		err = nvgpu_cg_pg_enable(g);
-
-		nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn, "module idle");
-
-		gk20a_idle(g);
-
+#endif
 		if (err == 0) {
 			dbg_s->is_pg_disabled = false;
 			nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn,
 					"pg enabled");
 		}
+
+		nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn, "module idle");
+
+		gk20a_idle(g);
 	}
 
 	nvgpu_log(g, gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s powergate mode = %s done",
diff --git a/drivers/gpu/nvgpu/common/power_features/cg/cg.c b/drivers/gpu/nvgpu/common/power_features/cg/cg.c
index b6a633ae0..d5da910a2 100644
--- a/drivers/gpu/nvgpu/common/power_features/cg/cg.c
+++ b/drivers/gpu/nvgpu/common/power_features/cg/cg.c
@@ -29,13 +29,17 @@ static void nvgpu_cg_set_mode(struct gk20a *g, u32 cgmode, u32 mode_config)
 {
 	u32 engine_idx;
 	u32 active_engine_id = 0;
+#ifdef CONFIG_NVGPU_NON_FUSA
 	struct nvgpu_engine_info *engine_info = NULL;
+#endif
 	struct nvgpu_fifo *f = &g->fifo;
 
 	nvgpu_log_fn(g, " ");
 
 	for (engine_idx = 0; engine_idx < f->num_engines; ++engine_idx) {
 		active_engine_id = f->active_engines_list[engine_idx];
+
+#ifdef CONFIG_NVGPU_NON_FUSA
 		engine_info = &f->engine_info[active_engine_id];
 
 		/* gr_engine supports both BLCG and ELCG */
@@ -44,7 +48,9 @@ static void nvgpu_cg_set_mode(struct gk20a *g, u32 cgmode, u32 mode_config)
 			g->ops.therm.init_blcg_mode(g, (u32)mode_config,
 				active_engine_id);
 			break;
-		} else if (cgmode == ELCG_MODE) {
+		} else
+#endif
+		if (cgmode == ELCG_MODE) {
 			g->ops.therm.init_elcg_mode(g, (u32)mode_config,
 				active_engine_id);
 		} else {
@@ -86,78 +92,6 @@ void nvgpu_cg_elcg_disable_no_wait(struct gk20a *g)
 	nvgpu_mutex_release(&g->cg_pg_lock);
 }
 
-void nvgpu_cg_elcg_enable(struct gk20a *g)
-{
-	nvgpu_log_fn(g, " ");
-
-	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG)) {
-		return;
-	}
-
-	g->ops.gr.init.wait_initialized(g);
-
-	nvgpu_mutex_acquire(&g->cg_pg_lock);
-	if (g->elcg_enabled) {
-		nvgpu_cg_set_mode(g, ELCG_MODE, ELCG_AUTO);
-	}
-	nvgpu_mutex_release(&g->cg_pg_lock);
-}
-
-void nvgpu_cg_elcg_disable(struct gk20a *g)
-{
-	nvgpu_log_fn(g, " ");
-
-	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG)) {
-		return;
-	}
-
-	g->ops.gr.init.wait_initialized(g);
-
-	nvgpu_mutex_acquire(&g->cg_pg_lock);
-	if (g->elcg_enabled) {
-		nvgpu_cg_set_mode(g, ELCG_MODE, ELCG_RUN);
-	}
-	nvgpu_mutex_release(&g->cg_pg_lock);
-
-}
-
-void nvgpu_cg_blcg_mode_enable(struct gk20a *g)
-{
-	nvgpu_log_fn(g, " ");
-
-	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
-		return;
-	}
-
-	g->ops.gr.init.wait_initialized(g);
-
-	nvgpu_mutex_acquire(&g->cg_pg_lock);
-	if (g->blcg_enabled) {
-		nvgpu_cg_set_mode(g, BLCG_MODE, BLCG_AUTO);
-	}
-	nvgpu_mutex_release(&g->cg_pg_lock);
-
-}
-
-void nvgpu_cg_blcg_mode_disable(struct gk20a *g)
-{
-	nvgpu_log_fn(g, " ");
-
-	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
-		return;
-	}
-
-	g->ops.gr.init.wait_initialized(g);
-
-	nvgpu_mutex_acquire(&g->cg_pg_lock);
-	if (g->blcg_enabled) {
-		nvgpu_cg_set_mode(g, BLCG_MODE, BLCG_RUN);
-	}
-	nvgpu_mutex_release(&g->cg_pg_lock);
-
-
-}
-
 void nvgpu_cg_blcg_fb_ltc_load_enable(struct gk20a *g)
 {
 	nvgpu_log_fn(g, " ");
@@ -297,60 +231,6 @@ done:
 	nvgpu_mutex_release(&g->cg_pg_lock);
 }
 
-void nvgpu_cg_slcg_gr_perf_ltc_load_enable(struct gk20a *g)
-{
-	nvgpu_log_fn(g, " ");
-
-	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
-		return;
-	}
-
-	g->ops.gr.init.wait_initialized(g);
-
-	nvgpu_mutex_acquire(&g->cg_pg_lock);
-	if (!g->slcg_enabled) {
-		goto done;
-	}
-	if (g->ops.cg.slcg_ltc_load_gating_prod != NULL) {
-		g->ops.cg.slcg_ltc_load_gating_prod(g, true);
-	}
-	if (g->ops.cg.slcg_perf_load_gating_prod != NULL) {
-		g->ops.cg.slcg_perf_load_gating_prod(g, true);
-	}
-	if (g->ops.cg.slcg_gr_load_gating_prod != NULL) {
-		g->ops.cg.slcg_gr_load_gating_prod(g, true);
-	}
-done:
-	nvgpu_mutex_release(&g->cg_pg_lock);
-}
-
-void nvgpu_cg_slcg_gr_perf_ltc_load_disable(struct gk20a *g)
-{
-	nvgpu_log_fn(g, " ");
-
-	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
-		return;
-	}
-
-	g->ops.gr.init.wait_initialized(g);
-
-	nvgpu_mutex_acquire(&g->cg_pg_lock);
-	if (!g->slcg_enabled) {
-		goto done;
-	}
-	if (g->ops.cg.slcg_gr_load_gating_prod != NULL) {
-		g->ops.cg.slcg_gr_load_gating_prod(g, false);
-	}
-	if (g->ops.cg.slcg_perf_load_gating_prod != NULL) {
-		g->ops.cg.slcg_perf_load_gating_prod(g, false);
-	}
-	if (g->ops.cg.slcg_ltc_load_gating_prod != NULL) {
-		g->ops.cg.slcg_ltc_load_gating_prod(g, false);
-	}
-done:
-	nvgpu_mutex_release(&g->cg_pg_lock);
-}
-
 void nvgpu_cg_slcg_fifo_load_enable(struct gk20a *g)
 {
 	nvgpu_log_fn(g, " ");
@@ -474,6 +354,133 @@ pg_gr_load:
 	nvgpu_mutex_release(&g->cg_pg_lock);
 }
 
+#ifdef CONFIG_NVGPU_NON_FUSA
+void nvgpu_cg_elcg_enable(struct gk20a *g)
+{
+	nvgpu_log_fn(g, " ");
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG)) {
+		return;
+	}
+
+	g->ops.gr.init.wait_initialized(g);
+
+	nvgpu_mutex_acquire(&g->cg_pg_lock);
+	if (g->elcg_enabled) {
+		nvgpu_cg_set_mode(g, ELCG_MODE, ELCG_AUTO);
+	}
+	nvgpu_mutex_release(&g->cg_pg_lock);
+}
+
+void nvgpu_cg_elcg_disable(struct gk20a *g)
+{
+	nvgpu_log_fn(g, " ");
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG)) {
+		return;
+	}
+
+	g->ops.gr.init.wait_initialized(g);
+
+	nvgpu_mutex_acquire(&g->cg_pg_lock);
+	if (g->elcg_enabled) {
+		nvgpu_cg_set_mode(g, ELCG_MODE, ELCG_RUN);
+	}
+	nvgpu_mutex_release(&g->cg_pg_lock);
+
+}
+
+void nvgpu_cg_blcg_mode_enable(struct gk20a *g)
+{
+	nvgpu_log_fn(g, " ");
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
+		return;
+	}
+
+	g->ops.gr.init.wait_initialized(g);
+
+	nvgpu_mutex_acquire(&g->cg_pg_lock);
+	if (g->blcg_enabled) {
+		nvgpu_cg_set_mode(g, BLCG_MODE, BLCG_AUTO);
+	}
+	nvgpu_mutex_release(&g->cg_pg_lock);
+
+}
+
+void nvgpu_cg_blcg_mode_disable(struct gk20a *g)
+{
+	nvgpu_log_fn(g, " ");
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
+		return;
+	}
+
+	g->ops.gr.init.wait_initialized(g);
+
+	nvgpu_mutex_acquire(&g->cg_pg_lock);
+	if (g->blcg_enabled) {
+		nvgpu_cg_set_mode(g, BLCG_MODE, BLCG_RUN);
+	}
+	nvgpu_mutex_release(&g->cg_pg_lock);
+
+
+}
+
+void nvgpu_cg_slcg_gr_perf_ltc_load_enable(struct gk20a *g)
+{
+	nvgpu_log_fn(g, " ");
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
+		return;
+	}
+
+	g->ops.gr.init.wait_initialized(g);
+
+	nvgpu_mutex_acquire(&g->cg_pg_lock);
+	if (!g->slcg_enabled) {
+		goto done;
+	}
+	if (g->ops.cg.slcg_ltc_load_gating_prod != NULL) {
+		g->ops.cg.slcg_ltc_load_gating_prod(g, true);
+	}
+	if (g->ops.cg.slcg_perf_load_gating_prod != NULL) {
+		g->ops.cg.slcg_perf_load_gating_prod(g, true);
+	}
+	if (g->ops.cg.slcg_gr_load_gating_prod != NULL) {
+		g->ops.cg.slcg_gr_load_gating_prod(g, true);
+	}
+done:
+	nvgpu_mutex_release(&g->cg_pg_lock);
+}
+
+void nvgpu_cg_slcg_gr_perf_ltc_load_disable(struct gk20a *g)
+{
+	nvgpu_log_fn(g, " ");
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
+		return;
+	}
+
+	g->ops.gr.init.wait_initialized(g);
+
+	nvgpu_mutex_acquire(&g->cg_pg_lock);
+	if (!g->slcg_enabled) {
+		goto done;
+	}
+	if (g->ops.cg.slcg_gr_load_gating_prod != NULL) {
+		g->ops.cg.slcg_gr_load_gating_prod(g, false);
+	}
+	if (g->ops.cg.slcg_perf_load_gating_prod != NULL) {
+		g->ops.cg.slcg_perf_load_gating_prod(g, false);
+	}
+	if (g->ops.cg.slcg_ltc_load_gating_prod != NULL) {
+		g->ops.cg.slcg_ltc_load_gating_prod(g, false);
+	}
+done:
+	nvgpu_mutex_release(&g->cg_pg_lock);
+}
+
 void nvgpu_cg_elcg_set_elcg_enabled(struct gk20a *g, bool enable)
 {
 	nvgpu_log_fn(g, " ");
@@ -637,3 +644,4 @@ void nvgpu_cg_slcg_set_slcg_enabled(struct gk20a *g, bool enable)
 done:
 	nvgpu_mutex_release(&g->cg_pg_lock);
 }
+#endif
diff --git a/drivers/gpu/nvgpu/hal/rc/rc_gv11b.c b/drivers/gpu/nvgpu/hal/rc/rc_gv11b.c
index 97c7484e0..fe8a757cd 100644
--- a/drivers/gpu/nvgpu/hal/rc/rc_gv11b.c
+++ b/drivers/gpu/nvgpu/hal/rc/rc_gv11b.c
@@ -191,9 +191,11 @@ void gv11b_fifo_recover(struct gk20a *g, u32 act_eng_bitmask,
 	/* Disable runlist scheduler */
 	nvgpu_runlist_set_state(g, runlists_mask, RUNLIST_DISABLED);
 
+#ifdef CONFIG_NVGPU_NON_FUSA
 	if (nvgpu_cg_pg_disable(g) != 0) {
 		nvgpu_warn(g, "fail to disable power mgmt");
 	}
+#endif
 
 	if (rc_type == RC_TYPE_MMU_FAULT) {
 		gk20a_debug_dump(g);
@@ -298,9 +300,11 @@ void gv11b_fifo_recover(struct gk20a *g, u32 act_eng_bitmask,
 
 	nvgpu_runlist_set_state(g, runlists_mask, RUNLIST_ENABLED);
 
+#ifdef CONFIG_NVGPU_NON_FUSA
 	if (nvgpu_cg_pg_enable(g) != 0) {
 		nvgpu_warn(g, "fail to enable power mgmt");
 	}
+#endif
 
 	g->ops.fifo.intr_unset_recover_mask(g);
 
diff --git a/drivers/gpu/nvgpu/include/nvgpu/power_features/cg.h b/drivers/gpu/nvgpu/include/nvgpu/power_features/cg.h
index c0000de89..f1e7d58f5 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/power_features/cg.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/power_features/cg.h
@@ -49,28 +49,34 @@ struct gk20a;
 struct nvgpu_fifo;
 
 void nvgpu_cg_init_gr_load_gating_prod(struct gk20a *g);
-void nvgpu_cg_elcg_enable(struct gk20a *g);
-void nvgpu_cg_elcg_disable(struct gk20a *g);
 void nvgpu_cg_elcg_enable_no_wait(struct gk20a *g);
 void nvgpu_cg_elcg_disable_no_wait(struct gk20a *g);
-void nvgpu_cg_elcg_set_elcg_enabled(struct gk20a *g, bool enable);
 
-void nvgpu_cg_blcg_mode_enable(struct gk20a *g);
-void nvgpu_cg_blcg_mode_disable(struct gk20a *g);
 void nvgpu_cg_blcg_fb_ltc_load_enable(struct gk20a *g);
 void nvgpu_cg_blcg_fifo_load_enable(struct gk20a *g);
 void nvgpu_cg_blcg_pmu_load_enable(struct gk20a *g);
 void nvgpu_cg_blcg_ce_load_enable(struct gk20a *g);
 void nvgpu_cg_blcg_gr_load_enable(struct gk20a *g);
-void nvgpu_cg_blcg_set_blcg_enabled(struct gk20a *g, bool enable);
 
-void nvgpu_cg_slcg_gr_perf_ltc_load_enable(struct gk20a *g);
-void nvgpu_cg_slcg_gr_perf_ltc_load_disable(struct gk20a *g);
 void nvgpu_cg_slcg_fb_ltc_load_enable(struct gk20a *g);
 void nvgpu_cg_slcg_priring_load_enable(struct gk20a *g);
 void nvgpu_cg_slcg_fifo_load_enable(struct gk20a *g);
 void nvgpu_cg_slcg_pmu_load_enable(struct gk20a *g);
 void nvgpu_cg_slcg_ce2_load_enable(struct gk20a *g);
+
+#ifdef CONFIG_NVGPU_NON_FUSA
+
+void nvgpu_cg_elcg_enable(struct gk20a *g);
+void nvgpu_cg_elcg_disable(struct gk20a *g);
+void nvgpu_cg_elcg_set_elcg_enabled(struct gk20a *g, bool enable);
+
+void nvgpu_cg_blcg_mode_enable(struct gk20a *g);
+void nvgpu_cg_blcg_mode_disable(struct gk20a *g);
+void nvgpu_cg_blcg_set_blcg_enabled(struct gk20a *g, bool enable);
+
+void nvgpu_cg_slcg_gr_perf_ltc_load_enable(struct gk20a *g);
+void nvgpu_cg_slcg_gr_perf_ltc_load_disable(struct gk20a *g);
 void nvgpu_cg_slcg_set_slcg_enabled(struct gk20a *g, bool enable);
+#endif
 
 #endif /*NVGPU_POWER_FEATURES_CG_H*/
diff --git a/drivers/gpu/nvgpu/include/nvgpu/power_features/power_features.h b/drivers/gpu/nvgpu/include/nvgpu/power_features/power_features.h
index f6ffccf18..d39b2aa7b 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/power_features/power_features.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/power_features/power_features.h
@@ -24,6 +24,8 @@
 #ifndef NVGPU_POWER_FEATURES_H
 #define NVGPU_POWER_FEATURES_H
 
+#ifdef CONFIG_NVGPU_NON_FUSA
+
 #include
 
 struct gk20a;
@@ -31,4 +33,5 @@ struct gk20a;
 int nvgpu_cg_pg_disable(struct gk20a *g);
 int nvgpu_cg_pg_enable(struct gk20a *g);
 
+#endif
 #endif /*NVGPU_POWER_FEATURES_H*/
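Note, not part of the patch: the change above gates the optional power-feature paths behind CONFIG_NVGPU_NON_FUSA at three levels. Makefile.shared.configs defines the flag and adds -DCONFIG_NVGPU_NON_FUSA to NVGPU_COMMON_CFLAGS, Makefile.sources compiles power_features.c only when the flag is set, and the headers and call sites (debugger.c, rc_gv11b.c, cg.c) wrap the declarations and calls in #ifdef so FuSa builds contain no reference to the removed code. A minimal, self-contained sketch of the same compile-time gating pattern is shown below; demo_pg_disable() is a hypothetical stand-in for nvgpu_cg_pg_disable(), not an nvgpu API.

/*
 * Illustrative sketch only, assuming the build system passes
 * -DCONFIG_NVGPU_NON_FUSA for non-safety builds (as the Makefile change
 * above does) and omits it for FuSa builds.
 */
#include <stdio.h>

#ifdef CONFIG_NVGPU_NON_FUSA
/* Compiled only when the non-FuSa feature flag is defined. */
static int demo_pg_disable(void)
{
	printf("power management disabled for debug\n");
	return 0;
}
#endif

int main(void)
{
	int err = 0;

#ifdef CONFIG_NVGPU_NON_FUSA
	/* The call site disappears entirely from safety (FuSa) builds. */
	err = demo_pg_disable();
#endif
	return err;
}

Building this once with and once without the define mirrors the two build flavours: the gated path is present only in the non-FuSa binary, while the FuSa binary never compiles, links, or calls it.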