diff --git a/drivers/gpu/nvgpu/common/fifo/engines.c b/drivers/gpu/nvgpu/common/fifo/engines.c
index 42d5f8697..0f1169e73 100644
--- a/drivers/gpu/nvgpu/common/fifo/engines.c
+++ b/drivers/gpu/nvgpu/common/fifo/engines.c
@@ -20,8 +20,16 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
+#include
+#include
+#include
 #include
+#include
+#include
 #include
+#include
+
+#include "gk20a/fifo_gk20a.h"
 
 enum nvgpu_fifo_engine nvgpu_engine_enum_from_type(struct gk20a *g,
 		u32 engine_type)
@@ -225,4 +233,163 @@ u32 nvgpu_engine_get_all_ce_reset_mask(struct gk20a *g)
 	}
 
 	return reset_mask;
-}
\ No newline at end of file
+}
+
+#ifdef NVGPU_ENGINE
+
+int nvgpu_engine_enable_activity(struct gk20a *g,
+			struct fifo_engine_info_gk20a *eng_info)
+{
+	nvgpu_log(g, gpu_dbg_info, "start");
+
+	gk20a_fifo_set_runlist_state(g, BIT32(eng_info->runlist_id),
+			RUNLIST_ENABLED);
+	return 0;
+}
+
+int nvgpu_engine_enable_activity_all(struct gk20a *g)
+{
+	unsigned int i;
+	int err = 0, ret = 0;
+
+	for (i = 0; i < g->fifo.num_engines; i++) {
+		u32 active_engine_id = g->fifo.active_engines_list[i];
+		err = nvgpu_engine_enable_activity(g,
+				&g->fifo.engine_info[active_engine_id]);
+		if (err != 0) {
+			nvgpu_err(g,
+				"failed to enable engine %d activity", active_engine_id);
+			ret = err;
+		}
+	}
+
+	return ret;
+}
+
+int nvgpu_engine_disable_activity(struct gk20a *g,
+			struct fifo_engine_info_gk20a *eng_info,
+			bool wait_for_idle)
+{
+	u32 pbdma_chid = FIFO_INVAL_CHANNEL_ID;
+	u32 engine_chid = FIFO_INVAL_CHANNEL_ID;
+	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
+	int mutex_ret = -EINVAL;
+	struct channel_gk20a *ch = NULL;
+	int err = 0;
+	struct nvgpu_engine_status_info engine_status;
+	struct nvgpu_pbdma_status_info pbdma_status;
+
+	nvgpu_log_fn(g, " ");
+
+	g->ops.engine_status.read_engine_status_info(g, eng_info->engine_id,
+		&engine_status);
+	if (engine_status.is_busy && !wait_for_idle) {
+		return -EBUSY;
+	}
+
+	if (g->ops.pmu.is_pmu_supported(g)) {
+		mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
+					PMU_MUTEX_ID_FIFO, &token);
+	}
+
+	gk20a_fifo_set_runlist_state(g, BIT32(eng_info->runlist_id),
+			RUNLIST_DISABLED);
+
+	/* chid from pbdma status */
+	g->ops.pbdma_status.read_pbdma_status_info(g, eng_info->pbdma_id,
+		&pbdma_status);
+	if (nvgpu_pbdma_status_is_chsw_valid(&pbdma_status) ||
+			nvgpu_pbdma_status_is_chsw_save(&pbdma_status)) {
+		pbdma_chid = pbdma_status.id;
+	} else if (nvgpu_pbdma_status_is_chsw_load(&pbdma_status) ||
+			nvgpu_pbdma_status_is_chsw_switch(&pbdma_status)) {
+		pbdma_chid = pbdma_status.next_id;
+	}
+
+	if (pbdma_chid != FIFO_INVAL_CHANNEL_ID) {
+		ch = gk20a_channel_from_id(g, pbdma_chid);
+		if (ch != NULL) {
+			err = g->ops.fifo.preempt_channel(g, ch);
+			gk20a_channel_put(ch);
+		}
+		if (err != 0) {
+			goto clean_up;
+		}
+	}
+
+	/* chid from engine status */
+	g->ops.engine_status.read_engine_status_info(g, eng_info->engine_id,
+		&engine_status);
+	if (nvgpu_engine_status_is_ctxsw_valid(&engine_status) ||
+			nvgpu_engine_status_is_ctxsw_save(&engine_status)) {
+		engine_chid = engine_status.ctx_id;
+	} else if (nvgpu_engine_status_is_ctxsw_switch(&engine_status) ||
+			nvgpu_engine_status_is_ctxsw_load(&engine_status)) {
+		engine_chid = engine_status.ctx_next_id;
+	}
+
+	if (engine_chid != FIFO_INVAL_ENGINE_ID && engine_chid != pbdma_chid) {
+		ch = gk20a_channel_from_id(g, engine_chid);
+		if (ch != NULL) {
+			err = g->ops.fifo.preempt_channel(g, ch);
+			gk20a_channel_put(ch);
+		}
+		if (err != 0) {
+			goto clean_up;
+		}
+	}
+
+clean_up:
+	if (mutex_ret == 0) {
+		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+	}
+
+	if (err != 0) {
+		nvgpu_log_fn(g, "failed");
+		if (nvgpu_engine_enable_activity(g, eng_info) != 0) {
+			nvgpu_err(g,
+				"failed to enable gr engine activity");
+		}
+	} else {
+		nvgpu_log_fn(g, "done");
+	}
+	return err;
+}
+
+int nvgpu_engine_disable_activity_all(struct gk20a *g,
+			bool wait_for_idle)
+{
+	unsigned int i;
+	int err = 0, ret = 0;
+	u32 active_engine_id;
+
+	for (i = 0; i < g->fifo.num_engines; i++) {
+		active_engine_id = g->fifo.active_engines_list[i];
+		err = nvgpu_engine_disable_activity(g,
+			&g->fifo.engine_info[active_engine_id],
+			wait_for_idle);
+		if (err != 0) {
+			nvgpu_err(g, "failed to disable engine %d activity",
+				active_engine_id);
+			ret = err;
+			break;
+		}
+	}
+
+	if (err != 0) {
+		while (i-- != 0U) {
+			active_engine_id = g->fifo.active_engines_list[i];
+			err = nvgpu_engine_enable_activity(g,
+					&g->fifo.engine_info[active_engine_id]);
+			if (err != 0) {
+				nvgpu_err(g,
+					"failed to re-enable engine %d activity",
+					active_engine_id);
+			}
+		}
+	}
+
+	return ret;
+}
+
+#endif /* NVGPU_ENGINE */
\ No newline at end of file
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 50e794b75..b234109b2 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -2305,159 +2305,6 @@ int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch)
 	return err;
 }
 
-int gk20a_fifo_enable_engine_activity(struct gk20a *g,
-			struct fifo_engine_info_gk20a *eng_info)
-{
-	nvgpu_log(g, gpu_dbg_info, "start");
-
-	gk20a_fifo_set_runlist_state(g, BIT32(eng_info->runlist_id),
-			RUNLIST_ENABLED);
-	return 0;
-}
-
-int gk20a_fifo_enable_all_engine_activity(struct gk20a *g)
-{
-	unsigned int i;
-	int err = 0, ret = 0;
-
-	for (i = 0; i < g->fifo.num_engines; i++) {
-		u32 active_engine_id = g->fifo.active_engines_list[i];
-		err = gk20a_fifo_enable_engine_activity(g,
-				&g->fifo.engine_info[active_engine_id]);
-		if (err != 0) {
-			nvgpu_err(g,
-				"failed to enable engine %d activity", active_engine_id);
-			ret = err;
-		}
-	}
-
-	return ret;
-}
-
-int gk20a_fifo_disable_engine_activity(struct gk20a *g,
-			struct fifo_engine_info_gk20a *eng_info,
-			bool wait_for_idle)
-{
-	u32 pbdma_chid = FIFO_INVAL_CHANNEL_ID;
-	u32 engine_chid = FIFO_INVAL_CHANNEL_ID;
-	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
-	int mutex_ret = 0;
-	struct channel_gk20a *ch = NULL;
-	int err = 0;
-	struct nvgpu_engine_status_info engine_status;
-	struct nvgpu_pbdma_status_info pbdma_status;
-
-	nvgpu_log_fn(g, " ");
-
-	g->ops.engine_status.read_engine_status_info(g, eng_info->engine_id,
-		&engine_status);
-	if (engine_status.is_busy && !wait_for_idle) {
-		return -EBUSY;
-	}
-
-	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu,
-					PMU_MUTEX_ID_FIFO, &token);
-
-	gk20a_fifo_set_runlist_state(g, BIT32(eng_info->runlist_id),
-			RUNLIST_DISABLED);
-
-	/* chid from pbdma status */
-	g->ops.pbdma_status.read_pbdma_status_info(g, eng_info->pbdma_id,
-		&pbdma_status);
-	if (nvgpu_pbdma_status_is_chsw_valid(&pbdma_status) ||
-			nvgpu_pbdma_status_is_chsw_save(&pbdma_status)) {
-		pbdma_chid = pbdma_status.id;
-	} else if (nvgpu_pbdma_status_is_chsw_load(&pbdma_status) ||
-			nvgpu_pbdma_status_is_chsw_switch(&pbdma_status)) {
-		pbdma_chid = pbdma_status.next_id;
-	}
-
-	if (pbdma_chid != FIFO_INVAL_CHANNEL_ID) {
-		ch = gk20a_channel_from_id(g, pbdma_chid);
-		if (ch != NULL) {
-			err = g->ops.fifo.preempt_channel(g, ch);
-			gk20a_channel_put(ch);
-		}
-		if (err != 0) {
-			goto clean_up;
-		}
-	}
-
-	/* chid from engine status */
-	g->ops.engine_status.read_engine_status_info(g, eng_info->engine_id,
-		&engine_status);
-	if (nvgpu_engine_status_is_ctxsw_valid(&engine_status) ||
-			nvgpu_engine_status_is_ctxsw_save(&engine_status)) {
-		engine_chid = engine_status.ctx_id;
-	} else if (nvgpu_engine_status_is_ctxsw_switch(&engine_status) ||
-			nvgpu_engine_status_is_ctxsw_load(&engine_status)) {
-		engine_chid = engine_status.ctx_next_id;
-	}
-
-	if (engine_chid != FIFO_INVAL_ENGINE_ID && engine_chid != pbdma_chid) {
-		ch = gk20a_channel_from_id(g, engine_chid);
-		if (ch != NULL) {
-			err = g->ops.fifo.preempt_channel(g, ch);
-			gk20a_channel_put(ch);
-		}
-		if (err != 0) {
-			goto clean_up;
-		}
-	}
-
-clean_up:
-	if (mutex_ret == 0) {
-		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
-	}
-
-	if (err != 0) {
-		nvgpu_log_fn(g, "failed");
-		if (gk20a_fifo_enable_engine_activity(g, eng_info) != 0) {
-			nvgpu_err(g,
-				"failed to enable gr engine activity");
-		}
-	} else {
-		nvgpu_log_fn(g, "done");
-	}
-	return err;
-}
-
-int gk20a_fifo_disable_all_engine_activity(struct gk20a *g,
-			bool wait_for_idle)
-{
-	unsigned int i;
-	int err = 0, ret = 0;
-	u32 active_engine_id;
-
-	for (i = 0; i < g->fifo.num_engines; i++) {
-		active_engine_id = g->fifo.active_engines_list[i];
-		err = gk20a_fifo_disable_engine_activity(g,
-			&g->fifo.engine_info[active_engine_id],
-			wait_for_idle);
-		if (err != 0) {
-			nvgpu_err(g, "failed to disable engine %d activity",
-				active_engine_id);
-			ret = err;
-			break;
-		}
-	}
-
-	if (err != 0) {
-		while (i-- != 0U) {
-			active_engine_id = g->fifo.active_engines_list[i];
-			err = gk20a_fifo_enable_engine_activity(g,
-					&g->fifo.engine_info[active_engine_id]);
-			if (err != 0) {
-				nvgpu_err(g,
-					"failed to re-enable engine %d activity",
-					active_engine_id);
-			}
-		}
-	}
-
-	return ret;
-}
-
 u32 gk20a_fifo_runlist_busy_engines(struct gk20a *g, u32 runlist_id)
 {
 	struct fifo_gk20a *f = &g->fifo;
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index 8a46938fa..5acbaf898 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -257,15 +257,6 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch);
 int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
 int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch);
 
-int gk20a_fifo_enable_engine_activity(struct gk20a *g,
-			struct fifo_engine_info_gk20a *eng_info);
-int gk20a_fifo_enable_all_engine_activity(struct gk20a *g);
-int gk20a_fifo_disable_engine_activity(struct gk20a *g,
-			struct fifo_engine_info_gk20a *eng_info,
-			bool wait_for_idle);
-int gk20a_fifo_disable_all_engine_activity(struct gk20a *g,
-			bool wait_for_idle);
-
 u32 gk20a_fifo_engines_on_ch(struct gk20a *g, u32 chid);
 
 int gk20a_fifo_suspend(struct gk20a *g);
diff --git a/drivers/gpu/nvgpu/include/nvgpu/engines.h b/drivers/gpu/nvgpu/include/nvgpu/engines.h
index 9e9e62ae4..ebc1d577e 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/engines.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/engines.h
@@ -51,4 +51,13 @@ u32 nvgpu_engine_interrupt_mask(struct gk20a *g);
 u32 nvgpu_engine_act_interrupt_mask(struct gk20a *g, u32 act_eng_id);
 u32 nvgpu_engine_get_all_ce_reset_mask(struct gk20a *g);
 
+int nvgpu_engine_enable_activity(struct gk20a *g,
+			struct fifo_engine_info_gk20a *eng_info);
+int nvgpu_engine_enable_activity_all(struct gk20a *g);
+int nvgpu_engine_disable_activity(struct gk20a *g,
+			struct fifo_engine_info_gk20a *eng_info,
+			bool wait_for_idle);
+int nvgpu_engine_disable_activity_all(struct gk20a *g,
+			bool wait_for_idle);
+
 #endif /*NVGPU_ENGINE_H*/
\ No newline at end of file
diff --git a/drivers/gpu/nvgpu/os/linux/module.c b/drivers/gpu/nvgpu/os/linux/module.c
index fd63aa3bf..8ecd8c4c5 100644
--- a/drivers/gpu/nvgpu/os/linux/module.c
+++ b/drivers/gpu/nvgpu/os/linux/module.c
@@ -47,6 +47,7 @@
 #include
 #include
 #include
+#include
 #include
 
 #include "platform_gk20a.h"
@@ -960,7 +961,7 @@ int nvgpu_quiesce(struct gk20a *g)
 		return err;
 	}
 
-	err = gk20a_fifo_disable_all_engine_activity(g, true);
+	err = nvgpu_engine_disable_activity_all(g, true);
 	if (err) {
 		nvgpu_err(g, "failed to disable engine activity, err=%d",