mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: move engine functions from fifo to engines
Move the following functions from fifo to engines:
  gk20a_fifo_get_fast_ce_runlist_id
  gk20a_fifo_get_gr_runlist_id
  gk20a_fifo_is_valid_runlist_id
  gk20a_engine_id_to_mmu_id
  gk20a_mmu_id_to_engine_id

Rename them as:
  nvgpu_engine_get_fast_ce_runlist_id
  nvgpu_engine_get_gr_runlist_id
  nvgpu_engine_is_valid_runlist_id
  nvgpu_engine_id_to_mmu_fault_id
  nvgpu_engine_mmu_fault_id_to_engine_id

JIRA NVGPU-1313

Change-Id: I87c2a03054cb07cb5c59773c9e85f1b54ecc4619
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2084304
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: 0a737a85ee
Commit: 584e9dee8d
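The change is a pure move-and-rename; every call site swaps a gk20a_fifo_*/gk20a_* name for its nvgpu_engine_* equivalent with an unchanged signature. A minimal before/after sketch of a typical call site (the choose_runlist helper is illustrative, not part of the commit, and the nvgpu/engine.h include path is inferred from the NVGPU_ENGINE_H guard in the last hunk):

#include <nvgpu/engine.h>	/* assumed path; header guard is NVGPU_ENGINE_H */

/* Illustrative helper mirroring gk20a_open_new_channel(): validate a
 * caller-supplied runlist ID, falling back to the GR runlist.
 */
static u32 choose_runlist(struct gk20a *g, u32 runlist_id)
{
	/* was: gk20a_fifo_is_valid_runlist_id() / gk20a_fifo_get_gr_runlist_id() */
	if (!nvgpu_engine_is_valid_runlist_id(g, runlist_id)) {
		runlist_id = nvgpu_engine_get_gr_runlist_id(g);
	}
	return runlist_id;
}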
@@ -676,8 +676,8 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
 	struct channel_gk20a *ch;
 
 	/* compatibility with existing code */
-	if (!gk20a_fifo_is_valid_runlist_id(g, runlist_id)) {
-		runlist_id = gk20a_fifo_get_gr_runlist_id(g);
+	if (!nvgpu_engine_is_valid_runlist_id(g, runlist_id)) {
+		runlist_id = nvgpu_engine_get_gr_runlist_id(g);
 	}
 
 	nvgpu_log_fn(g, " ");
@@ -567,3 +567,129 @@ void nvgpu_engine_reset(struct gk20a *g, u32 engine_id)
 		g->ops.mc.reset(g, engine_info->reset_mask);
 	}
 }
+
+u32 nvgpu_engine_get_fast_ce_runlist_id(struct gk20a *g)
+{
+	u32 ce_runlist_id = nvgpu_engine_get_gr_runlist_id(g);
+	enum nvgpu_fifo_engine engine_enum;
+	struct fifo_gk20a *f = NULL;
+	u32 engine_id_idx;
+	struct fifo_engine_info_gk20a *engine_info;
+	u32 active_engine_id = 0U;
+
+	if (g == NULL) {
+		return ce_runlist_id;
+	}
+
+	f = &g->fifo;
+
+	for (engine_id_idx = 0U; engine_id_idx < f->num_engines;
+			++engine_id_idx) {
+		active_engine_id = f->active_engines_list[engine_id_idx];
+		engine_info = &f->engine_info[active_engine_id];
+		engine_enum = engine_info->engine_enum;
+
+		/* select last available ASYNC_CE if available */
+		if (engine_enum == NVGPU_ENGINE_ASYNC_CE_GK20A) {
+			ce_runlist_id = engine_info->runlist_id;
+		}
+	}
+
+	return ce_runlist_id;
+}
+
+u32 nvgpu_engine_get_gr_runlist_id(struct gk20a *g)
+{
+	u32 gr_engine_cnt = 0;
+	u32 gr_engine_id = FIFO_INVAL_ENGINE_ID;
+	struct fifo_engine_info_gk20a *engine_info;
+	u32 gr_runlist_id = U32_MAX;
+
+	/* Consider 1st available GR engine */
+	gr_engine_cnt = nvgpu_engine_get_ids(g, &gr_engine_id,
+			1, NVGPU_ENGINE_GR_GK20A);
+
+	if (gr_engine_cnt == 0U) {
+		nvgpu_err(g,
+			"No GR engine available on this device!");
+		goto end;
+	}
+
+	engine_info = nvgpu_engine_get_active_eng_info(g, gr_engine_id);
+
+	if (engine_info != NULL) {
+		gr_runlist_id = engine_info->runlist_id;
+	} else {
+		nvgpu_err(g,
+			"gr_engine_id: %d is not in active list/invalid",
+			gr_engine_id);
+	}
+
+end:
+	return gr_runlist_id;
+}
+
+bool nvgpu_engine_is_valid_runlist_id(struct gk20a *g, u32 runlist_id)
+{
+	struct fifo_gk20a *f = NULL;
+	u32 engine_id_idx;
+	u32 active_engine_id;
+	struct fifo_engine_info_gk20a *engine_info;
+
+	if (g == NULL) {
+		return false;
+	}
+
+	f = &g->fifo;
+
+	for (engine_id_idx = 0; engine_id_idx < f->num_engines;
+			++engine_id_idx) {
+		active_engine_id = f->active_engines_list[engine_id_idx];
+		engine_info = nvgpu_engine_get_active_eng_info(g,
+				active_engine_id);
+		if ((engine_info != NULL) &&
+		    (engine_info->runlist_id == runlist_id)) {
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/*
+ * Link engine IDs to MMU IDs and vice versa.
+ */
+u32 nvgpu_engine_id_to_mmu_fault_id(struct gk20a *g, u32 engine_id)
+{
+	u32 fault_id = FIFO_INVAL_ENGINE_ID;
+	struct fifo_engine_info_gk20a *engine_info;
+
+	engine_info = nvgpu_engine_get_active_eng_info(g, engine_id);
+
+	if (engine_info != NULL) {
+		fault_id = engine_info->fault_id;
+	} else {
+		nvgpu_err(g, "engine_id: %d is not in active list/invalid",
+			engine_id);
+	}
+	return fault_id;
+}
+
+u32 nvgpu_engine_mmu_fault_id_to_engine_id(struct gk20a *g, u32 fault_id)
+{
+	u32 engine_id;
+	u32 active_engine_id;
+	struct fifo_engine_info_gk20a *engine_info;
+	struct fifo_gk20a *f = &g->fifo;
+
+	for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
+		active_engine_id = f->active_engines_list[engine_id];
+		engine_info = &g->fifo.engine_info[active_engine_id];
+
+		if (engine_info->fault_id == fault_id) {
+			break;
+		}
+		active_engine_id = FIFO_INVAL_ENGINE_ID;
+	}
+	return active_engine_id;
+}
@@ -700,7 +700,7 @@ int nvgpu_runlist_setup_sw(struct gk20a *g)
 	}
 
 	for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
-		if (gk20a_fifo_is_valid_runlist_id(g, runlist_id)) {
+		if (nvgpu_engine_is_valid_runlist_id(g, runlist_id)) {
 			num_runlists++;
 		}
 	}
@@ -719,7 +719,7 @@ int nvgpu_runlist_setup_sw(struct gk20a *g)
 	 */
	i = 0U;
 	for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
-		if (!gk20a_fifo_is_valid_runlist_id(g, runlist_id)) {
+		if (!nvgpu_engine_is_valid_runlist_id(g, runlist_id)) {
 			/* skip inactive runlist */
 			continue;
 		}
@@ -353,7 +353,7 @@ void nvgpu_init_mm_ce_context(struct gk20a *g)
 	    (g->mm.vidmem.ce_ctx_id == NVGPU_CE_INVAL_CTX_ID)) {
 		g->mm.vidmem.ce_ctx_id =
 			gk20a_ce_create_context(g,
-				(int)gk20a_fifo_get_fast_ce_runlist_id(g),
+				(int)nvgpu_engine_get_fast_ce_runlist_id(g),
 				-1,
 				-1);
 
@@ -85,128 +85,6 @@ void nvgpu_report_host_error(struct gk20a *g, u32 inst,
 	}
 }
 
-u32 gk20a_fifo_get_fast_ce_runlist_id(struct gk20a *g)
-{
-	u32 ce_runlist_id = gk20a_fifo_get_gr_runlist_id(g);
-	enum nvgpu_fifo_engine engine_enum;
-	struct fifo_gk20a *f = NULL;
-	u32 engine_id_idx;
-	struct fifo_engine_info_gk20a *engine_info;
-	u32 active_engine_id = 0;
-
-	if (g == NULL) {
-		return ce_runlist_id;
-	}
-
-	f = &g->fifo;
-
-	for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) {
-		active_engine_id = f->active_engines_list[engine_id_idx];
-		engine_info = &f->engine_info[active_engine_id];
-		engine_enum = engine_info->engine_enum;
-
-		/* selecet last available ASYNC_CE if available */
-		if (engine_enum == NVGPU_ENGINE_ASYNC_CE_GK20A) {
-			ce_runlist_id = engine_info->runlist_id;
-		}
-	}
-
-	return ce_runlist_id;
-}
-
-u32 gk20a_fifo_get_gr_runlist_id(struct gk20a *g)
-{
-	u32 gr_engine_cnt = 0;
-	u32 gr_engine_id = FIFO_INVAL_ENGINE_ID;
-	struct fifo_engine_info_gk20a *engine_info;
-	u32 gr_runlist_id = U32_MAX;
-
-	/* Consider 1st available GR engine */
-	gr_engine_cnt = nvgpu_engine_get_ids(g, &gr_engine_id,
-			1, NVGPU_ENGINE_GR_GK20A);
-
-	if (gr_engine_cnt == 0U) {
-		nvgpu_err(g,
-			"No GR engine available on this device!");
-		goto end;
-	}
-
-	engine_info = nvgpu_engine_get_active_eng_info(g, gr_engine_id);
-
-	if (engine_info != NULL) {
-		gr_runlist_id = engine_info->runlist_id;
-	} else {
-		nvgpu_err(g,
-			"gr_engine_id is not in active list/invalid %d", gr_engine_id);
-	}
-
-end:
-	return gr_runlist_id;
-}
-
-bool gk20a_fifo_is_valid_runlist_id(struct gk20a *g, u32 runlist_id)
-{
-	struct fifo_gk20a *f = NULL;
-	u32 engine_id_idx;
-	u32 active_engine_id;
-	struct fifo_engine_info_gk20a *engine_info;
-
-	if (g == NULL) {
-		return false;
-	}
-
-	f = &g->fifo;
-
-	for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) {
-		active_engine_id = f->active_engines_list[engine_id_idx];
-		engine_info = nvgpu_engine_get_active_eng_info(g, active_engine_id);
-		if ((engine_info != NULL) &&
-		    (engine_info->runlist_id == runlist_id)) {
-			return true;
-		}
-	}
-
-	return false;
-}
-
-/*
- * Link engine IDs to MMU IDs and vice versa.
- */
-
-static inline u32 gk20a_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id)
-{
-	u32 fault_id = FIFO_INVAL_ENGINE_ID;
-	struct fifo_engine_info_gk20a *engine_info;
-
-	engine_info = nvgpu_engine_get_active_eng_info(g, engine_id);
-
-	if (engine_info != NULL) {
-		fault_id = engine_info->fault_id;
-	} else {
-		nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id);
-	}
-	return fault_id;
-}
-
-static inline u32 gk20a_mmu_id_to_engine_id(struct gk20a *g, u32 fault_id)
-{
-	u32 engine_id;
-	u32 active_engine_id;
-	struct fifo_engine_info_gk20a *engine_info;
-	struct fifo_gk20a *f = &g->fifo;
-
-	for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
-		active_engine_id = f->active_engines_list[engine_id];
-		engine_info = &g->fifo.engine_info[active_engine_id];
-
-		if (engine_info->fault_id == fault_id) {
-			break;
-		}
-		active_engine_id = FIFO_INVAL_ENGINE_ID;
-	}
-	return active_engine_id;
-}
-
 int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
 {
 	u32 timeout;
@@ -438,7 +316,7 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
 	for_each_set_bit(engine_mmu_fault_id, &fault_id, 32U) {
 		/* bits in fifo_intr_mmu_fault_id_r do not correspond 1:1 to
 		 * engines. Convert engine_mmu_id to engine_id */
-		u32 engine_id = gk20a_mmu_id_to_engine_id(g,
+		u32 engine_id = nvgpu_engine_mmu_fault_id_to_engine_id(g,
 				(u32)engine_mmu_fault_id);
 		struct mmu_fault_info mmfault_info;
 		struct channel_gk20a *ch = NULL;
@@ -715,7 +593,7 @@ void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids,
 	/* atleast one engine will get passed during sched err*/
 	engine_ids |= __engine_ids;
 	for_each_set_bit(engine_id, &engine_ids, 32U) {
-		u32 mmu_id = gk20a_engine_id_to_mmu_id(g,
+		u32 mmu_id = nvgpu_engine_id_to_mmu_fault_id(g,
 				(u32)engine_id);
 
 		if (mmu_id != FIFO_INVAL_ENGINE_ID) {
@@ -739,9 +617,11 @@ void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids,
 			u32 type;
 			u32 id;
 
-			gk20a_fifo_get_faulty_id_type(g, active_engine_id, &id, &type);
+			gk20a_fifo_get_faulty_id_type(g,
+				active_engine_id, &id, &type);
 			if (ref_type == type && ref_id == id) {
-				u32 mmu_id = gk20a_engine_id_to_mmu_id(g, active_engine_id);
+				u32 mmu_id = nvgpu_engine_id_to_mmu_fault_id(g,
+						active_engine_id);
 
 				engine_ids |= BIT(active_engine_id);
 				if (mmu_id != FIFO_INVAL_ENGINE_ID) {
@@ -281,12 +281,6 @@ int gk20a_fifo_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice);
 
 int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch);
 
-u32 gk20a_fifo_get_fast_ce_runlist_id(struct gk20a *g);
-
-u32 gk20a_fifo_get_gr_runlist_id(struct gk20a *g);
-
-bool gk20a_fifo_is_valid_runlist_id(struct gk20a *g, u32 runlist_id);
-
 #ifdef CONFIG_DEBUG_FS
 struct fifo_profile_gk20a *gk20a_fifo_profile_acquire(struct gk20a *g);
 void gk20a_fifo_profile_release(struct gk20a *g,
@@ -136,7 +136,7 @@ int gm20b_fifo_init_ce_engine_info(struct fifo_gk20a *f)
 	u32 gr_runlist_id;
 	bool found_pbdma_for_runlist = false;
 
-	gr_runlist_id = gk20a_fifo_get_gr_runlist_id(g);
+	gr_runlist_id = nvgpu_engine_get_gr_runlist_id(g);
 	nvgpu_log_info(g, "gr_runlist_id: %d", gr_runlist_id);
 
 	if (g->ops.top.get_device_info != NULL) {
@@ -55,7 +55,7 @@ int gp10b_fifo_init_ce_engine_info(struct fifo_gk20a *f)
 	bool found_pbdma_for_runlist = false;
 	u32 lce_num_entries = 0;
 
-	gr_runlist_id = gk20a_fifo_get_gr_runlist_id(g);
+	gr_runlist_id = nvgpu_engine_get_gr_runlist_id(g);
 	nvgpu_log_info(g, "gr_runlist_id: %d", gr_runlist_id);
 
 	if (g->ops.top.get_num_engine_type_entries != NULL) {
@@ -1010,7 +1010,7 @@ void gv11b_fifo_init_ramfc_eng_method_buffer(struct gk20a *g,
 		nvgpu_log_info(g, "eng method buffer NULL");
 		return;
 	}
-	if (tsg->runlist_id == gk20a_fifo_get_fast_ce_runlist_id(g)) {
+	if (tsg->runlist_id == nvgpu_engine_get_fast_ce_runlist_id(g)) {
 		method_buffer_per_runque =
 			&tsg->eng_method_buffers[ASYNC_CE_RUNQUE];
 	} else {
@@ -65,4 +65,10 @@ int nvgpu_engine_disable_activity_all(struct gk20a *g,
 int nvgpu_engine_wait_for_idle(struct gk20a *g);
 void nvgpu_engine_reset(struct gk20a *g, u32 engine_id);
 
+u32 nvgpu_engine_get_fast_ce_runlist_id(struct gk20a *g);
+u32 nvgpu_engine_get_gr_runlist_id(struct gk20a *g);
+bool nvgpu_engine_is_valid_runlist_id(struct gk20a *g, u32 runlist_id);
+u32 nvgpu_engine_id_to_mmu_fault_id(struct gk20a *g, u32 engine_id);
+u32 nvgpu_engine_mmu_fault_id_to_engine_id(struct gk20a *g, u32 fault_id);
+
 #endif /*NVGPU_ENGINE_H*/
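A note on the renamed fault-ID pair: for an engine present in the active list, the two mappings round-trip, since nvgpu_engine_mmu_fault_id_to_engine_id() scans the same active_engines_list for a matching fault_id. A hedged sanity sketch (the helper below is illustrative, not part of the commit, and assumes fault IDs are unique per active engine):

/* Illustrative check: round-trip an active engine ID through the MMU
 * fault ID mapping. Assumes fault IDs are unique per active engine.
 */
static bool engine_fault_id_round_trips(struct gk20a *g, u32 engine_id)
{
	/* engine_id -> fault_id; FIFO_INVAL_ENGINE_ID if not in active list */
	u32 fault_id = nvgpu_engine_id_to_mmu_fault_id(g, engine_id);

	if (fault_id == FIFO_INVAL_ENGINE_ID) {
		return false;
	}
	/* fault_id -> engine_id should land back on the same engine */
	return nvgpu_engine_mmu_fault_id_to_engine_id(g, fault_id) == engine_id;
}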