Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-24 02:22:34 +03:00)
gpu: nvgpu: move get_engines_mask_on_id from fifo to engine
Move the get_engines_mask_on_id fifo HAL op to the engine HAL as
get_mask_on_id. Rename gk20a_fifo_engines_on_id to
nvgpu_engine_get_mask_on_id.

JIRA NVGPU-1313

Change-Id: I3582195e0a0d6f6722e9f160331e77d1a338783e
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2084320
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit d0f45117f1
parent 584e9dee8d
committed by mobile promotions
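Note: the patch is a mechanical HAL move. Call sites that previously fetched the engine mask through the fifo ops table now fetch it through the engine ops table, and the gk20a implementation is renamed into the common engine unit. A minimal sketch of the call-site change, assuming normal nvgpu driver context (the wrapper function name below is hypothetical and not part of the patch):

/*
 * Sketch only: shows the old vs. new HAL lookup for the engine mask.
 * example_engines_holding_channel() is a hypothetical helper used for
 * illustration; the ops fields are the ones touched by this patch.
 */
static u32 example_engines_holding_channel(struct gk20a *g, u32 chid)
{
	/* Before: the engine-mask lookup lived under the fifo HAL. */
	/* return g->ops.fifo.get_engines_mask_on_id(g, chid, false); */

	/* After: same semantics, now under the engine HAL. */
	return g->ops.engine.get_mask_on_id(g, chid, false);
}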
@@ -1485,7 +1485,7 @@ void nvgpu_channel_recover(struct gk20a *g, struct channel_gk20a *ch,
 		goto fail;
 	}
 
-	engines = g->ops.fifo.get_engines_mask_on_id(g, ch->chid, false);
+	engines = g->ops.engine.get_mask_on_id(g, ch->chid, false);
 
 	if (engines != 0U) {
 		gk20a_fifo_recover(g, engines, ch->chid, false, true, verbose,
@@ -693,3 +693,42 @@ u32 nvgpu_engine_mmu_fault_id_to_engine_id(struct gk20a *g, u32 fault_id)
 	}
 	return active_engine_id;
 }
+
+u32 nvgpu_engine_get_mask_on_id(struct gk20a *g, u32 id, bool is_tsg)
+{
+	unsigned int i;
+	u32 engines = 0;
+	struct nvgpu_engine_status_info engine_status;
+	u32 ctx_id;
+	u32 type;
+	bool busy;
+
+	for (i = 0; i < g->fifo.num_engines; i++) {
+		u32 active_engine_id = g->fifo.active_engines_list[i];
+
+		g->ops.engine_status.read_engine_status_info(g,
+			active_engine_id, &engine_status);
+
+		if (nvgpu_engine_status_is_ctxsw_load(
+			&engine_status)) {
+			nvgpu_engine_status_get_next_ctx_id_type(
+				&engine_status, &ctx_id, &type);
+		} else {
+			nvgpu_engine_status_get_ctx_id_type(
+				&engine_status, &ctx_id, &type);
+		}
+
+		busy = engine_status.is_busy;
+
+		if (busy && ctx_id == id) {
+			if ((is_tsg && type ==
+					ENGINE_STATUS_CTX_ID_TYPE_TSGID) ||
+				(!is_tsg && type ==
+					ENGINE_STATUS_CTX_ID_TYPE_CHID)) {
+				engines |= BIT(active_engine_id);
+			}
+		}
+	}
+
+	return engines;
+}
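Note: the helper added above returns a bitmask with BIT(active_engine_id) set for every busy engine whose current (or, during a ctxsw load, next) context id matches the given channel or TSG id. A hedged sketch of how such a mask can be consumed, assuming nvgpu driver context (the reporting helper and the 32-engine upper bound are illustrative, not from the patch):

/*
 * Illustration only: walk the mask returned by the new engine HAL op and
 * report each engine the TSG is loaded on. example_report_busy_engines()
 * is hypothetical; nvgpu_err() and BIT() are used as in the patch above.
 */
static void example_report_busy_engines(struct gk20a *g, u32 tsgid)
{
	u32 engines = g->ops.engine.get_mask_on_id(g, tsgid, true);
	u32 engine_id;

	for (engine_id = 0U; engine_id < 32U; engine_id++) {
		if ((engines & BIT(engine_id)) != 0U) {
			nvgpu_err(g, "tsg %u loaded on engine %u",
				tsgid, engine_id);
		}
	}
}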
@@ -175,13 +175,13 @@ void nvgpu_tsg_recover(struct gk20a *g, struct tsg_gk20a *tsg,
 		nvgpu_err(g, "failed to disable ctxsw");
 	} else {
 		/* recover engines if tsg is loaded on the engines */
-		engines_mask = g->ops.fifo.get_engines_mask_on_id(g,
+		engines_mask = g->ops.engine.get_mask_on_id(g,
 				tsg->tsgid, true);
 
 		/*
 		 * it is ok to enable ctxsw before tsg is recovered. If engines
 		 * is 0, no engine recovery is needed and if it is non zero,
-		 * gk20a_fifo_recover will call get_engines_mask_on_id again.
+		 * gk20a_fifo_recover will call get_mask_on_id again.
 		 * By that time if tsg is not on the engine, engine need not
 		 * be reset.
 		 */
@@ -429,7 +429,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 		.tsg_release = vgpu_tsg_release,
 		.force_reset_ch = vgpu_fifo_force_reset_ch,
 		.init_engine_info = vgpu_fifo_init_engine_info,
-		.get_engines_mask_on_id = NULL,
 		.dump_channel_status_ramfc = NULL,
 		.is_preempt_pending = NULL,
 		.reset_enable_hw = NULL,
@@ -461,6 +460,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 	},
 	.engine = {
 		.is_fault_engine_subid_gpc = gm20b_is_fault_engine_subid_gpc,
+		.get_mask_on_id = NULL,
 	},
 	.pbdma = {
 		.intr_enable = NULL,
@@ -510,7 +510,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 		.tsg_release = vgpu_tsg_release,
 		.force_reset_ch = vgpu_fifo_force_reset_ch,
 		.init_engine_info = vgpu_fifo_init_engine_info,
-		.get_engines_mask_on_id = NULL,
 		.dump_channel_status_ramfc = NULL,
 		.is_preempt_pending = gv11b_fifo_is_preempt_pending,
 		.reset_enable_hw = NULL,
@@ -547,6 +546,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 	},
 	.engine = {
 		.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
+		.get_mask_on_id = NULL,
 	},
 	.pbdma = {
 		.intr_enable = NULL,
@@ -241,7 +241,7 @@ int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
 
 	tsg = tsg_gk20a_from_ch(ch);
 	if (tsg != NULL) {
-		engines = g->ops.fifo.get_engines_mask_on_id(g,
+		engines = g->ops.engine.get_mask_on_id(g,
 				tsg->tsgid, true);
 	} else {
 		nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid);
@@ -504,44 +504,6 @@ static void gk20a_fifo_get_faulty_id_type(struct gk20a *g, u32 engine_id,
 	}
 }
 
-u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg)
-{
-	unsigned int i;
-	u32 engines = 0;
-	struct nvgpu_engine_status_info engine_status;
-	u32 ctx_id;
-	u32 type;
-	bool busy;
-
-	for (i = 0; i < g->fifo.num_engines; i++) {
-		u32 active_engine_id = g->fifo.active_engines_list[i];
-		g->ops.engine_status.read_engine_status_info(g,
-			active_engine_id, &engine_status);
-
-		if (nvgpu_engine_status_is_ctxsw_load(
-			&engine_status)) {
-			nvgpu_engine_status_get_next_ctx_id_type(
-				&engine_status, &ctx_id, &type);
-		} else {
-			nvgpu_engine_status_get_ctx_id_type(
-				&engine_status, &ctx_id, &type);
-		}
-
-		busy = engine_status.is_busy;
-
-		if (busy && ctx_id == id) {
-			if ((is_tsg && type ==
-					ENGINE_STATUS_CTX_ID_TYPE_TSGID) ||
-				(!is_tsg && type ==
-					ENGINE_STATUS_CTX_ID_TYPE_CHID)) {
-				engines |= BIT(active_engine_id);
-			}
-		}
-	}
-
-	return engines;
-}
-
 void gk20a_fifo_teardown_mask_intr(struct gk20a *g)
 {
 	u32 val;
@@ -583,7 +545,7 @@ void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids,
 	nvgpu_fifo_lock_active_runlists(g);
 
 	if (id_is_known) {
-		engine_ids = g->ops.fifo.get_engines_mask_on_id(g,
+		engine_ids = g->ops.engine.get_mask_on_id(g,
 				hw_id, id_is_tsg);
 		ref_id = hw_id;
 		ref_type = id_is_tsg ?
@@ -259,8 +259,6 @@ int gk20a_fifo_suspend(struct gk20a *g);
 
 bool gk20a_fifo_mmu_fault_pending(struct gk20a *g);
 
-u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg);
-
 void gk20a_fifo_recover(struct gk20a *g,
 	u32 engine_ids, /* if zero, will be queried from HW */
 	u32 hw_id, /* if ~0, will be queried from HW */
@@ -615,7 +615,6 @@ static const struct gpu_ops gm20b_ops = {
 		.force_reset_ch = gk20a_fifo_force_reset_ch,
 		.init_pbdma_info = gk20a_fifo_init_pbdma_info,
 		.init_engine_info = gm20b_fifo_init_engine_info,
-		.get_engines_mask_on_id = gk20a_fifo_engines_on_id,
 		.dump_channel_status_ramfc = gk20a_dump_channel_status_ramfc,
 		.is_preempt_pending = gk20a_fifo_is_preempt_pending,
 		.reset_enable_hw = gk20a_init_fifo_reset_enable_hw,
@@ -651,6 +650,7 @@ static const struct gpu_ops gm20b_ops = {
 	},
 	.engine = {
 		.is_fault_engine_subid_gpc = gm20b_is_fault_engine_subid_gpc,
+		.get_mask_on_id = nvgpu_engine_get_mask_on_id,
 	},
 	.pbdma = {
 		.intr_enable = gm20b_pbdma_intr_enable,
@@ -703,7 +703,6 @@ static const struct gpu_ops gp10b_ops = {
 		.force_reset_ch = gk20a_fifo_force_reset_ch,
 		.init_pbdma_info = gk20a_fifo_init_pbdma_info,
 		.init_engine_info = gm20b_fifo_init_engine_info,
-		.get_engines_mask_on_id = gk20a_fifo_engines_on_id,
 		.dump_channel_status_ramfc = gk20a_dump_channel_status_ramfc,
 		.is_preempt_pending = gk20a_fifo_is_preempt_pending,
 		.reset_enable_hw = gk20a_init_fifo_reset_enable_hw,
@@ -739,6 +738,7 @@ static const struct gpu_ops gp10b_ops = {
 	},
 	.engine = {
 		.is_fault_engine_subid_gpc = gm20b_is_fault_engine_subid_gpc,
+		.get_mask_on_id = nvgpu_engine_get_mask_on_id,
 	},
 	.pbdma = {
 		.intr_enable = gm20b_pbdma_intr_enable,
@@ -880,7 +880,6 @@ static const struct gpu_ops gv100_ops = {
 		.force_reset_ch = gk20a_fifo_force_reset_ch,
 		.init_engine_info = gm20b_fifo_init_engine_info,
 		.init_pbdma_info = gk20a_fifo_init_pbdma_info,
-		.get_engines_mask_on_id = gk20a_fifo_engines_on_id,
 		.dump_channel_status_ramfc = gv11b_dump_channel_status_ramfc,
 		.is_preempt_pending = gv11b_fifo_is_preempt_pending,
 		.reset_enable_hw = gk20a_init_fifo_reset_enable_hw,
@@ -922,6 +921,7 @@ static const struct gpu_ops gv100_ops = {
 	},
 	.engine = {
 		.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
+		.get_mask_on_id = nvgpu_engine_get_mask_on_id,
 	},
 	.pbdma = {
 		.intr_enable = gv11b_pbdma_intr_enable,
@@ -835,7 +835,6 @@ static const struct gpu_ops gv11b_ops = {
 		.force_reset_ch = gk20a_fifo_force_reset_ch,
 		.init_engine_info = gm20b_fifo_init_engine_info,
 		.init_pbdma_info = gk20a_fifo_init_pbdma_info,
-		.get_engines_mask_on_id = gk20a_fifo_engines_on_id,
 		.dump_channel_status_ramfc = gv11b_dump_channel_status_ramfc,
 		.is_preempt_pending = gv11b_fifo_is_preempt_pending,
 		.reset_enable_hw = gv11b_init_fifo_reset_enable_hw,
@@ -877,6 +876,7 @@ static const struct gpu_ops gv11b_ops = {
 	},
 	.engine = {
 		.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
+		.get_mask_on_id = nvgpu_engine_get_mask_on_id,
 	},
 	.pbdma = {
 		.intr_enable = gv11b_pbdma_intr_enable,
@@ -71,4 +71,6 @@ bool nvgpu_engine_is_valid_runlist_id(struct gk20a *g, u32 runlist_id);
 u32 nvgpu_engine_id_to_mmu_fault_id(struct gk20a *g, u32 engine_id);
 u32 nvgpu_engine_mmu_fault_id_to_engine_id(struct gk20a *g, u32 fault_id);
 
+u32 nvgpu_engine_get_mask_on_id(struct gk20a *g, u32 id, bool is_tsg);
+
 #endif /*NVGPU_ENGINE_H*/
@@ -950,8 +950,6 @@ struct gpu_ops {
 		void (*tsg_release)(struct tsg_gk20a *tsg);
 		int (*init_pbdma_info)(struct fifo_gk20a *f);
 		int (*init_engine_info)(struct fifo_gk20a *f);
-		u32 (*get_engines_mask_on_id)(struct gk20a *g,
-			u32 id, bool is_tsg);
 		void (*free_channel_ctx_header)(struct channel_gk20a *ch);
 		void (*dump_channel_status_ramfc)(struct gk20a *g,
 			struct gk20a_debug_output *o,
@@ -1069,6 +1067,8 @@
 	struct {
 		bool (*is_fault_engine_subid_gpc)(struct gk20a *g,
 			u32 engine_subid);
+		u32 (*get_mask_on_id)(struct gk20a *g,
+			u32 id, bool is_tsg);
 	} engine;
 
 	struct {
@@ -915,7 +915,6 @@ static const struct gpu_ops tu104_ops = {
 		.force_reset_ch = gk20a_fifo_force_reset_ch,
 		.init_engine_info = gm20b_fifo_init_engine_info,
 		.init_pbdma_info = gk20a_fifo_init_pbdma_info,
-		.get_engines_mask_on_id = gk20a_fifo_engines_on_id,
 		.dump_channel_status_ramfc = gv11b_dump_channel_status_ramfc,
 		.is_preempt_pending = gv11b_fifo_is_preempt_pending,
 		.reset_enable_hw = gv11b_init_fifo_reset_enable_hw,
@@ -959,6 +958,7 @@ static const struct gpu_ops tu104_ops = {
 	},
 	.engine = {
 		.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
+		.get_mask_on_id = nvgpu_engine_get_mask_on_id,
 	},
 	.pbdma = {
 		.intr_enable = gv11b_pbdma_intr_enable,