gpu: nvgpu: move chip-specific teardown_mask/unmask_intr

Move the chip-specific teardown_mask_intr and teardown_unmask_intr
functions to hal/fifo/fifo_intr_[chip].[ch].

Renamed
teardown_mask_intr -> intr_set_recover_mask
teardown_unmask_intr -> intr_unset_recover_mask

JIRA NVGPU-1314

Change-Id: If233565cbdb09d77cfebd4346edcc3fe64584355
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2093980
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Seema Khowala
Date:      2019-04-09 22:20:19 -07:00
Committer: mobile promotions
Commit:    6ba1f5db3b (parent 59bf3919e2)
16 changed files with 73 additions and 73 deletions
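
The rename leaves the dispatch shape intact: common recovery code still calls through g->ops.fifo, while each chip's HAL table supplies its own implementation pair. A minimal standalone sketch of that pattern (stub types, and the chip_a/chip_b names are invented for illustration; this is not nvgpu code):

/* Minimal sketch of HAL-style dispatch; stub types, not nvgpu code. */
#include <stdio.h>

struct gk20a;                          /* opaque to the common code */

struct fifo_ops {
        void (*intr_set_recover_mask)(struct gk20a *g);
        void (*intr_unset_recover_mask)(struct gk20a *g);
};

struct gk20a {
        struct fifo_ops fifo;          /* in nvgpu this sits under g->ops.fifo */
};

/* hypothetical per-chip implementations */
static void chip_a_set(struct gk20a *g)   { (void)g; puts("chip A: mask"); }
static void chip_a_unset(struct gk20a *g) { (void)g; puts("chip A: unmask"); }
static void chip_b_set(struct gk20a *g)   { (void)g; puts("chip B: mask"); }
static void chip_b_unset(struct gk20a *g) { (void)g; puts("chip B: unmask"); }

int main(void)
{
        /* each chip's HAL table fills in its own pair */
        struct gk20a a = { .fifo = { chip_a_set, chip_a_unset } };
        struct gk20a b = { .fifo = { chip_b_set, chip_b_unset } };

        /* common recovery code dispatches without knowing the chip */
        a.fifo.intr_set_recover_mask(&a);
        a.fifo.intr_unset_recover_mask(&a);
        b.fifo.intr_set_recover_mask(&b);
        b.fifo.intr_unset_recover_mask(&b);
        return 0;
}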

@@ -329,27 +329,6 @@ bool gk20a_fifo_handle_mmu_fault(
 	return verbose;
 }
 
-void gk20a_fifo_teardown_mask_intr(struct gk20a *g)
-{
-	u32 val;
-
-	val = gk20a_readl(g, fifo_intr_en_0_r());
-	val &= ~(fifo_intr_en_0_sched_error_m() |
-		fifo_intr_en_0_mmu_fault_m());
-	gk20a_writel(g, fifo_intr_en_0_r(), val);
-	gk20a_writel(g, fifo_intr_0_r(), fifo_intr_0_sched_error_reset_f());
-}
-
-void gk20a_fifo_teardown_unmask_intr(struct gk20a *g)
-{
-	u32 val;
-
-	val = gk20a_readl(g, fifo_intr_en_0_r());
-	val |= fifo_intr_en_0_mmu_fault_f(1) | fifo_intr_en_0_sched_error_f(1);
-	gk20a_writel(g, fifo_intr_en_0_r(), val);
-}
-
 void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids,
 		u32 hw_id, unsigned int id_type, unsigned int rc_type,
 		struct mmu_fault_info *mmfault)
@@ -420,12 +399,13 @@ void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids,
 	}
 
 	if (mmu_fault_engines != 0U) {
-		g->ops.fifo.teardown_mask_intr(g);
+		g->ops.fifo.intr_set_recover_mask(g);
 		g->ops.fifo.trigger_mmu_fault(g, engine_ids);
 		gk20a_fifo_handle_mmu_fault_locked(g, mmu_fault_engines, ref_id,
 				ref_id_is_tsg);
-		g->ops.fifo.teardown_unmask_intr(g);
+		g->ops.fifo.intr_unset_recover_mask(g);
 	}
 
 	nvgpu_fifo_unlock_active_runlists(g);
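
The hunk above shows the call sites: recovery is bracketed by the two renamed ops. A self-contained sketch of that bracketing, with hypothetical stubs standing in for trigger_mmu_fault() and gk20a_fifo_handle_mmu_fault_locked():

/* Self-contained sketch of the mask -> recover -> unmask bracket. */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

struct gk20a { int intr_masked; };     /* stub; tracks only the mask state */

static void intr_set_recover_mask(struct gk20a *g)   { g->intr_masked = 1; }
static void intr_unset_recover_mask(struct gk20a *g) { g->intr_masked = 0; }

/* hypothetical stand-in for trigger_mmu_fault() + the locked fault handler */
static void fake_trigger_and_handle_fault(struct gk20a *g)
{
        printf("recovering with intr_masked=%d\n", g->intr_masked);
}

static void recover(struct gk20a *g, u32 mmu_fault_engines)
{
        if (mmu_fault_engines != 0U) {
                intr_set_recover_mask(g);     /* quiesce interrupts first... */
                fake_trigger_and_handle_fault(g);
                intr_unset_recover_mask(g);   /* ...and restore them after */
        }
}

int main(void)
{
        struct gk20a g = { 0 };
        recover(&g, 0x1U);
        return 0;
}

Note that every path out of the bracketed region must reach the unset call, or the masked interrupts stay disabled after recovery.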

@@ -259,9 +259,6 @@ void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids,
 		u32 hw_id, unsigned int id_type, unsigned int rc_type,
 		struct mmu_fault_info *mmfault);
-void gk20a_fifo_teardown_mask_intr(struct gk20a *g);
-void gk20a_fifo_teardown_unmask_intr(struct gk20a *g);
-
 u32 gk20a_fifo_default_timeslice_us(struct gk20a *g);
 int gk20a_fifo_init_pbdma_map(struct gk20a *g, u32 *pbdma_map, u32 num_pbdma);

@@ -651,8 +651,8 @@ static const struct gpu_ops gm20b_ops = {
 		.is_preempt_pending = gk20a_fifo_is_preempt_pending,
 		.reset_enable_hw = gk20a_init_fifo_reset_enable_hw,
 		.teardown_ch_tsg = gk20a_fifo_teardown_ch_tsg,
-		.teardown_mask_intr = gk20a_fifo_teardown_mask_intr,
-		.teardown_unmask_intr = gk20a_fifo_teardown_unmask_intr,
+		.intr_set_recover_mask = gk20a_fifo_intr_set_recover_mask,
+		.intr_unset_recover_mask = gk20a_fifo_intr_unset_recover_mask,
 		.setup_sw = nvgpu_fifo_setup_sw,
 		.cleanup_sw = nvgpu_fifo_cleanup_sw,
 		.set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,

@@ -749,8 +749,8 @@ static const struct gpu_ops gp10b_ops = {
 		.is_preempt_pending = gk20a_fifo_is_preempt_pending,
 		.reset_enable_hw = gk20a_init_fifo_reset_enable_hw,
 		.teardown_ch_tsg = gk20a_fifo_teardown_ch_tsg,
-		.teardown_mask_intr = gk20a_fifo_teardown_mask_intr,
-		.teardown_unmask_intr = gk20a_fifo_teardown_unmask_intr,
+		.intr_set_recover_mask = gk20a_fifo_intr_set_recover_mask,
+		.intr_unset_recover_mask = gk20a_fifo_intr_unset_recover_mask,
 		.setup_sw = nvgpu_fifo_setup_sw,
 		.cleanup_sw = nvgpu_fifo_cleanup_sw,
 		.set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,

@@ -38,7 +38,7 @@ u32 gv100_fifo_get_preempt_timeout(struct gk20a *g)
 	return g->ctxsw_timeout_period_ms;
 }
 
-void gv100_fifo_teardown_mask_intr(struct gk20a *g)
+void gv100_fifo_intr_set_recover_mask(struct gk20a *g)
 {
 	u32 val;
 
@@ -48,7 +48,7 @@ void gv100_fifo_teardown_mask_intr(struct gk20a *g)
 	gk20a_writel(g, fifo_intr_0_r(), fifo_intr_0_sched_error_reset_f());
 }
 
-void gv100_fifo_teardown_unmask_intr(struct gk20a *g)
+void gv100_fifo_intr_unset_recover_mask(struct gk20a *g)
 {
 	u32 val;

@@ -29,6 +29,6 @@
 struct gk20a;
 
 u32 gv100_fifo_get_preempt_timeout(struct gk20a *g);
-void gv100_fifo_teardown_mask_intr(struct gk20a *g);
-void gv100_fifo_teardown_unmask_intr(struct gk20a *g);
+void gv100_fifo_intr_set_recover_mask(struct gk20a *g);
+void gv100_fifo_intr_unset_recover_mask(struct gk20a *g);
 
 #endif

@@ -923,8 +923,8 @@ static const struct gpu_ops gv100_ops = {
 		.is_preempt_pending = gv11b_fifo_is_preempt_pending,
 		.reset_enable_hw = gk20a_init_fifo_reset_enable_hw,
 		.teardown_ch_tsg = gv11b_fifo_teardown_ch_tsg,
-		.teardown_mask_intr = gv100_fifo_teardown_mask_intr,
-		.teardown_unmask_intr = gv100_fifo_teardown_unmask_intr,
+		.intr_set_recover_mask = gv100_fifo_intr_set_recover_mask,
+		.intr_unset_recover_mask = gv100_fifo_intr_unset_recover_mask,
 		.setup_sw = nvgpu_fifo_setup_sw,
 		.cleanup_sw = nvgpu_fifo_cleanup_sw,
 		.set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,

@@ -634,33 +634,6 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
 	}
 }
 
-void gv11b_fifo_teardown_mask_intr(struct gk20a *g)
-{
-	u32 val;
-
-	/*
-	 * ctxsw timeout error prevents recovery, and ctxsw error will retrigger
-	 * every 100ms. Disable ctxsw timeout error to allow recovery.
-	 */
-	val = gk20a_readl(g, fifo_intr_en_0_r());
-	val &= ~fifo_intr_0_ctxsw_timeout_pending_f();
-	gk20a_writel(g, fifo_intr_en_0_r(), val);
-	gk20a_writel(g, fifo_intr_ctxsw_timeout_r(),
-		gk20a_readl(g, fifo_intr_ctxsw_timeout_r()));
-}
-
-void gv11b_fifo_teardown_unmask_intr(struct gk20a *g)
-{
-	u32 val;
-
-	/* enable ctxsw timeout interrupt */
-	val = gk20a_readl(g, fifo_intr_en_0_r());
-	val |= fifo_intr_0_ctxsw_timeout_pending_f();
-	gk20a_writel(g, fifo_intr_en_0_r(), val);
-}
-
 void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 		u32 id, unsigned int id_type, unsigned int rc_type,
 		struct mmu_fault_info *mmfault)
@@ -683,7 +656,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 
 	nvgpu_fifo_lock_active_runlists(g);
 
-	g->ops.fifo.teardown_mask_intr(g);
+	g->ops.fifo.intr_set_recover_mask(g);
 
 	/* get runlist id and tsg */
 	if (id_type == ID_TYPE_TSG) {
@@ -861,7 +834,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 			nvgpu_warn(g, "fail to enable power mgmt");
 		}
 
-	g->ops.fifo.teardown_unmask_intr(g);
+	g->ops.fifo.intr_unset_recover_mask(g);
 
 	/* release runlist_lock */
 	if (runlist_id != FIFO_INVAL_RUNLIST_ID) {
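
Besides masking the enable bit, the gv11b path clears already-pending ctxsw-timeout state by writing the register's current value back to itself, a write-1-to-clear (W1C) idiom. A standalone sketch of that idiom with a simulated register (real code goes through fifo_intr_ctxsw_timeout_r() and the gk20a_readl/gk20a_writel accessors):

/* Standalone sketch of the write-1-to-clear idiom used above. */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

static u32 ctxsw_timeout_status = 0x5U;   /* pretend bits 0 and 2 are pending */

static u32 reg_read(void) { return ctxsw_timeout_status; }

static void reg_write(u32 val)
{
        /* W1C semantics: every 1 bit written clears that pending bit */
        ctxsw_timeout_status &= ~val;
}

int main(void)
{
        /* writing the register's own value back clears exactly what is pending */
        reg_write(reg_read());
        printf("status after clear: 0x%x\n", ctxsw_timeout_status);   /* 0x0 */
        return 0;
}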

@@ -49,8 +49,6 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
 void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 		u32 id, unsigned int id_type, unsigned int rc_type,
 		struct mmu_fault_info *mmfault);
-void gv11b_fifo_teardown_mask_intr(struct gk20a *g);
-void gv11b_fifo_teardown_unmask_intr(struct gk20a *g);
 void gv11b_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f);
 int gv11b_init_fifo_reset_enable_hw(struct gk20a *g);
 int gv11b_init_fifo_setup_hw(struct gk20a *g);

@@ -897,8 +897,8 @@ static const struct gpu_ops gv11b_ops = {
 		.is_preempt_pending = gv11b_fifo_is_preempt_pending,
 		.reset_enable_hw = gv11b_init_fifo_reset_enable_hw,
 		.teardown_ch_tsg = gv11b_fifo_teardown_ch_tsg,
-		.teardown_mask_intr = gv11b_fifo_teardown_mask_intr,
-		.teardown_unmask_intr = gv11b_fifo_teardown_unmask_intr,
+		.intr_set_recover_mask = gv11b_fifo_intr_set_recover_mask,
+		.intr_unset_recover_mask = gv11b_fifo_intr_unset_recover_mask,
 		.setup_sw = nvgpu_fifo_setup_sw,
 		.cleanup_sw = nvgpu_fifo_cleanup_sw,
 		.set_sm_exception_type_mask = gk20a_tsg_set_sm_exception_type_mask,

@@ -290,3 +290,24 @@ bool gk20a_fifo_is_mmu_fault_pending(struct gk20a *g)
 		return false;
 	}
 }
+
+void gk20a_fifo_intr_set_recover_mask(struct gk20a *g)
+{
+	u32 val;
+
+	val = nvgpu_readl(g, fifo_intr_en_0_r());
+	val &= ~(fifo_intr_en_0_sched_error_m() |
+		fifo_intr_en_0_mmu_fault_m());
+	nvgpu_writel(g, fifo_intr_en_0_r(), val);
+	nvgpu_writel(g, fifo_intr_0_r(), fifo_intr_0_sched_error_reset_f());
+}
+
+void gk20a_fifo_intr_unset_recover_mask(struct gk20a *g)
+{
+	u32 val;
+
+	val = nvgpu_readl(g, fifo_intr_en_0_r());
+	val |= fifo_intr_en_0_mmu_fault_f(1) | fifo_intr_en_0_sched_error_f(1);
+	nvgpu_writel(g, fifo_intr_en_0_r(), val);
+}
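
Both new gk20a functions share one read-modify-write shape on fifo_intr_en_0_r(). A condensed standalone sketch of that shape (the bit positions and MMIO stubs are invented; nvgpu derives the real masks from fifo_intr_en_0_sched_error_m() and fifo_intr_en_0_mmu_fault_m()):

/* Condensed sketch of the read-modify-write pattern; MMIO is simulated. */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

#define SCHED_ERROR_M (1U << 0)   /* invented bit positions */
#define MMU_FAULT_M   (1U << 1)

static u32 fake_intr_en;          /* stands in for fifo_intr_en_0 */

static u32 mmio_read(void)       { return fake_intr_en; }
static void mmio_write(u32 val)  { fake_intr_en = val; }

static void set_recover_mask(void)
{
        u32 val = mmio_read();
        val &= ~(SCHED_ERROR_M | MMU_FAULT_M);   /* disable both sources */
        mmio_write(val);
}

static void unset_recover_mask(void)
{
        u32 val = mmio_read();
        val |= SCHED_ERROR_M | MMU_FAULT_M;      /* re-enable them */
        mmio_write(val);
}

int main(void)
{
        fake_intr_en = SCHED_ERROR_M | MMU_FAULT_M;
        set_recover_mask();
        printf("masked:   0x%x\n", fake_intr_en);    /* 0x0 */
        unset_recover_mask();
        printf("restored: 0x%x\n", fake_intr_en);    /* 0x3 */
        return 0;
}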

@@ -38,5 +38,7 @@ u32 gk20a_fifo_pbdma_isr(struct gk20a *g);
 bool gk20a_fifo_handle_sched_error(struct gk20a *g);
 bool gk20a_fifo_is_mmu_fault_pending(struct gk20a *g);
+void gk20a_fifo_intr_set_recover_mask(struct gk20a *g);
+void gk20a_fifo_intr_unset_recover_mask(struct gk20a *g);
 
 #endif /* NVGPU_FIFO_INTR_GK20A_H */

@@ -230,3 +230,29 @@ void gv11b_fifo_intr_0_isr(struct gk20a *g)
 
 	nvgpu_writel(g, fifo_intr_0_r(), clear_intr);
 }
+
+void gv11b_fifo_intr_set_recover_mask(struct gk20a *g)
+{
+	u32 val;
+
+	/*
+	 * ctxsw timeout error prevents recovery, and ctxsw error will retrigger
+	 * every 100ms. Disable ctxsw timeout error to allow recovery.
+	 */
+	val = nvgpu_readl(g, fifo_intr_en_0_r());
+	val &= ~fifo_intr_0_ctxsw_timeout_pending_f();
+	nvgpu_writel(g, fifo_intr_en_0_r(), val);
+	nvgpu_writel(g, fifo_intr_ctxsw_timeout_r(),
+		nvgpu_readl(g, fifo_intr_ctxsw_timeout_r()));
+}
+
+void gv11b_fifo_intr_unset_recover_mask(struct gk20a *g)
+{
+	u32 val;
+
+	/* enable ctxsw timeout interrupt */
+	val = nvgpu_readl(g, fifo_intr_en_0_r());
+	val |= fifo_intr_0_ctxsw_timeout_pending_f();
+	nvgpu_writel(g, fifo_intr_en_0_r(), val);
+}

@@ -42,4 +42,7 @@ void gv11b_fifo_intr_0_isr(struct gk20a *g);
 bool gv11b_fifo_handle_sched_error(struct gk20a *g);
+
+void gv11b_fifo_intr_set_recover_mask(struct gk20a *g);
+void gv11b_fifo_intr_unset_recover_mask(struct gk20a *g);
 
 #endif /* NVGPU_FIFO_INTR_GV11B_H */

@@ -985,8 +985,8 @@ struct gpu_ops {
 		void (*teardown_ch_tsg)(struct gk20a *g, u32 act_eng_bitmask,
 				u32 id, unsigned int id_type, unsigned int rc_type,
 				struct mmu_fault_info *mmfault);
-		void (*teardown_mask_intr)(struct gk20a *g);
-		void (*teardown_unmask_intr)(struct gk20a *g);
+		void (*intr_set_recover_mask)(struct gk20a *g);
+		void (*intr_unset_recover_mask)(struct gk20a *g);
 		u32 (*get_preempt_timeout)(struct gk20a *g);
 		int (*init_pdb_cache_war)(struct gk20a *g);
 		void (*deinit_pdb_cache_war)(struct gk20a *g);
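
The ops stay plain function pointers in gpu_ops, so a chip that never fills them in leaves NULL behind. The nvgpu call sites in this commit invoke them unconditionally; a defensive caller would look roughly like this sketch (stub types, illustrative only):

/* Sketch of a defensive call through an optional op; stub types. */
#include <stddef.h>
#include <stdio.h>

struct gk20a;
struct fifo_ops { void (*intr_set_recover_mask)(struct gk20a *g); };
struct gk20a { struct fifo_ops fifo; };

static void maybe_set_recover_mask(struct gk20a *g)
{
        if (g->fifo.intr_set_recover_mask != NULL) {
                g->fifo.intr_set_recover_mask(g);
        } else {
                puts("op not wired for this chip; skipping");
        }
}

int main(void)
{
        struct gk20a g = { .fifo = { NULL } };   /* chip without the op */
        maybe_set_recover_mask(&g);
        return 0;
}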

@@ -959,8 +959,8 @@ static const struct gpu_ops tu104_ops = {
 		.is_preempt_pending = gv11b_fifo_is_preempt_pending,
 		.reset_enable_hw = gv11b_init_fifo_reset_enable_hw,
 		.teardown_ch_tsg = gv11b_fifo_teardown_ch_tsg,
-		.teardown_mask_intr = gv11b_fifo_teardown_mask_intr,
-		.teardown_unmask_intr = gv11b_fifo_teardown_unmask_intr,
+		.intr_set_recover_mask = gv11b_fifo_intr_set_recover_mask,
+		.intr_unset_recover_mask = gv11b_fifo_intr_unset_recover_mask,
 		.setup_sw = nvgpu_fifo_setup_sw,
 		.cleanup_sw = nvgpu_fifo_cleanup_sw,
 		.init_pdb_cache_war = tu104_init_pdb_cache_war,