gpu: nvgpu: move engine functions

Move engine functions from fifo_gv11b.c to common/fifo/engines

Add fifo.mmu_fault_id_to_pbdma_id hal

JIRA NVGPU-1313

Change-Id: I6a6ac385a64c4908098ea9e483544b1e1b2d0c58
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2098950
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:       Seema Khowala
Date:         2019-04-16 13:20:52 -07:00
Committed by: mobile promotions
Commit:       7a440fb721
Parent:       7b2db862fe

10 changed files with 81 additions and 72 deletions
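For orientation before the per-file hunks, here is a small standalone model of the decode that the moved helpers implement. This is not driver code: the GR window size normally comes from f->max_subctx_count and the PBDMA window from fifo_cfg0_r(); the bases and sizes below are made up purely for illustration.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define INVAL_ID (~(uint32_t)0U)

/*
 * Same arithmetic as nvgpu_engine_mmu_fault_id_to_veid(): a fault ID
 * inside the GR window decodes to a VEID (subcontext index) by offset.
 */
static uint32_t fault_id_to_veid(uint32_t mmu_fault_id,
				 uint32_t gr_eng_fault_id,
				 uint32_t max_subctx_count)
{
	if (mmu_fault_id >= gr_eng_fault_id &&
	    mmu_fault_id < (gr_eng_fault_id + max_subctx_count)) {
		return mmu_fault_id - gr_eng_fault_id;
	}
	return INVAL_ID;
}

/*
 * Same arithmetic as gv11b_fifo_mmu_fault_id_to_pbdma_id(): a fault ID
 * inside the PBDMA window decodes to a PBDMA index by offset from the
 * first PBDMA fault ID.
 */
static uint32_t fault_id_to_pbdma_id(uint32_t mmu_fault_id,
				     uint32_t fault_id_pbdma0,
				     uint32_t num_pbdma)
{
	if (mmu_fault_id >= fault_id_pbdma0 &&
	    mmu_fault_id <= (fault_id_pbdma0 + num_pbdma - 1U)) {
		return mmu_fault_id - fault_id_pbdma0;
	}
	return INVAL_ID;
}

int main(void)
{
	/* Hypothetical layout: GR fault IDs 0..63, PBDMAs at 64..66. */
	printf("veid(5)    = %" PRIu32 "\n", fault_id_to_veid(5U, 0U, 64U));
	printf("pbdma(65)  = %" PRIu32 "\n", fault_id_to_pbdma_id(65U, 64U, 3U));
	printf("pbdma(200) = 0x%" PRIx32 "\n", fault_id_to_pbdma_id(200U, 64U, 3U));
	return 0;
}

A fault ID that falls in neither window is reported as INVAL_ID, which is what lets callers tell engine faults apart from PBDMA faults.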

@@ -942,3 +942,65 @@ bool nvgpu_engine_should_defer_reset(struct gk20a *g, u32 engine_id,
 	return g->ops.engine.is_fault_engine_subid_gpc(g, engine_subid);
 }
+
+u32 nvgpu_engine_mmu_fault_id_to_veid(struct gk20a *g, u32 mmu_fault_id,
+			u32 gr_eng_fault_id)
+{
+	struct fifo_gk20a *f = &g->fifo;
+	u32 num_subctx;
+	u32 veid = INVAL_ID;
+
+	num_subctx = f->max_subctx_count;
+
+	if (mmu_fault_id >= gr_eng_fault_id &&
+			mmu_fault_id < (gr_eng_fault_id + num_subctx)) {
+		veid = mmu_fault_id - gr_eng_fault_id;
+	}
+
+	return veid;
+}
+
+u32 nvgpu_engine_mmu_fault_id_to_eng_id_and_veid(struct gk20a *g,
+			u32 mmu_fault_id, u32 *veid)
+{
+	u32 engine_id;
+	u32 act_eng_id;
+	struct fifo_engine_info_gk20a *engine_info;
+	struct fifo_gk20a *f = &g->fifo;
+
+	for (engine_id = 0U; engine_id < f->num_engines; engine_id++) {
+		act_eng_id = f->active_engines_list[engine_id];
+		engine_info = &g->fifo.engine_info[act_eng_id];
+
+		if (act_eng_id == NVGPU_ENGINE_GR_GK20A) {
+			/* get faulted subctx id */
+			*veid = nvgpu_engine_mmu_fault_id_to_veid(g,
+					mmu_fault_id, engine_info->fault_id);
+			if (*veid != INVAL_ID) {
+				break;
+			}
+		} else {
+			if (engine_info->fault_id == mmu_fault_id) {
+				break;
+			}
+		}
+
+		act_eng_id = INVAL_ID;
+	}
+	return act_eng_id;
+}
+
+void nvgpu_engine_mmu_fault_id_to_eng_ve_pbdma_id(struct gk20a *g,
+	u32 mmu_fault_id, u32 *act_eng_id, u32 *veid, u32 *pbdma_id)
+{
+	*act_eng_id = nvgpu_engine_mmu_fault_id_to_eng_id_and_veid(g,
+				mmu_fault_id, veid);
+
+	if (*act_eng_id == INVAL_ID) {
+		*pbdma_id = g->ops.fifo.mmu_fault_id_to_pbdma_id(g,
+				mmu_fault_id);
+	} else {
+		*pbdma_id = INVAL_ID;
+	}
+}
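The hunk above keeps the engine-table walk in common code and leaves only the PBDMA lookup chip-specific, behind g->ops.fifo.mmu_fault_id_to_pbdma_id. The sketch below models that split with an invented engine table and a fake PBDMA hook; the GR/VEID special case is omitted to keep it short, and none of these types are the real nvgpu structures.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define INVAL_ID (~(uint32_t)0U)

struct engine_info {
	uint32_t id;		/* active engine ID */
	uint32_t fault_id;	/* MMU fault ID owned by this engine */
};

struct chip {
	const struct engine_info *engines;
	uint32_t num_engines;
	/* chip-specific hook, standing in for the new fifo HAL op */
	uint32_t (*fault_id_to_pbdma_id)(uint32_t mmu_fault_id);
};

/* Pretend PBDMAs occupy fault IDs 64..66 on this imaginary chip. */
static uint32_t fake_pbdma_lookup(uint32_t mmu_fault_id)
{
	return (mmu_fault_id >= 64U && mmu_fault_id <= 66U) ?
		(mmu_fault_id - 64U) : INVAL_ID;
}

/*
 * Common-code shape: walk the engine table first; only if nothing
 * matches, ask the chip hook whether the ID belongs to a PBDMA.
 */
static void decode(const struct chip *c, uint32_t mmu_fault_id,
		   uint32_t *eng_id, uint32_t *pbdma_id)
{
	uint32_t i;

	*eng_id = INVAL_ID;
	for (i = 0U; i < c->num_engines; i++) {
		if (c->engines[i].fault_id == mmu_fault_id) {
			*eng_id = c->engines[i].id;
			break;
		}
	}

	*pbdma_id = (*eng_id == INVAL_ID) ?
		c->fault_id_to_pbdma_id(mmu_fault_id) : INVAL_ID;
}

int main(void)
{
	static const struct engine_info engines[] = {
		{ .id = 0U, .fault_id = 0U },	/* "GR"-like engine */
		{ .id = 5U, .fault_id = 70U },	/* "CE"-like engine */
	};
	struct chip c = {
		.engines = engines,
		.num_engines = 2U,
		.fault_id_to_pbdma_id = fake_pbdma_lookup,
	};
	uint32_t eng, pbdma;

	decode(&c, 65U, &eng, &pbdma);	/* falls through to the PBDMA hook */
	printf("eng = 0x%" PRIx32 ", pbdma = %" PRIu32 "\n", eng, pbdma);
	return 0;
}

The design point is that exactly one of the two outputs is valid: either an engine ID with the PBDMA ID set to INVAL_ID, or the reverse.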

@@ -517,6 +517,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 		.get_mmu_fault_desc = NULL,
 		.get_mmu_fault_client_desc = NULL,
 		.get_mmu_fault_gpc_desc = NULL,
+		.mmu_fault_id_to_pbdma_id = gv11b_fifo_mmu_fault_id_to_pbdma_id,
 	},
 	.engine = {
 		.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,

@@ -323,7 +323,7 @@ static u32 gv11b_fifo_get_runlists_mask(struct gk20a *g, u32 act_eng_bitmask,
 	}
 
 	if ((rc_type == RC_TYPE_MMU_FAULT) && (mmfault != NULL)) {
-		if (mmfault->faulted_pbdma != FIFO_INVAL_PBDMA_ID) {
+		if (mmfault->faulted_pbdma != INVAL_ID) {
 			pbdma_bitmask = BIT32(mmfault->faulted_pbdma);
 		}
@@ -869,59 +869,11 @@ int gv11b_init_fifo_setup_hw(struct gk20a *g)
 	return 0;
 }
 
-static u32 gv11b_mmu_fault_id_to_gr_veid(struct gk20a *g, u32 gr_eng_fault_id,
-			u32 mmu_fault_id)
-{
-	struct fifo_gk20a *f = &g->fifo;
-	u32 num_subctx;
-	u32 veid = FIFO_INVAL_VEID;
-
-	num_subctx = f->max_subctx_count;
-
-	if (mmu_fault_id >= gr_eng_fault_id &&
-			mmu_fault_id < (gr_eng_fault_id + num_subctx)) {
-		veid = mmu_fault_id - gr_eng_fault_id;
-	}
-
-	return veid;
-}
-
-static u32 gv11b_mmu_fault_id_to_eng_id_and_veid(struct gk20a *g,
-			u32 mmu_fault_id, u32 *veid)
-{
-	u32 engine_id;
-	u32 active_engine_id;
-	struct fifo_engine_info_gk20a *engine_info;
-	struct fifo_gk20a *f = &g->fifo;
-
-	for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
-		active_engine_id = f->active_engines_list[engine_id];
-		engine_info = &g->fifo.engine_info[active_engine_id];
-
-		if (active_engine_id == NVGPU_ENGINE_GR_GK20A) {
-			/* get faulted subctx id */
-			*veid = gv11b_mmu_fault_id_to_gr_veid(g,
-					engine_info->fault_id, mmu_fault_id);
-			if (*veid != FIFO_INVAL_VEID) {
-				break;
-			}
-		} else {
-			if (engine_info->fault_id == mmu_fault_id) {
-				break;
-			}
-		}
-
-		active_engine_id = FIFO_INVAL_ENGINE_ID;
-	}
-	return active_engine_id;
-}
-
-static u32 gv11b_mmu_fault_id_to_pbdma_id(struct gk20a *g, u32 mmu_fault_id)
+u32 gv11b_fifo_mmu_fault_id_to_pbdma_id(struct gk20a *g, u32 mmu_fault_id)
 {
 	u32 num_pbdma, reg_val, fault_id_pbdma0;
 
-	reg_val = gk20a_readl(g, fifo_cfg0_r());
+	reg_val = nvgpu_readl(g, fifo_cfg0_r());
 	num_pbdma = fifo_cfg0_num_pbdma_v(reg_val);
 	fault_id_pbdma0 = fifo_cfg0_pbdma_fault_id_v(reg_val);
@@ -930,18 +882,5 @@ static u32 gv11b_mmu_fault_id_to_pbdma_id(struct gk20a *g, u32 mmu_fault_id)
 		return mmu_fault_id - fault_id_pbdma0;
 	}
 
-	return FIFO_INVAL_PBDMA_ID;
+	return INVAL_ID;
 }
-
-void gv11b_mmu_fault_id_to_eng_pbdma_id_and_veid(struct gk20a *g,
-	u32 mmu_fault_id, u32 *active_engine_id, u32 *veid, u32 *pbdma_id)
-{
-	*active_engine_id = gv11b_mmu_fault_id_to_eng_id_and_veid(g,
-				mmu_fault_id, veid);
-
-	if (*active_engine_id == FIFO_INVAL_ENGINE_ID) {
-		*pbdma_id = gv11b_mmu_fault_id_to_pbdma_id(g, mmu_fault_id);
-	} else {
-		*pbdma_id = FIFO_INVAL_PBDMA_ID;
-	}
-}

@@ -27,17 +27,13 @@
 #define PBDMA_SUBDEVICE_ID 1U
-#define FIFO_INVAL_PBDMA_ID (~U32(0U))
-#define FIFO_INVAL_VEID (~U32(0U))
 #define CHANNEL_INFO_VEID0 0U
 
 #define MAX_PRE_SI_RETRIES 200000U /* 1G/500KHz * 100 */
 
 struct gpu_ops;
 
-void gv11b_mmu_fault_id_to_eng_pbdma_id_and_veid(struct gk20a *g,
-	u32 mmu_fault_id, u32 *active_engine_id, u32 *veid, u32 *pbdma_id);
+u32 gv11b_fifo_mmu_fault_id_to_pbdma_id(struct gk20a *g, u32 mmu_fault_id);
 int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 		unsigned int id_type);

@@ -590,7 +590,7 @@ static void gv11b_fb_copy_from_hw_fault_buf(struct gk20a *g,
 	mmfault->mmu_engine_id =
 		gmmu_fault_buf_entry_engine_id_v(rd32_val);
 
-	gv11b_mmu_fault_id_to_eng_pbdma_id_and_veid(g, mmfault->mmu_engine_id,
+	nvgpu_engine_mmu_fault_id_to_eng_ve_pbdma_id(g, mmfault->mmu_engine_id,
 		&mmfault->faulted_engine, &mmfault->faulted_subid,
 		&mmfault->faulted_pbdma);
@@ -918,7 +918,7 @@ static void gv11b_mm_copy_from_fault_snap_reg(struct gk20a *g,
 	mmfault->inst_aperture = fb_mmu_fault_inst_lo_aperture_v(reg_val);
 	mmfault->mmu_engine_id = fb_mmu_fault_inst_lo_engine_id_v(reg_val);
 
-	gv11b_mmu_fault_id_to_eng_pbdma_id_and_veid(g, mmfault->mmu_engine_id,
+	nvgpu_engine_mmu_fault_id_to_eng_ve_pbdma_id(g, mmfault->mmu_engine_id,
 		&mmfault->faulted_engine, &mmfault->faulted_subid,
 		&mmfault->faulted_pbdma);

@@ -927,6 +927,7 @@ static const struct gpu_ops gv100_ops = {
 		.get_mmu_fault_desc = NULL,
 		.get_mmu_fault_client_desc = NULL,
 		.get_mmu_fault_gpc_desc = NULL,
+		.mmu_fault_id_to_pbdma_id = gv11b_fifo_mmu_fault_id_to_pbdma_id,
 	},
 	.engine = {
 		.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,

@@ -900,6 +900,7 @@ static const struct gpu_ops gv11b_ops = {
 		.get_mmu_fault_desc = NULL,
 		.get_mmu_fault_client_desc = NULL,
 		.get_mmu_fault_gpc_desc = NULL,
+		.mmu_fault_id_to_pbdma_id = gv11b_fifo_mmu_fault_id_to_pbdma_id,
 	},
 	.engine = {
 		.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,

@@ -962,6 +962,7 @@ static const struct gpu_ops tu104_ops = {
 		.get_mmu_fault_desc = NULL,
 		.get_mmu_fault_client_desc = NULL,
 		.get_mmu_fault_gpc_desc = NULL,
+		.mmu_fault_id_to_pbdma_id = gv11b_fifo_mmu_fault_id_to_pbdma_id,
 	},
 	.engine = {
 		.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,

@@ -83,4 +83,10 @@ u32 nvgpu_engine_get_runlist_busy_engines(struct gk20a *g, u32 runlist_id);
 bool nvgpu_engine_should_defer_reset(struct gk20a *g, u32 engine_id,
 		u32 engine_subid, bool fake_fault);
+u32 nvgpu_engine_mmu_fault_id_to_veid(struct gk20a *g, u32 mmu_fault_id,
+		u32 gr_eng_fault_id);
+u32 nvgpu_engine_mmu_fault_id_to_eng_id_and_veid(struct gk20a *g,
+		u32 mmu_fault_id, u32 *veid);
+void nvgpu_engine_mmu_fault_id_to_eng_ve_pbdma_id(struct gk20a *g,
+		u32 mmu_fault_id, u32 *act_eng_id, u32 *veid, u32 *pbdma_id);
 
 #endif /*NVGPU_ENGINE_H*/

@@ -1010,6 +1010,8 @@ struct gpu_ops {
 			struct mmu_fault_info *mmfault);
 		void (*get_mmu_fault_gpc_desc)(struct mmu_fault_info *mmfault);
 		bool (*is_mmu_fault_pending)(struct gk20a *g);
+		u32 (*mmu_fault_id_to_pbdma_id)(struct gk20a *g,
+			u32 mmu_fault_id);
 	} fifo;
 	struct {
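
The new gpu_ops slot declared above is filled with the same function by the gv100, gv11b and tu104 tables, so one register-driven implementation serves every chip with this fault-ID layout. Below is a minimal sketch of that wiring and of how common code calls through the slot; gk20a_model and gpu_ops_model are stand-ins, not the real (much larger) nvgpu types.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define INVAL_ID (~(uint32_t)0U)

struct gk20a_model;

/* Stand-in for the fifo sub-struct of gpu_ops, with only the new slot. */
struct gpu_ops_model {
	struct {
		uint32_t (*mmu_fault_id_to_pbdma_id)(struct gk20a_model *g,
						     uint32_t mmu_fault_id);
	} fifo;
};

struct gk20a_model {
	struct gpu_ops_model ops;
};

/*
 * Stand-in for the per-chip implementation: pretend the PBDMA fault-ID
 * window is 64..66 instead of reading it from fifo_cfg0_r().
 */
static uint32_t fake_pbdma_lookup(struct gk20a_model *g, uint32_t id)
{
	(void)g;
	return (id >= 64U && id <= 66U) ? (id - 64U) : INVAL_ID;
}

int main(void)
{
	/* Several chip tables could assign the same implementation here. */
	struct gk20a_model g = {
		.ops.fifo.mmu_fault_id_to_pbdma_id = fake_pbdma_lookup,
	};

	/* Common code only ever goes through the ops pointer. */
	printf("pbdma = %" PRIu32 "\n",
	       g.ops.fifo.mmu_fault_id_to_pbdma_id(&g, 65U));
	return 0;
}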