gpu: nvgpu: add HALs to mmu fault descriptors.

MMU fault information for the client and GPC differs across chips. Add a
separate descriptor table for each chip and add HAL functions to access
those descriptors.

bug 2050564

Change-Id: If15a4757762569d60d4ce1a6a47b8c9a93c11cb0
Signed-off-by: Vinod G <vinodg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1704105
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Vinod G
2018-04-27 09:33:07 -07:00
committed by mobile promotions
parent 76597927e4
commit 010439ba08
16 changed files with 244 additions and 39 deletions

View File

@@ -1143,7 +1143,7 @@ gk20a_refch_from_inst_ptr(struct gk20a *g, u64 inst_ptr)
/* fault info/descriptions.
* tbd: move to setup
* */
static const char * const fault_type_descs[] = {
static const char * const gk20a_fault_type_descs[] = {
"pde", /*fifo_intr_mmu_fault_info_type_pde_v() == 0 */
"pde size",
"pte",
@@ -1167,15 +1167,15 @@ static const char * const engine_subid_descs[] = {
"hub",
};
static const char * const hub_client_descs[] = {
static const char * const gk20a_hub_client_descs[] = {
"vip", "ce0", "ce1", "dniso", "fe", "fecs", "host", "host cpu",
"host cpu nb", "iso", "mmu", "mspdec", "msppp", "msvld",
"niso", "p2p", "pd", "perf", "pmu", "raster twod", "scc",
"scc nb", "sec", "ssync", "gr copy", "ce2", "xv", "mmu nb",
"scc nb", "sec", "ssync", "gr copy", "xv", "mmu nb",
"msenc", "d falcon", "sked", "a falcon", "n/a",
};
static const char * const gpc_client_descs[] = {
static const char * const gk20a_gpc_client_descs[] = {
"l1 0", "t1 0", "pe 0",
"l1 1", "t1 1", "pe 1",
"l1 2", "t1 2", "pe 2",
@@ -1186,28 +1186,54 @@ static const char * const gpc_client_descs[] = {
"l1 5", "t1 5", "pe 5",
"l1 6", "t1 6", "pe 6",
"l1 7", "t1 7", "pe 7",
"gpm",
"ltp utlb 0", "ltp utlb 1", "ltp utlb 2", "ltp utlb 3",
"rgg utlb",
};
/* Fallback description installed before the per-chip HAL lookups run;
 * it remains in place when an id has no matching table entry. */
static const char * const does_not_exist[] = {
"does not exist"
};
/*
 * Fill in the MMU fault type description for gk20a.
 *
 * Looks up mmfault->fault_type in gk20a_fault_type_descs[] and stores the
 * matching string in mmfault->fault_type_desc.  An out-of-range fault_type
 * only triggers WARN_ON; fault_type_desc is left as whatever the caller
 * pre-set (get_exception_mmu_fault_info pre-sets does_not_exist[0]).
 */
void gk20a_fifo_get_mmu_fault_desc(struct mmu_fault_info *mmfault)
{
if (mmfault->fault_type >= ARRAY_SIZE(gk20a_fault_type_descs))
/* bounds test re-evaluated solely to emit the warning */
WARN_ON(mmfault->fault_type >=
ARRAY_SIZE(gk20a_fault_type_descs));
else
mmfault->fault_type_desc =
gk20a_fault_type_descs[mmfault->fault_type];
}
/*
 * Fill in the MMU fault client (HUB unit) description for gk20a.
 *
 * Indexes gk20a_hub_client_descs[] by mmfault->client_id.  An out-of-range
 * client_id only triggers WARN_ON; client_id_desc keeps the fallback the
 * caller pre-set.
 */
void gk20a_fifo_get_mmu_fault_client_desc(struct mmu_fault_info *mmfault)
{
if (mmfault->client_id >= ARRAY_SIZE(gk20a_hub_client_descs))
/* bounds test re-evaluated solely to emit the warning */
WARN_ON(mmfault->client_id >=
ARRAY_SIZE(gk20a_hub_client_descs));
else
mmfault->client_id_desc =
gk20a_hub_client_descs[mmfault->client_id];
}
/*
 * Fill in the MMU fault GPC client description for gk20a.
 *
 * Indexes gk20a_gpc_client_descs[] by mmfault->client_id.  An out-of-range
 * client_id only triggers WARN_ON; client_id_desc keeps the fallback the
 * caller pre-set.
 */
void gk20a_fifo_get_mmu_fault_gpc_desc(struct mmu_fault_info *mmfault)
{
if (mmfault->client_id >= ARRAY_SIZE(gk20a_gpc_client_descs))
/* bounds test re-evaluated solely to emit the warning */
WARN_ON(mmfault->client_id >=
ARRAY_SIZE(gk20a_gpc_client_descs));
else
mmfault->client_id_desc =
gk20a_gpc_client_descs[mmfault->client_id];
}
/*
 * NOTE(review): this span comes from a rendered diff whose +/- markers were
 * stripped, so removed (pre-change) and added (post-change) lines appear
 * interleaved below, together with a raw "@@" hunk header.  The text is
 * preserved byte-for-byte; comments only mark the apparent old/new spans --
 * confirm against the original commit (Change-Id If15a4757...).
 *
 * Post-change behavior: read raw fault info via the get_mmu_fault_info HAL,
 * pre-set the desc fields to does_not_exist[0], then let the per-chip HAL
 * hooks (get_mmu_fault_desc / get_mmu_fault_client_desc /
 * get_mmu_fault_gpc_desc) fill in chip-specific strings when present.
 */
static void get_exception_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id,
struct mmu_fault_info *mmfault)
{
g->ops.fifo.get_mmu_fault_info(g, mmu_fault_id, mmfault);
/* parse info */
/* apparently REMOVED: inline lookup in the old common fault_type_descs[] */
if (mmfault->fault_type >= ARRAY_SIZE(fault_type_descs)) {
WARN_ON(mmfault->fault_type >= ARRAY_SIZE(fault_type_descs));
mmfault->fault_type_desc = does_not_exist[0];
} else {
mmfault->fault_type_desc =
fault_type_descs[mmfault->fault_type];
}
/* apparently ADDED: fallback desc, then optional per-chip HAL lookup */
mmfault->fault_type_desc = does_not_exist[0];
if (g->ops.fifo.get_mmu_fault_desc)
g->ops.fifo.get_mmu_fault_desc(mmfault);
if (mmfault->client_type >= ARRAY_SIZE(engine_subid_descs)) {
WARN_ON(mmfault->client_type >= ARRAY_SIZE(engine_subid_descs));
@@ -1218,25 +1244,14 @@ static void get_exception_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id,
}
mmfault->client_id_desc = does_not_exist[0];
/* apparently REMOVED: inline hub/gpc client table lookups */
if (mmfault->client_type ==
fifo_intr_mmu_fault_info_engine_subid_hub_v()) {
if (mmfault->client_id >=
ARRAY_SIZE(hub_client_descs))
WARN_ON(mmfault->client_id >=
ARRAY_SIZE(hub_client_descs));
else
mmfault->client_id_desc =
hub_client_descs[mmfault->client_id];
} else if (mmfault->client_type ==
fifo_intr_mmu_fault_info_engine_subid_gpc_v()) {
if (mmfault->client_id >= ARRAY_SIZE(gpc_client_descs))
WARN_ON(mmfault->client_id >=
ARRAY_SIZE(gpc_client_descs));
else
mmfault->client_id_desc =
gpc_client_descs[mmfault->client_id];
}
/* apparently ADDED: dispatch to per-chip hub/gpc HAL hooks when set */
if ((mmfault->client_type ==
fifo_intr_mmu_fault_info_engine_subid_hub_v())
&& g->ops.fifo.get_mmu_fault_client_desc)
g->ops.fifo.get_mmu_fault_client_desc(mmfault);
else if ((mmfault->client_type ==
fifo_intr_mmu_fault_info_engine_subid_gpc_v())
&& g->ops.fifo.get_mmu_fault_gpc_desc)
g->ops.fifo.get_mmu_fault_gpc_desc(mmfault);
}
/* reads info from hardware and fills in mmu fault info record */

View File

@@ -441,4 +441,7 @@ int gk20a_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
void gk20a_fifo_get_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id,
struct mmu_fault_info *mmfault);
void gk20a_fifo_get_mmu_fault_desc(struct mmu_fault_info *mmfault);
void gk20a_fifo_get_mmu_fault_client_desc(struct mmu_fault_info *mmfault);
void gk20a_fifo_get_mmu_fault_gpc_desc(struct mmu_fault_info *mmfault);
#endif /*__GR_GK20A_H__*/

View File

@@ -574,6 +574,10 @@ struct gpu_ops {
unsigned long engine_ids);
void (*get_mmu_fault_info)(struct gk20a *g, u32 mmu_fault_id,
struct mmu_fault_info *mmfault);
void (*get_mmu_fault_desc)(struct mmu_fault_info *mmfault);
void (*get_mmu_fault_client_desc)(
struct mmu_fault_info *mmfault);
void (*get_mmu_fault_gpc_desc)(struct mmu_fault_info *mmfault);
void (*apply_pb_timeout)(struct gk20a *g);
void (*apply_ctxsw_timeout_intr)(struct gk20a *g);
int (*wait_engine_idle)(struct gk20a *g);

View File

@@ -1,7 +1,7 @@
/*
* GM20B Fifo
*
* Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -223,3 +223,34 @@ void gm20b_fifo_tsg_verify_status_ctx_reload(struct channel_gk20a *ch)
nvgpu_rwsem_up_read(&tsg->ch_list_lock);
}
}
/* GPC client names for gm20b, indexed by the MMU fault client_id. */
static const char * const gm20b_gpc_client_descs[] = {
"l1 0", "t1 0", "pe 0",
"l1 1", "t1 1", "pe 1",
"l1 2", "t1 2", "pe 2",
"l1 3", "t1 3", "pe 3",
"rast", "gcc", "gpccs",
"prop 0", "prop 1", "prop 2", "prop 3",
"l1 4", "t1 4", "pe 4",
"l1 5", "t1 5", "pe 5",
"l1 6", "t1 6", "pe 6",
"l1 7", "t1 7", "pe 7",
/* NOTE(review): no "l1 8/t1 8/pe 8" row -- presumably matches the
 * chip's client-ID encoding; confirm against the HW headers. */
"l1 9", "t1 9", "pe 9",
"l1 10", "t1 10", "pe 10",
"l1 11", "t1 11", "pe 11",
"unknown", "unknown", "unknown", "unknown",
"tpccs 0", "tpccs 1", "tpccs 2",
"tpccs 3", "tpccs 4", "tpccs 5",
"tpccs 6", "tpccs 7", "tpccs 8",
"tpccs 9", "tpccs 10", "tpccs 11",
};
/*
 * Fill in the MMU fault GPC client description for gm20b.
 *
 * Indexes gm20b_gpc_client_descs[] by mmfault->client_id.  An out-of-range
 * client_id only triggers WARN_ON; client_id_desc is left untouched.
 */
void gm20b_fifo_get_mmu_fault_gpc_desc(struct mmu_fault_info *mmfault)
{
if (mmfault->client_id >= ARRAY_SIZE(gm20b_gpc_client_descs))
/* bounds test re-evaluated solely to emit the warning */
WARN_ON(mmfault->client_id >=
ARRAY_SIZE(gm20b_gpc_client_descs));
else
mmfault->client_id_desc =
gm20b_gpc_client_descs[mmfault->client_id];
}

View File

@@ -1,7 +1,7 @@
/*
* GM20B Fifo
*
* Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -25,6 +25,7 @@
#ifndef _NVHOST_GM20B_FIFO
#define _NVHOST_GM20B_FIFO
struct gk20a;
struct mmu_fault_info;
void channel_gm20b_bind(struct channel_gk20a *c);
void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
@@ -35,5 +36,6 @@ void gm20b_device_info_data_parse(struct gk20a *g,
u32 *pri_base, u32 *fault_id);
void gm20b_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f);
void gm20b_fifo_tsg_verify_status_ctx_reload(struct channel_gk20a *ch);
void gm20b_fifo_get_mmu_fault_gpc_desc(struct mmu_fault_info *mmfault);
#endif

View File

@@ -416,6 +416,9 @@ static const struct gpu_ops gm20b_ops = {
.update_runlist = gk20a_fifo_update_runlist,
.trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault,
.get_mmu_fault_info = gk20a_fifo_get_mmu_fault_info,
.get_mmu_fault_desc = gk20a_fifo_get_mmu_fault_desc,
.get_mmu_fault_client_desc = gk20a_fifo_get_mmu_fault_client_desc,
.get_mmu_fault_gpc_desc = gm20b_fifo_get_mmu_fault_gpc_desc,
.wait_engine_idle = gk20a_fifo_wait_engine_idle,
.get_num_fifos = gm20b_fifo_get_num_fifos,
.get_pbdma_signature = gk20a_fifo_get_pbdma_signature,

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -32,3 +32,72 @@ u32 gp106_fifo_get_num_fifos(struct gk20a *g)
{
return ccsr_channel__size_1_v();
}
/* HUB client names for gp106, indexed by the MMU fault client_id. */
static const char * const gp106_hub_client_descs[] = {
"vip", "ce0", "ce1", "dniso", "fe", "fecs", "host", "host cpu",
"host cpu nb", "iso", "mmu", "mspdec", "msppp", "msvld",
"niso", "p2p", "pd", "perf", "pmu", "raster twod", "scc",
"scc nb", "sec", "ssync", "gr copy", "xv", "mmu nb",
"msenc", "d falcon", "sked", "a falcon", "n/a",
"hsce0", "hsce1", "hsce2", "hsce3", "hsce4", "hsce5",
"hsce6", "hsce7", "hsce8", "hsce9", "hshub",
"ptp x0", "ptp x1", "ptp x2", "ptp x3", "ptp x4",
"ptp x5", "ptp x6", "ptp x7", "vpr scrubber0", "vpr scrubber1",
"dwbif", "fbfalcon",
};
/* GPC client names for gp106, indexed by the MMU fault client_id. */
static const char * const gp106_gpc_client_descs[] = {
"l1 0", "t1 0", "pe 0",
"l1 1", "t1 1", "pe 1",
"l1 2", "t1 2", "pe 2",
"l1 3", "t1 3", "pe 3",
"rast", "gcc", "gpccs",
"prop 0", "prop 1", "prop 2", "prop 3",
"l1 4", "t1 4", "pe 4",
"l1 5", "t1 5", "pe 5",
"l1 6", "t1 6", "pe 6",
"l1 7", "t1 7", "pe 7",
/* NOTE(review): no "l1 8/t1 8/pe 8" row -- presumably matches the
 * chip's client-ID encoding; confirm against the HW headers. */
"l1 9", "t1 9", "pe 9",
"l1 10", "t1 10", "pe 10",
"l1 11", "t1 11", "pe 11",
"unknown", "unknown", "unknown", "unknown",
"tpccs 0", "tpccs 1", "tpccs 2",
"tpccs 3", "tpccs 4", "tpccs 5",
"tpccs 6", "tpccs 7", "tpccs 8",
"tpccs 9", "tpccs 10", "tpccs 11",
"tpccs 12", "tpccs 13", "tpccs 14",
"tpccs 15", "tpccs 16", "tpccs 17",
"tpccs 18", "tpccs 19", "unknown", "unknown",
"unknown", "unknown", "unknown", "unknown",
"unknown", "unknown", "unknown", "unknown",
"unknown", "unknown",
"l1 12", "t1 12", "pe 12",
"l1 13", "t1 13", "pe 13",
"l1 14", "t1 14", "pe 14",
"l1 15", "t1 15", "pe 15",
"l1 16", "t1 16", "pe 16",
"l1 17", "t1 17", "pe 17",
"l1 18", "t1 18", "pe 18",
"l1 19", "t1 19", "pe 19",
};
/*
 * Fill in the MMU fault GPC client description for gp106.
 *
 * Indexes gp106_gpc_client_descs[] by mmfault->client_id.  An out-of-range
 * client_id only triggers WARN_ON; client_id_desc is left untouched.
 */
void gp106_fifo_get_mmu_fault_gpc_desc(struct mmu_fault_info *mmfault)
{
if (mmfault->client_id >= ARRAY_SIZE(gp106_gpc_client_descs))
/* bounds test re-evaluated solely to emit the warning */
WARN_ON(mmfault->client_id >=
ARRAY_SIZE(gp106_gpc_client_descs));
else
mmfault->client_id_desc =
gp106_gpc_client_descs[mmfault->client_id];
}
/*
 * Fill in the MMU fault client (HUB unit) description for gp106.
 *
 * Indexes gp106_hub_client_descs[] by mmfault->client_id.  An out-of-range
 * client_id only triggers WARN_ON; client_id_desc is left untouched.
 */
void gp106_fifo_get_mmu_fault_client_desc(struct mmu_fault_info *mmfault)
{
if (mmfault->client_id >= ARRAY_SIZE(gp106_hub_client_descs))
/* bounds test re-evaluated solely to emit the warning */
WARN_ON(mmfault->client_id >=
ARRAY_SIZE(gp106_hub_client_descs));
else
mmfault->client_id_desc =
gp106_hub_client_descs[mmfault->client_id];
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -23,5 +23,9 @@
#ifndef NVGPU_FIFO_GP106_H
#define NVGPU_FIFO_GP106_H
struct gk20a;
struct mmu_fault_info;
u32 gp106_fifo_get_num_fifos(struct gk20a *g);
void gp106_fifo_get_mmu_fault_client_desc(struct mmu_fault_info *mmfault);
void gp106_fifo_get_mmu_fault_gpc_desc(struct mmu_fault_info *mmfault);
#endif

View File

@@ -478,6 +478,9 @@ static const struct gpu_ops gp106_ops = {
.update_runlist = gk20a_fifo_update_runlist,
.trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault,
.get_mmu_fault_info = gp10b_fifo_get_mmu_fault_info,
.get_mmu_fault_desc = gp10b_fifo_get_mmu_fault_desc,
.get_mmu_fault_client_desc = gp106_fifo_get_mmu_fault_client_desc,
.get_mmu_fault_gpc_desc = gp106_fifo_get_mmu_fault_gpc_desc,
.wait_engine_idle = gk20a_fifo_wait_engine_idle,
.get_num_fifos = gp106_fifo_get_num_fifos,
.get_pbdma_signature = gp10b_fifo_get_pbdma_signature,

View File

@@ -1,7 +1,7 @@
/*
* GP10B fifo
*
* Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -317,3 +317,57 @@ void gp10b_fifo_get_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id,
/* note: inst_ptr is a 40b phys addr. */
mmfault->inst_ptr <<= fifo_intr_mmu_fault_inst_ptr_align_shift_v();
}
/* fault info/descriptions
 * Fault type names for gp10b, indexed by the MMU fault_type field. */
static const char * const gp10b_fault_type_descs[] = {
"pde", /*fifo_intr_mmu_fault_info_type_pde_v() == 0 */
"pde size",
"pte",
"va limit viol",
"unbound inst",
"priv viol",
"ro viol",
"wo viol",
"pitch mask",
"work creation",
"bad aperture",
"compression failure",
"bad kind",
"region viol",
"dual ptes",
"poisoned",
"atomic violation",
};
/* HUB client names for gp10b, indexed by the MMU fault client_id. */
static const char * const gp10b_hub_client_descs[] = {
"vip", "ce0", "ce1", "dniso", "fe", "fecs", "host", "host cpu",
"host cpu nb", "iso", "mmu", "mspdec", "msppp", "msvld",
"niso", "p2p", "pd", "perf", "pmu", "raster twod", "scc",
"scc nb", "sec", "ssync", "gr copy", "xv", "mmu nb",
"msenc", "d falcon", "sked", "a falcon", "n/a",
"hsce0", "hsce1", "hsce2", "hsce3", "hsce4", "hsce5",
"hsce6", "hsce7", "hsce8", "hsce9", "hshub",
"ptp x0", "ptp x1", "ptp x2", "ptp x3", "ptp x4",
"ptp x5", "ptp x6", "ptp x7", "vpr scrubber0", "vpr scrubber1",
};
/*
 * Fill in the MMU fault type description for gp10b.
 *
 * Indexes gp10b_fault_type_descs[] by mmfault->fault_type.  An out-of-range
 * fault_type only triggers WARN_ON; fault_type_desc is left untouched.
 */
void gp10b_fifo_get_mmu_fault_desc(struct mmu_fault_info *mmfault)
{
if (mmfault->fault_type >= ARRAY_SIZE(gp10b_fault_type_descs))
/* bounds test re-evaluated solely to emit the warning */
WARN_ON(mmfault->fault_type >=
ARRAY_SIZE(gp10b_fault_type_descs));
else
mmfault->fault_type_desc =
gp10b_fault_type_descs[mmfault->fault_type];
}
/*
 * Fill in the MMU fault client (HUB unit) description for gp10b.
 *
 * Indexes gp10b_hub_client_descs[] by mmfault->client_id.  An out-of-range
 * client_id only triggers WARN_ON; client_id_desc is left untouched.
 */
void gp10b_fifo_get_mmu_fault_client_desc(struct mmu_fault_info *mmfault)
{
if (mmfault->client_id >= ARRAY_SIZE(gp10b_hub_client_descs))
/* bounds test re-evaluated solely to emit the warning */
WARN_ON(mmfault->client_id >=
ARRAY_SIZE(gp10b_hub_client_descs));
else
mmfault->client_id_desc =
gp10b_hub_client_descs[mmfault->client_id];
}

View File

@@ -1,7 +1,7 @@
/*
* GP10B Fifo
*
* Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -42,6 +42,8 @@ void gp10b_device_info_data_parse(struct gk20a *g, u32 table_entry,
void gp10b_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f);
void gp10b_fifo_get_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id,
struct mmu_fault_info *mmfault);
void gp10b_fifo_get_mmu_fault_desc(struct mmu_fault_info *mmfault);
void gp10b_fifo_get_mmu_fault_client_desc(struct mmu_fault_info *mmfault);
int channel_gp10b_commit_userd(struct channel_gk20a *c);
#endif

View File

@@ -451,6 +451,9 @@ static const struct gpu_ops gp10b_ops = {
.update_runlist = gk20a_fifo_update_runlist,
.trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault,
.get_mmu_fault_info = gp10b_fifo_get_mmu_fault_info,
.get_mmu_fault_desc = gp10b_fifo_get_mmu_fault_desc,
.get_mmu_fault_client_desc = gp10b_fifo_get_mmu_fault_client_desc,
.get_mmu_fault_gpc_desc = gm20b_fifo_get_mmu_fault_gpc_desc,
.wait_engine_idle = gk20a_fifo_wait_engine_idle,
.get_num_fifos = gm20b_fifo_get_num_fifos,
.get_pbdma_signature = gp10b_fifo_get_pbdma_signature,

View File

@@ -534,6 +534,9 @@ static const struct gpu_ops gv100_ops = {
.update_runlist = gk20a_fifo_update_runlist,
.trigger_mmu_fault = NULL,
.get_mmu_fault_info = NULL,
.get_mmu_fault_desc = NULL,
.get_mmu_fault_client_desc = NULL,
.get_mmu_fault_gpc_desc = NULL,
.wait_engine_idle = gk20a_fifo_wait_engine_idle,
.get_num_fifos = gv100_fifo_get_num_fifos,
.get_pbdma_signature = gp10b_fifo_get_pbdma_signature,

View File

@@ -506,6 +506,9 @@ static const struct gpu_ops gv11b_ops = {
.update_runlist = gk20a_fifo_update_runlist,
.trigger_mmu_fault = NULL,
.get_mmu_fault_info = NULL,
.get_mmu_fault_desc = NULL,
.get_mmu_fault_client_desc = NULL,
.get_mmu_fault_gpc_desc = NULL,
.wait_engine_idle = gk20a_fifo_wait_engine_idle,
.get_num_fifos = gv11b_fifo_get_num_fifos,
.get_pbdma_signature = gp10b_fifo_get_pbdma_signature,

View File

@@ -323,6 +323,9 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.update_runlist = vgpu_fifo_update_runlist,
.trigger_mmu_fault = gm20b_fifo_trigger_mmu_fault,
.get_mmu_fault_info = gp10b_fifo_get_mmu_fault_info,
.get_mmu_fault_desc = gp10b_fifo_get_mmu_fault_desc,
.get_mmu_fault_client_desc = gp10b_fifo_get_mmu_fault_client_desc,
.get_mmu_fault_gpc_desc = gm20b_fifo_get_mmu_fault_gpc_desc,
.wait_engine_idle = vgpu_fifo_wait_engine_idle,
.get_num_fifos = gm20b_fifo_get_num_fifos,
.get_pbdma_signature = gp10b_fifo_get_pbdma_signature,

View File

@@ -362,6 +362,9 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.update_runlist = vgpu_fifo_update_runlist,
.trigger_mmu_fault = NULL,
.get_mmu_fault_info = NULL,
.get_mmu_fault_desc = NULL,
.get_mmu_fault_client_desc = NULL,
.get_mmu_fault_gpc_desc = NULL,
.wait_engine_idle = vgpu_fifo_wait_engine_idle,
.get_num_fifos = gv11b_fifo_get_num_fifos,
.get_pbdma_signature = gp10b_fifo_get_pbdma_signature,