gpu: nvgpu: move nvgpu_report_gr_exception to common.gr.intr
Move the nvgpu_report_gr_exception call from gr_gk20a to gr_intr.c as
nvgpu_gr_intr_report_exception.

Move local function gk20a_gr_get_channel_from_ctx to gr_intr.c as
nvgpu_gr_intr_get_channel_from_ctx.

JIRA NVGPU-1891

Change-Id: I21521ad50989582d8f166a98a21ea3b1dcd3bbff
Signed-off-by: Vinod G <vinodg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2098229
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: 3a764030b1
Commit: 3d2942e412
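The two entry points now exported from the common GR interrupt code, as declared in the gr_intr.h hunk below, are:

	void nvgpu_gr_intr_report_exception(struct gk20a *g, u32 inst,
			u32 err_type, u32 status);
	struct channel_gk20a *nvgpu_gr_intr_get_channel_from_ctx(struct gk20a *g,
			u32 curr_ctx, u32 *curr_tsgid);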
@@ -24,6 +24,7 @@
 #include <nvgpu/io.h>
 #include <nvgpu/channel.h>
 #include <nvgpu/regops.h>
+#include <nvgpu/nvgpu_err.h>
 
 #include <nvgpu/gr/gr.h>
 #include <nvgpu/gr/gr_intr.h>
@@ -104,6 +105,128 @@ static int gr_intr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc,
 	return ret;
 }
 
+/* Used by sw interrupt thread to translate current ctx to chid.
+ * Also used by regops to translate current ctx to chid and tsgid.
+ * For performance, we don't want to go through 128 channels every time.
+ * curr_ctx should be the value read from gr falcon get_current_ctx op
+ * A small tlb is used here to cache translation.
+ *
+ * Returned channel must be freed with gk20a_channel_put() */
+struct channel_gk20a *nvgpu_gr_intr_get_channel_from_ctx(struct gk20a *g,
+			u32 curr_ctx, u32 *curr_tsgid)
+{
+	struct fifo_gk20a *f = &g->fifo;
+	struct gr_gk20a *gr = &g->gr;
+	u32 chid;
+	u32 tsgid = NVGPU_INVALID_TSG_ID;
+	u32 i;
+	struct channel_gk20a *ret_ch = NULL;
+
+	/* when contexts are unloaded from GR, the valid bit is reset
+	 * but the instance pointer information remains intact.
+	 * This might be called from gr_isr where contexts might be
+	 * unloaded. No need to check ctx_valid bit
+	 */
+
+	nvgpu_spinlock_acquire(&gr->ch_tlb_lock);
+
+	/* check cache first */
+	for (i = 0; i < GR_CHANNEL_MAP_TLB_SIZE; i++) {
+		if (gr->chid_tlb[i].curr_ctx == curr_ctx) {
+			chid = gr->chid_tlb[i].chid;
+			tsgid = gr->chid_tlb[i].tsgid;
+			ret_ch = gk20a_channel_from_id(g, chid);
+			goto unlock;
+		}
+	}
+
+	/* slow path */
+	for (chid = 0; chid < f->num_channels; chid++) {
+		struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
+
+		if (ch == NULL) {
+			continue;
+		}
+
+		if (nvgpu_inst_block_ptr(g, &ch->inst_block) ==
+				g->ops.gr.falcon.get_ctx_ptr(curr_ctx)) {
+			tsgid = ch->tsgid;
+			/* found it */
+			ret_ch = ch;
+			break;
+		}
+		gk20a_channel_put(ch);
+	}
+
+	if (ret_ch == NULL) {
+		goto unlock;
+	}
+
+	/* add to free tlb entry */
+	for (i = 0; i < GR_CHANNEL_MAP_TLB_SIZE; i++) {
+		if (gr->chid_tlb[i].curr_ctx == 0U) {
+			gr->chid_tlb[i].curr_ctx = curr_ctx;
+			gr->chid_tlb[i].chid = chid;
+			gr->chid_tlb[i].tsgid = tsgid;
+			goto unlock;
+		}
+	}
+
+	/* no free entry, flush one */
+	gr->chid_tlb[gr->channel_tlb_flush_index].curr_ctx = curr_ctx;
+	gr->chid_tlb[gr->channel_tlb_flush_index].chid = chid;
+	gr->chid_tlb[gr->channel_tlb_flush_index].tsgid = tsgid;
+
+	gr->channel_tlb_flush_index =
+		(gr->channel_tlb_flush_index + 1U) &
+		(GR_CHANNEL_MAP_TLB_SIZE - 1U);
+
+unlock:
+	nvgpu_spinlock_release(&gr->ch_tlb_lock);
+	if (curr_tsgid != NULL) {
+		*curr_tsgid = tsgid;
+	}
+	return ret_ch;
+}
+
+void nvgpu_gr_intr_report_exception(struct gk20a *g, u32 inst,
+		u32 err_type, u32 status)
+{
+	int ret = 0;
+	struct channel_gk20a *ch;
+	struct gr_exception_info err_info;
+	struct gr_err_info info;
+	u32 tsgid, chid, curr_ctx;
+
+	if (g->ops.gr.err_ops.report_gr_err == NULL) {
+		return;
+	}
+
+	tsgid = NVGPU_INVALID_TSG_ID;
+	curr_ctx = g->ops.gr.falcon.get_current_ctx(g);
+	ch = nvgpu_gr_intr_get_channel_from_ctx(g, curr_ctx, &tsgid);
+	chid = ch != NULL ? ch->chid : FIFO_INVAL_CHANNEL_ID;
+	if (ch != NULL) {
+		gk20a_channel_put(ch);
+	}
+
+	(void) memset(&err_info, 0, sizeof(err_info));
+	(void) memset(&info, 0, sizeof(info));
+	err_info.curr_ctx = curr_ctx;
+	err_info.chid = chid;
+	err_info.tsgid = tsgid;
+	err_info.status = status;
+	info.exception_info = &err_info;
+	ret = g->ops.gr.err_ops.report_gr_err(g,
+			NVGPU_ERR_MODULE_PGRAPH, inst, err_type,
+			&info);
+	if (ret != 0) {
+		nvgpu_err(g, "Failed to report PGRAPH exception: "
+				"inst=%u, err_type=%u, status=%u",
+				inst, err_type, status);
+	}
+}
+
 int nvgpu_gr_intr_handle_gpc_exception(struct gk20a *g, bool *post_event,
 	struct nvgpu_gr_config *gr_config, struct channel_gk20a *fault_ch,
 	u32 *hww_global_esr)
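For reference, a minimal caller-side sketch of the relocated lookup helper (hypothetical caller; the pattern mirrors nvgpu_gr_intr_report_exception above and gk20a_gr_isr below). The returned channel reference must be dropped with gk20a_channel_put():

	u32 tsgid = NVGPU_INVALID_TSG_ID;
	u32 curr_ctx = g->ops.gr.falcon.get_current_ctx(g);
	struct channel_gk20a *ch =
		nvgpu_gr_intr_get_channel_from_ctx(g, curr_ctx, &tsgid);

	if (ch != NULL) {
		/* ch->chid and tsgid now identify the current GR context */
		gk20a_channel_put(ch);
	}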
@@ -68,47 +68,6 @@
 #include <nvgpu/hw/gk20a/hw_fifo_gk20a.h>
 #include <nvgpu/hw/gk20a/hw_gr_gk20a.h>
 
-static struct channel_gk20a *gk20a_gr_get_channel_from_ctx(
-		struct gk20a *g, u32 curr_ctx, u32 *curr_tsgid);
-
-void nvgpu_report_gr_exception(struct gk20a *g, u32 inst,
-		u32 err_type, u32 status)
-{
-	int ret = 0;
-	struct channel_gk20a *ch;
-	struct gr_exception_info err_info;
-	struct gr_err_info info;
-	u32 tsgid, chid, curr_ctx;
-
-	if (g->ops.gr.err_ops.report_gr_err == NULL) {
-		return;
-	}
-
-	tsgid = NVGPU_INVALID_TSG_ID;
-	curr_ctx = g->ops.gr.falcon.get_current_ctx(g);
-	ch = gk20a_gr_get_channel_from_ctx(g, curr_ctx, &tsgid);
-	chid = ch != NULL ? ch->chid : FIFO_INVAL_CHANNEL_ID;
-	if (ch != NULL) {
-		gk20a_channel_put(ch);
-	}
-
-	(void) memset(&err_info, 0, sizeof(err_info));
-	(void) memset(&info, 0, sizeof(info));
-	err_info.curr_ctx = curr_ctx;
-	err_info.chid = chid;
-	err_info.tsgid = tsgid;
-	err_info.status = status;
-	info.exception_info = &err_info;
-	ret = g->ops.gr.err_ops.report_gr_err(g,
-			NVGPU_ERR_MODULE_PGRAPH, inst, err_type,
-			&info);
-	if (ret != 0) {
-		nvgpu_err(g, "Failed to report PGRAPH exception: "
-				"inst=%u, err_type=%u, status=%u",
-				inst, err_type, status);
-	}
-}
-
 static void nvgpu_report_gr_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
 		u32 sm, u32 hww_warp_esr_status, u64 hww_warp_esr_pc)
 {
@@ -124,7 +83,7 @@ static void nvgpu_report_gr_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
 
 	tsgid = NVGPU_INVALID_TSG_ID;
 	curr_ctx = g->ops.gr.falcon.get_current_ctx(g);
-	ch = gk20a_gr_get_channel_from_ctx(g, curr_ctx, &tsgid);
+	ch = nvgpu_gr_intr_get_channel_from_ctx(g, curr_ctx, &tsgid);
 	chid = ch != NULL ? ch->chid : FIFO_INVAL_CHANNEL_ID;
 	if (ch != NULL) {
 		gk20a_channel_put(ch);
@@ -428,90 +387,6 @@ static int gk20a_gr_handle_class_error(struct gk20a *g,
 	return -EINVAL;
 }
 
-/* Used by sw interrupt thread to translate current ctx to chid.
- * Also used by regops to translate current ctx to chid and tsgid.
- * For performance, we don't want to go through 128 channels every time.
- * curr_ctx should be the value read from gr falcon get_current_ctx op
- * A small tlb is used here to cache translation.
- *
- * Returned channel must be freed with gk20a_channel_put() */
-static struct channel_gk20a *gk20a_gr_get_channel_from_ctx(
-		struct gk20a *g, u32 curr_ctx, u32 *curr_tsgid)
-{
-	struct fifo_gk20a *f = &g->fifo;
-	struct gr_gk20a *gr = &g->gr;
-	u32 chid;
-	u32 tsgid = NVGPU_INVALID_TSG_ID;
-	u32 i;
-	struct channel_gk20a *ret = NULL;
-
-	/* when contexts are unloaded from GR, the valid bit is reset
-	 * but the instance pointer information remains intact.
-	 * This might be called from gr_isr where contexts might be
-	 * unloaded. No need to check ctx_valid bit
-	 */
-
-	nvgpu_spinlock_acquire(&gr->ch_tlb_lock);
-
-	/* check cache first */
-	for (i = 0; i < GR_CHANNEL_MAP_TLB_SIZE; i++) {
-		if (gr->chid_tlb[i].curr_ctx == curr_ctx) {
-			chid = gr->chid_tlb[i].chid;
-			tsgid = gr->chid_tlb[i].tsgid;
-			ret = gk20a_channel_from_id(g, chid);
-			goto unlock;
-		}
-	}
-
-	/* slow path */
-	for (chid = 0; chid < f->num_channels; chid++) {
-		struct channel_gk20a *ch = gk20a_channel_from_id(g, chid);
-
-		if (ch == NULL) {
-			continue;
-		}
-
-		if (nvgpu_inst_block_ptr(g, &ch->inst_block) ==
-				g->ops.gr.falcon.get_ctx_ptr(curr_ctx)) {
-			tsgid = ch->tsgid;
-			/* found it */
-			ret = ch;
-			break;
-		}
-		gk20a_channel_put(ch);
-	}
-
-	if (ret == NULL) {
-		goto unlock;
-	}
-
-	/* add to free tlb entry */
-	for (i = 0; i < GR_CHANNEL_MAP_TLB_SIZE; i++) {
-		if (gr->chid_tlb[i].curr_ctx == 0U) {
-			gr->chid_tlb[i].curr_ctx = curr_ctx;
-			gr->chid_tlb[i].chid = chid;
-			gr->chid_tlb[i].tsgid = tsgid;
-			goto unlock;
-		}
-	}
-
-	/* no free entry, flush one */
-	gr->chid_tlb[gr->channel_tlb_flush_index].curr_ctx = curr_ctx;
-	gr->chid_tlb[gr->channel_tlb_flush_index].chid = chid;
-	gr->chid_tlb[gr->channel_tlb_flush_index].tsgid = tsgid;
-
-	gr->channel_tlb_flush_index =
-		(gr->channel_tlb_flush_index + 1U) &
-		(GR_CHANNEL_MAP_TLB_SIZE - 1U);
-
-unlock:
-	nvgpu_spinlock_release(&gr->ch_tlb_lock);
-	if (curr_tsgid != NULL) {
-		*curr_tsgid = tsgid;
-	}
-	return ret;
-}
-
 int gk20a_gr_lock_down_sm(struct gk20a *g,
 		u32 gpc, u32 tpc, u32 sm, u32 global_esr_mask,
 		bool check_errors)
@@ -703,7 +578,7 @@ int gk20a_gr_isr(struct gk20a *g)
 
 	g->ops.gr.intr.trapped_method_info(g, &isr_data);
 
-	ch = gk20a_gr_get_channel_from_ctx(g, isr_data.curr_ctx, &tsgid);
+	ch = nvgpu_gr_intr_get_channel_from_ctx(g, isr_data.curr_ctx, &tsgid);
 	isr_data.ch = ch;
 	chid = ch != NULL ? ch->chid : FIFO_INVAL_CHANNEL_ID;
 
@@ -2019,7 +1894,7 @@ bool gk20a_is_channel_ctx_resident(struct channel_gk20a *ch)
 		return false;
 	}
 
-	curr_ch = gk20a_gr_get_channel_from_ctx(g, curr_gr_ctx,
+	curr_ch = nvgpu_gr_intr_get_channel_from_ctx(g, curr_gr_ctx,
 			&curr_gr_tsgid);
 
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
@@ -25,13 +25,9 @@
 #define GR_GK20A_H
 
 #include <nvgpu/types.h>
 
-#include "mm_gk20a.h"
-
-#include <nvgpu/comptags.h>
-
 #include <nvgpu/cond.h>
 
-#define INVALID_MAX_WAYS 0xFFFFFFFFU
+#include "mm_gk20a.h"
 
 #define GK20A_TIMEOUT_FPGA 100000U /* 100 sec */
@@ -47,6 +43,7 @@ struct nvgpu_gr_zbc;
 struct nvgpu_gr_hwpm_map;
 struct nvgpu_gr_isr_data;
 struct nvgpu_gr_ctx_desc;
+struct dbg_session_gk20a;
 
 enum ctxsw_addr_type;
 
@@ -256,8 +253,6 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
 u32 gk20a_gr_get_sm_hww_warp_esr(struct gk20a *g, u32 gpc, u32 tpc, u32 sm);
 u32 gk20a_gr_get_sm_hww_global_esr(struct gk20a *g, u32 gpc, u32 tpc, u32 sm);
 
-struct dbg_session_gk20a;
-
 bool gr_gk20a_suspend_context(struct channel_gk20a *ch);
 bool gr_gk20a_resume_context(struct channel_gk20a *ch);
 int gr_gk20a_suspend_contexts(struct gk20a *g,
@@ -172,7 +172,7 @@ bool gm20b_gr_intr_handle_exceptions(struct gk20a *g, bool *is_gpc_exception)
 		u32 fe = nvgpu_readl(g, gr_fe_hww_esr_r());
 		u32 info = nvgpu_readl(g, gr_fe_hww_esr_info_r());
 
-		nvgpu_report_gr_exception(g, 0,
+		nvgpu_gr_intr_report_exception(g, 0,
 				GPU_PGRAPH_FE_EXCEPTION,
 				fe);
 		nvgpu_err(g, "fe exception: esr 0x%08x, info 0x%08x",
@@ -185,7 +185,7 @@ bool gm20b_gr_intr_handle_exceptions(struct gk20a *g, bool *is_gpc_exception)
 	if ((exception & gr_exception_memfmt_m()) != 0U) {
 		u32 memfmt = nvgpu_readl(g, gr_memfmt_hww_esr_r());
 
-		nvgpu_report_gr_exception(g, 0,
+		nvgpu_gr_intr_report_exception(g, 0,
 				GPU_PGRAPH_MEMFMT_EXCEPTION,
 				memfmt);
 		nvgpu_err(g, "memfmt exception: esr %08x", memfmt);
@@ -197,7 +197,7 @@ bool gm20b_gr_intr_handle_exceptions(struct gk20a *g, bool *is_gpc_exception)
 	if ((exception & gr_exception_pd_m()) != 0U) {
 		u32 pd = nvgpu_readl(g, gr_pd_hww_esr_r());
 
-		nvgpu_report_gr_exception(g, 0,
+		nvgpu_gr_intr_report_exception(g, 0,
 				GPU_PGRAPH_PD_EXCEPTION,
 				pd);
 		nvgpu_err(g, "pd exception: esr 0x%08x", pd);
@@ -209,7 +209,7 @@ bool gm20b_gr_intr_handle_exceptions(struct gk20a *g, bool *is_gpc_exception)
 	if ((exception & gr_exception_scc_m()) != 0U) {
 		u32 scc = nvgpu_readl(g, gr_scc_hww_esr_r());
 
-		nvgpu_report_gr_exception(g, 0,
+		nvgpu_gr_intr_report_exception(g, 0,
 				GPU_PGRAPH_SCC_EXCEPTION,
 				scc);
 		nvgpu_err(g, "scc exception: esr 0x%08x", scc);
@@ -221,7 +221,7 @@ bool gm20b_gr_intr_handle_exceptions(struct gk20a *g, bool *is_gpc_exception)
 	if ((exception & gr_exception_ds_m()) != 0U) {
 		u32 ds = nvgpu_readl(g, gr_ds_hww_esr_r());
 
-		nvgpu_report_gr_exception(g, 0,
+		nvgpu_gr_intr_report_exception(g, 0,
 				GPU_PGRAPH_DS_EXCEPTION,
 				ds);
 		nvgpu_err(g, "ds exception: esr: 0x%08x", ds);
@@ -241,7 +241,7 @@ bool gm20b_gr_intr_handle_exceptions(struct gk20a *g, bool *is_gpc_exception)
 		} else {
 			nvgpu_err(g, "unhandled ssync exception");
 		}
-		nvgpu_report_gr_exception(g, 0,
+		nvgpu_gr_intr_report_exception(g, 0,
 				GPU_PGRAPH_SSYNC_EXCEPTION,
 				ssync_esr);
 	}
@@ -250,7 +250,7 @@ bool gm20b_gr_intr_handle_exceptions(struct gk20a *g, bool *is_gpc_exception)
 		u32 mme = nvgpu_readl(g, gr_mme_hww_esr_r());
 		u32 info = nvgpu_readl(g, gr_mme_hww_esr_info_r());
 
-		nvgpu_report_gr_exception(g, 0,
+		nvgpu_gr_intr_report_exception(g, 0,
 				GPU_PGRAPH_MME_EXCEPTION,
 				mme);
 		nvgpu_err(g, "mme exception: esr 0x%08x info:0x%08x",
@@ -267,7 +267,7 @@ bool gm20b_gr_intr_handle_exceptions(struct gk20a *g, bool *is_gpc_exception)
 	if ((exception & gr_exception_sked_m()) != 0U) {
 		u32 sked = nvgpu_readl(g, gr_sked_hww_esr_r());
 
-		nvgpu_report_gr_exception(g, 0,
+		nvgpu_gr_intr_report_exception(g, 0,
 				GPU_PGRAPH_SKED_EXCEPTION,
 				sked);
 		nvgpu_err(g, "sked exception: esr 0x%08x", sked);
@@ -22,10 +22,10 @@
 
 #include <nvgpu/gk20a.h>
 #include <nvgpu/io.h>
-#include <nvgpu/nvgpu_err.h>
 
 #include <nvgpu/gr/config.h>
 #include <nvgpu/gr/gr.h>
+#include <nvgpu/gr/gr_intr.h>
 
 #include "gr_intr_gv11b.h"
 
@@ -355,7 +355,7 @@ void gv11b_gr_intr_handle_tpc_mpc_exception(struct gk20a *g, u32 gpc, u32 tpc)
 	esr = nvgpu_readl(g, gr_gpc0_tpc0_mpc_hww_esr_r() + offset);
 	nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "mpc hww esr 0x%08x", esr);
 
-	nvgpu_report_gr_exception(g, ((gpc << 8U) | tpc),
+	nvgpu_gr_intr_report_exception(g, ((gpc << 8U) | tpc),
 			GPU_PGRAPH_MPC_EXCEPTION,
 			esr);
 
@@ -63,4 +63,8 @@ int nvgpu_gr_intr_handle_notify_pending(struct gk20a *g,
 			struct nvgpu_gr_isr_data *isr_data);
 int nvgpu_gr_intr_handle_semaphore_pending(struct gk20a *g,
 			struct nvgpu_gr_isr_data *isr_data);
+void nvgpu_gr_intr_report_exception(struct gk20a *g, u32 inst,
+			u32 err_type, u32 status);
+struct channel_gk20a *nvgpu_gr_intr_get_channel_from_ctx(struct gk20a *g,
+			u32 curr_ctx, u32 *curr_tsgid);
 #endif /* NVGPU_GR_INTR_H */
@@ -178,9 +178,6 @@ struct gr_err_info {
 void nvgpu_report_host_error(struct gk20a *g,
 		u32 inst, u32 err_id, u32 intr_info);
 
-void nvgpu_report_gr_exception(struct gk20a *g, u32 inst,
-		u32 err_type, u32 status);
-
 void nvgpu_report_ce_error(struct gk20a *g, u32 inst,
 		u32 err_type, u32 status);
 