gpu: nvgpu: modify handle_pbdma_intr* functions

RC_TYPE_PBDMA_FAULT is the only recovery type ever returned by the pbdma intr
functions, so the rc_type variable is changed to a boolean in all
implementations of the handle_pbdma_intr* functions.

"handled" variable is unused and removed from all the implementations of
handle_pbdma_intr* functions.
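
For reference, this is the shape of the change as it lands in the gm20b
header below (the gv11b variants change the same way):

    /* before */
    unsigned int gm20b_pbdma_handle_intr_0(struct gk20a *g, u32 pbdma_id,
            u32 pbdma_intr_0, u32 *handled, u32 *error_notifier);

    /* after */
    bool gm20b_pbdma_handle_intr_0(struct gk20a *g, u32 pbdma_id,
            u32 pbdma_intr_0, u32 *error_notifier);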

The handle_pbdma_intr* HAL ops are renamed to handle_intr*; they live inside
the pbdma ops struct, so the prefix is redundant there.
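
Callers now test the returned boolean directly instead of comparing against
RC_TYPE_PBDMA_FAULT, as in gk20a_fifo_pbdma_isr() below:

    /* before */
    rc_type = g->ops.pbdma.handle_pbdma_intr(g, pbdma_id, &error_notifier);
    if (rc_type == RC_TYPE_PBDMA_FAULT) {
            gk20a_fifo_pbdma_fault_rc(g, f, pbdma_id, error_notifier);
    }

    /* after */
    recover = g->ops.pbdma.handle_intr(g, pbdma_id, &error_notifier);
    if (recover) {
            gk20a_fifo_pbdma_fault_rc(g, f, pbdma_id, error_notifier);
    }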

Jira NVGPU-2950

Change-Id: I9605d930225a38ed76f25b6a94cb02d855f522dd
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2083748
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Debarshi Dutta <ddutta@nvidia.com>
Date:      2019-03-28 14:05:30 +05:30
Committer: mobile promotions
Commit:    b1ceb5c4d2 (parent 0f1726ae1f)

14 changed files with 93 additions and 103 deletions


@@ -453,9 +453,9 @@ static const struct gpu_ops vgpu_gp10b_ops = {
         .pbdma_acquire_val = gm20b_pbdma_acquire_val,
         .get_pbdma_signature = gp10b_pbdma_get_signature,
         .dump_pbdma_status = NULL,
-        .handle_pbdma_intr_0 = NULL,
-        .handle_pbdma_intr_1 = gm20b_pbdma_handle_intr_1,
-        .handle_pbdma_intr = gm20b_pbdma_handle_intr,
+        .handle_intr_0 = NULL,
+        .handle_intr_1 = gm20b_pbdma_handle_intr_1,
+        .handle_intr = gm20b_pbdma_handle_intr,
         .read_pbdma_data = NULL,
         .reset_pbdma_header = NULL,
         .device_fatal_0_intr_descs = NULL,


@@ -532,9 +532,9 @@ static const struct gpu_ops vgpu_gv11b_ops = {
         .pbdma_acquire_val = gm20b_pbdma_acquire_val,
         .get_pbdma_signature = gp10b_pbdma_get_signature,
         .dump_pbdma_status = NULL,
-        .handle_pbdma_intr_0 = NULL,
-        .handle_pbdma_intr_1 = gv11b_pbdma_handle_intr_1,
-        .handle_pbdma_intr = gm20b_pbdma_handle_intr,
+        .handle_intr_0 = NULL,
+        .handle_intr_1 = gv11b_pbdma_handle_intr_1,
+        .handle_intr = gm20b_pbdma_handle_intr,
         .read_pbdma_data = NULL,
         .reset_pbdma_header = NULL,
         .device_fatal_0_intr_descs = NULL,


@@ -632,9 +632,9 @@ static const struct gpu_ops gm20b_ops = {
         .pbdma_acquire_val = gm20b_pbdma_acquire_val,
         .get_pbdma_signature = gm20b_pbdma_get_signature,
         .dump_pbdma_status = gm20b_pbdma_dump_status,
-        .handle_pbdma_intr_0 = gm20b_pbdma_handle_intr_0,
-        .handle_pbdma_intr_1 = gm20b_pbdma_handle_intr_1,
-        .handle_pbdma_intr = gm20b_pbdma_handle_intr,
+        .handle_intr_0 = gm20b_pbdma_handle_intr_0,
+        .handle_intr_1 = gm20b_pbdma_handle_intr_1,
+        .handle_intr = gm20b_pbdma_handle_intr,
         .read_pbdma_data = gm20b_pbdma_read_data,
         .reset_pbdma_header = gm20b_pbdma_reset_header,
         .device_fatal_0_intr_descs =


@@ -712,9 +712,9 @@ static const struct gpu_ops gp10b_ops = {
         .pbdma_acquire_val = gm20b_pbdma_acquire_val,
         .get_pbdma_signature = gp10b_pbdma_get_signature,
         .dump_pbdma_status = gm20b_pbdma_dump_status,
-        .handle_pbdma_intr_0 = gm20b_pbdma_handle_intr_0,
-        .handle_pbdma_intr_1 = gm20b_pbdma_handle_intr_1,
-        .handle_pbdma_intr = gm20b_pbdma_handle_intr,
+        .handle_intr_0 = gm20b_pbdma_handle_intr_0,
+        .handle_intr_1 = gm20b_pbdma_handle_intr_1,
+        .handle_intr = gm20b_pbdma_handle_intr,
         .read_pbdma_data = gm20b_pbdma_read_data,
         .reset_pbdma_header = gm20b_pbdma_reset_header,
         .device_fatal_0_intr_descs =


@@ -896,9 +896,9 @@ static const struct gpu_ops gv100_ops = {
         .pbdma_acquire_val = gm20b_pbdma_acquire_val,
         .get_pbdma_signature = gp10b_pbdma_get_signature,
         .dump_pbdma_status = gm20b_pbdma_dump_status,
-        .handle_pbdma_intr_0 = gv11b_pbdma_handle_intr_0,
-        .handle_pbdma_intr_1 = gv11b_pbdma_handle_intr_1,
-        .handle_pbdma_intr = gm20b_pbdma_handle_intr,
+        .handle_intr_0 = gv11b_pbdma_handle_intr_0,
+        .handle_intr_1 = gv11b_pbdma_handle_intr_1,
+        .handle_intr = gm20b_pbdma_handle_intr,
         .read_pbdma_data = gm20b_pbdma_read_data,
         .reset_pbdma_header = gm20b_pbdma_reset_header,
         .device_fatal_0_intr_descs =


@@ -189,8 +189,8 @@ static int gv11b_fifo_poll_pbdma_chan_status(struct gk20a *g, u32 id,
      * reported to SW.
      */
-    /* Ignore un-needed return value "handled" */
-    (void)g->ops.pbdma.handle_pbdma_intr(g, pbdma_id, NULL);
+    /* Ignore un-needed return value "recover" */
+    (void)g->ops.pbdma.handle_intr(g, pbdma_id, NULL);

     g->ops.pbdma_status.read_pbdma_status_info(g, pbdma_id,
             &pbdma_status);


@@ -851,9 +851,9 @@ static const struct gpu_ops gv11b_ops = {
         .pbdma_acquire_val = gm20b_pbdma_acquire_val,
         .get_pbdma_signature = gp10b_pbdma_get_signature,
         .dump_pbdma_status = gm20b_pbdma_dump_status,
-        .handle_pbdma_intr_0 = gv11b_pbdma_handle_intr_0,
-        .handle_pbdma_intr_1 = gv11b_pbdma_handle_intr_1,
-        .handle_pbdma_intr = gm20b_pbdma_handle_intr,
+        .handle_intr_0 = gv11b_pbdma_handle_intr_0,
+        .handle_intr_1 = gv11b_pbdma_handle_intr_1,
+        .handle_intr = gm20b_pbdma_handle_intr,
         .read_pbdma_data = gm20b_pbdma_read_data,
         .reset_pbdma_header = gm20b_pbdma_reset_header,
         .device_fatal_0_intr_descs =


@@ -211,15 +211,15 @@ u32 gk20a_fifo_pbdma_isr(struct gk20a *g)
     u32 num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
     u32 pbdma_pending_bitmask = nvgpu_readl(g, fifo_intr_pbdma_id_r());
     u32 error_notifier;
-    unsigned int rc_type;
+    bool recover;

     for (pbdma_id = 0; pbdma_id < num_pbdma; pbdma_id++) {
         if (fifo_intr_pbdma_id_status_v(pbdma_pending_bitmask, pbdma_id) != 0U) {
             nvgpu_log(g, gpu_dbg_intr, "pbdma id %d intr pending",
                 pbdma_id);
-            rc_type = g->ops.pbdma.handle_pbdma_intr(g, pbdma_id,
+            recover = g->ops.pbdma.handle_intr(g, pbdma_id,
                 &error_notifier);
-            if (rc_type == RC_TYPE_PBDMA_FAULT) {
+            if (recover) {
                 gk20a_fifo_pbdma_fault_rc(g, f, pbdma_id,
                     error_notifier);
             }


@@ -130,11 +130,11 @@ void gm20b_pbdma_intr_enable(struct gk20a *g, bool enable)
     }
 }

-unsigned int gm20b_pbdma_handle_intr_0(struct gk20a *g, u32 pbdma_id,
-        u32 pbdma_intr_0, u32 *handled, u32 *error_notifier)
+bool gm20b_pbdma_handle_intr_0(struct gk20a *g, u32 pbdma_id,
+        u32 pbdma_intr_0, u32 *error_notifier)
 {
     struct fifo_gk20a *f = &g->fifo;
-    unsigned int rc_type = RC_TYPE_NO_RC;
+    bool recover = false;
     u32 i;
     unsigned long pbdma_intr_err;
     unsigned long bit;
@@ -164,11 +164,7 @@ unsigned int gm20b_pbdma_handle_intr_0(struct gk20a *g, u32 pbdma_id,
             nvgpu_readl(g, pbdma_method3_r(pbdma_id))
             );
-        rc_type = RC_TYPE_PBDMA_FAULT;
-        *handled |= ((f->intr.pbdma.device_fatal_0 |
-            f->intr.pbdma.channel_fatal_0 |
-            f->intr.pbdma.restartable_0) &
-            pbdma_intr_0);
+        recover = true;
     }

     if ((pbdma_intr_0 & pbdma_intr_0_acquire_pending_f()) != 0U) {
@@ -177,29 +173,34 @@ unsigned int gm20b_pbdma_handle_intr_0(struct gk20a *g, u32 pbdma_id,
         val &= ~pbdma_acquire_timeout_en_enable_f();
         nvgpu_writel(g, pbdma_acquire_r(pbdma_id), val);
         if (nvgpu_is_timeouts_enabled(g)) {
-            rc_type = RC_TYPE_PBDMA_FAULT;
-            nvgpu_err(g,
-                "semaphore acquire timeout!");
-            *error_notifier = NVGPU_ERR_NOTIFIER_GR_SEMAPHORE_TIMEOUT;
+            recover = true;
+            nvgpu_err(g, "semaphore acquire timeout!");
+
+            /*
+             * Note: the error_notifier can be overwritten if
+             * semaphore_timeout is triggered with pbcrc_pending
+             * interrupt below
+             */
+            *error_notifier =
+                NVGPU_ERR_NOTIFIER_GR_SEMAPHORE_TIMEOUT;
         }
-        *handled |= pbdma_intr_0_acquire_pending_f();
     }

     if ((pbdma_intr_0 & pbdma_intr_0_pbentry_pending_f()) != 0U) {
         g->ops.pbdma.reset_pbdma_header(g, pbdma_id);
         gm20b_pbdma_reset_method(g, pbdma_id, 0);
-        rc_type = RC_TYPE_PBDMA_FAULT;
+        recover = true;
     }

     if ((pbdma_intr_0 & pbdma_intr_0_method_pending_f()) != 0U) {
         gm20b_pbdma_reset_method(g, pbdma_id, 0);
-        rc_type = RC_TYPE_PBDMA_FAULT;
+        recover = true;
     }

     if ((pbdma_intr_0 & pbdma_intr_0_pbcrc_pending_f()) != 0U) {
         *error_notifier =
             NVGPU_ERR_NOTIFIER_PBDMA_PUSHBUFFER_CRC_MISMATCH;
-        rc_type = RC_TYPE_PBDMA_FAULT;
+        recover = true;
     }

     if ((pbdma_intr_0 & pbdma_intr_0_device_pending_f()) != 0U) {
@@ -212,18 +213,16 @@ unsigned int gm20b_pbdma_handle_intr_0(struct gk20a *g, u32 pbdma_id,
                     pbdma_id, i);
             }
         }
-        rc_type = RC_TYPE_PBDMA_FAULT;
+        recover = true;
     }

-    return rc_type;
+    return recover;
 }

-unsigned int gm20b_pbdma_handle_intr_1(struct gk20a *g,
-        u32 pbdma_id, u32 pbdma_intr_1,
-        u32 *handled, u32 *error_notifier)
+bool gm20b_pbdma_handle_intr_1(struct gk20a *g, u32 pbdma_id, u32 pbdma_intr_1,
+        u32 *error_notifier)
 {
-    unsigned int rc_type = RC_TYPE_PBDMA_FAULT;
+    bool recover = true;

     /*
      * all of the interrupts in _intr_1 are "host copy engine"
      * related, which is not supported. For now just make them
@@ -231,9 +230,8 @@ unsigned int gm20b_pbdma_handle_intr_1(struct gk20a *g,
      */
     nvgpu_err(g, "hce err: pbdma_intr_1(%d):0x%08x",
         pbdma_id, pbdma_intr_1);
-    *handled |= pbdma_intr_1;

-    return rc_type;
+    return recover;
 }

 void gm20b_pbdma_reset_header(struct gk20a *g, u32 pbdma_id)
@@ -420,25 +418,24 @@ u32 gm20b_pbdma_restartable_0_intr_descs(void)
     return restartable_0_intr_descs;
 }

-unsigned int gm20b_pbdma_handle_intr(struct gk20a *g, u32 pbdma_id,
+bool gm20b_pbdma_handle_intr(struct gk20a *g, u32 pbdma_id,
         u32 *error_notifier)
 {
-    u32 intr_handled = 0U;
     u32 intr_error_notifier = NVGPU_ERR_NOTIFIER_PBDMA_ERROR;
     u32 pbdma_intr_0 = nvgpu_readl(g, pbdma_intr_0_r(pbdma_id));
     u32 pbdma_intr_1 = nvgpu_readl(g, pbdma_intr_1_r(pbdma_id));
-    unsigned int rc_type = RC_TYPE_NO_RC;
+    bool recover = false;

     if (pbdma_intr_0 != 0U) {
         nvgpu_log(g, gpu_dbg_info | gpu_dbg_intr,
             "pbdma id %d intr_0 0x%08x pending",
             pbdma_id, pbdma_intr_0);
-        if (g->ops.pbdma.handle_pbdma_intr_0(g, pbdma_id, pbdma_intr_0,
-            &intr_handled, &intr_error_notifier) != RC_TYPE_NO_RC) {
-            rc_type = RC_TYPE_PBDMA_FAULT;
+        if (g->ops.pbdma.handle_intr_0(g, pbdma_id, pbdma_intr_0,
+            &intr_error_notifier)) {
+            recover = true;
         }
         nvgpu_writel(g, pbdma_intr_0_r(pbdma_id), pbdma_intr_0);
     }
@@ -448,9 +445,9 @@ unsigned int gm20b_pbdma_handle_intr(struct gk20a *g, u32 pbdma_id,
"pbdma id %d intr_1 0x%08x pending",
pbdma_id, pbdma_intr_1);
if (g->ops.pbdma.handle_pbdma_intr_1(g, pbdma_id, pbdma_intr_1,
&intr_handled, &intr_error_notifier) != RC_TYPE_NO_RC) {
rc_type = RC_TYPE_PBDMA_FAULT;
if (g->ops.pbdma.handle_intr_1(g, pbdma_id, pbdma_intr_1,
&intr_error_notifier)) {
recover = true;
}
nvgpu_writel(g, pbdma_intr_1_r(pbdma_id), pbdma_intr_1);
}
@@ -459,5 +456,5 @@ unsigned int gm20b_pbdma_handle_intr(struct gk20a *g, u32 pbdma_id,
         *error_notifier = intr_error_notifier;
     }

-    return rc_type;
+    return recover;
 }


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -30,10 +30,12 @@ struct gk20a_debug_output;
 void gm20b_pbdma_intr_enable(struct gk20a *g, bool enable);
-unsigned int gm20b_pbdma_handle_intr_0(struct gk20a *g, u32 pbdma_id,
-        u32 pbdma_intr_0, u32 *handled, u32 *error_notifier);
-unsigned int gm20b_pbdma_handle_intr_1(struct gk20a *g, u32 pbdma_id,
-        u32 pbdma_intr_1, u32 *handled, u32 *error_notifier);
+bool gm20b_pbdma_handle_intr_0(struct gk20a *g, u32 pbdma_id,
+        u32 pbdma_intr_0, u32 *error_notifier);
+bool gm20b_pbdma_handle_intr_1(struct gk20a *g, u32 pbdma_id,
+        u32 pbdma_intr_1, u32 *error_notifier);
+bool gm20b_pbdma_handle_intr(struct gk20a *g, u32 pbdma_id,
+        u32 *error_notifier);
 u32 gm20b_pbdma_get_signature(struct gk20a *g);
 u32 gm20b_pbdma_read_data(struct gk20a *g, u32 pbdma_id);
 void gm20b_pbdma_reset_header(struct gk20a *g, u32 pbdma_id);
@@ -48,7 +50,5 @@ u32 gm20b_pbdma_restartable_0_intr_descs(void);
 void gm20b_pbdma_clear_all_intr(struct gk20a *g, u32 pbdma_id);
 void gm20b_pbdma_disable_and_clear_all_intr(struct gk20a *g);
-unsigned int gm20b_pbdma_handle_intr(struct gk20a *g, u32 pbdma_id,
-        u32 *error_notifier);

 #endif /* NVGPU_PBDMA_GM20B_H */


@@ -124,31 +124,26 @@ void gv11b_pbdma_intr_enable(struct gk20a *g, bool enable)
     }
 }

-unsigned int gv11b_pbdma_handle_intr_0(struct gk20a *g,
-        u32 pbdma_id, u32 pbdma_intr_0,
-        u32 *handled, u32 *error_notifier)
+bool gv11b_pbdma_handle_intr_0(struct gk20a *g, u32 pbdma_id, u32 pbdma_intr_0,
+        u32 *error_notifier)
 {
-    unsigned int rc_type = RC_TYPE_NO_RC;
-
-    rc_type = gm20b_pbdma_handle_intr_0(g, pbdma_id,
-            pbdma_intr_0, handled, error_notifier);
+    bool recover = gm20b_pbdma_handle_intr_0(g, pbdma_id,
+            pbdma_intr_0, error_notifier);

     if ((pbdma_intr_0 & pbdma_intr_0_clear_faulted_error_pending_f()) != 0U) {
         nvgpu_log(g, gpu_dbg_intr, "clear faulted error on pbdma id %d",
             pbdma_id);
         gm20b_pbdma_reset_method(g, pbdma_id, 0);
-        *handled |= pbdma_intr_0_clear_faulted_error_pending_f();
-        rc_type = RC_TYPE_PBDMA_FAULT;
+        recover = true;
     }

     if ((pbdma_intr_0 & pbdma_intr_0_eng_reset_pending_f()) != 0U) {
         nvgpu_log(g, gpu_dbg_intr, "eng reset intr on pbdma id %d",
             pbdma_id);
-        *handled |= pbdma_intr_0_eng_reset_pending_f();
-        rc_type = RC_TYPE_PBDMA_FAULT;
+        recover = true;
     }

     report_pbdma_error(g, pbdma_id, pbdma_intr_0);

-    return rc_type;
+    return recover;
 }

 /*
@@ -173,11 +168,11 @@ unsigned int gv11b_pbdma_handle_intr_0(struct gk20a *g,
  * will have to be destroyed.
  */
-unsigned int gv11b_pbdma_handle_intr_1(struct gk20a *g,
-        u32 pbdma_id, u32 pbdma_intr_1,
-        u32 *handled, u32 *error_notifier)
+bool gv11b_pbdma_handle_intr_1(struct gk20a *g, u32 pbdma_id, u32 pbdma_intr_1,
+        u32 *error_notifier)
 {
-    unsigned int rc_type = RC_TYPE_PBDMA_FAULT;
+    bool recover = false;

     u32 pbdma_intr_1_current = gk20a_readl(g, pbdma_intr_1_r(pbdma_id));

     /* minimize race with the gpu clearing the pending interrupt */
@@ -187,9 +182,11 @@ unsigned int gv11b_pbdma_handle_intr_1(struct gk20a *g,
     }

     if (pbdma_intr_1 == 0U) {
-        return RC_TYPE_NO_RC;
+        return recover;
     }

+    recover = true;
+
     nvgpu_report_host_error(g, pbdma_id,
         GPU_HOST_PBDMA_HCE_ERROR, pbdma_intr_1);
@@ -198,7 +195,6 @@ unsigned int gv11b_pbdma_handle_intr_1(struct gk20a *g,
             pbdma_id);
         nvgpu_err(g, "pbdma_intr_1(%d)= 0x%08x ",
             pbdma_id, pbdma_intr_1);
-        *handled |= pbdma_intr_1_ctxnotvalid_pending_f();
     } else{
         /*
          * rest of the interrupts in _intr_1 are "host copy engine"
@@ -207,10 +203,9 @@ unsigned int gv11b_pbdma_handle_intr_1(struct gk20a *g,
          */
         nvgpu_err(g, "hce err: pbdma_intr_1(%d):0x%08x",
             pbdma_id, pbdma_intr_1);
-        *handled |= pbdma_intr_1;
     }

-    return rc_type;
+    return recover;
 }

 u32 gv11b_pbdma_channel_fatal_0_intr_descs(void)


@@ -28,12 +28,10 @@
 struct gk20a;

 void gv11b_pbdma_intr_enable(struct gk20a *g, bool enable);
-unsigned int gv11b_pbdma_handle_intr_0(struct gk20a *g,
-        u32 pbdma_id, u32 pbdma_intr_0,
-        u32 *handled, u32 *error_notifier);
-unsigned int gv11b_pbdma_handle_intr_1(struct gk20a *g,
-        u32 pbdma_id, u32 pbdma_intr_1,
-        u32 *handled, u32 *error_notifier);
+bool gv11b_pbdma_handle_intr_0(struct gk20a *g, u32 pbdma_id, u32 pbdma_intr_0,
+        u32 *error_notifier);
+bool gv11b_pbdma_handle_intr_1(struct gk20a *g, u32 pbdma_id, u32 pbdma_intr_1,
+        u32 *error_notifier);
 u32 gv11b_pbdma_channel_fatal_0_intr_descs(void);

 #endif /* NVGPU_PBDMA_GV11B_H */


@@ -1027,12 +1027,15 @@ struct gpu_ops {
     struct {
         void (*intr_enable)(struct gk20a *g, bool enable);
-        unsigned int (*handle_pbdma_intr_0)(struct gk20a *g,
-                u32 pbdma_id, u32 pbdma_intr_0,
-                u32 *handled, u32 *error_notifier);
-        unsigned int (*handle_pbdma_intr_1)(struct gk20a *g,
-                u32 pbdma_id, u32 pbdma_intr_1,
-                u32 *handled, u32 *error_notifier);
+        bool (*handle_intr_0)(struct gk20a *g,
+                u32 pbdma_id, u32 pbdma_intr_0,
+                u32 *error_notifier);
+        bool (*handle_intr_1)(struct gk20a *g,
+                u32 pbdma_id, u32 pbdma_intr_1,
+                u32 *error_notifier);
+        /* error_notifier can be NULL */
+        bool (*handle_intr)(struct gk20a *g, u32 pbdma_id,
+                u32 *error_notifier);
         u32 (*get_pbdma_signature)(struct gk20a *g);
         void (*dump_pbdma_status)(struct gk20a *g,
                 struct gk20a_debug_output *o);
@@ -1042,9 +1045,6 @@ struct gpu_ops {
         u32 (*device_fatal_0_intr_descs)(void);
         u32 (*channel_fatal_0_intr_descs)(void);
         u32 (*restartable_0_intr_descs)(void);
-        /* error_notifier can be NULL */
-        unsigned int (*handle_pbdma_intr)(struct gk20a *g, u32 pbdma_id,
-                u32 *error_notifier);
     } pbdma;

     struct {


@@ -934,9 +934,9 @@ static const struct gpu_ops tu104_ops = {
         .pbdma_acquire_val = gm20b_pbdma_acquire_val,
         .get_pbdma_signature = gp10b_pbdma_get_signature,
         .dump_pbdma_status = gm20b_pbdma_dump_status,
-        .handle_pbdma_intr = gm20b_pbdma_handle_intr,
-        .handle_pbdma_intr_0 = gv11b_pbdma_handle_intr_0,
-        .handle_pbdma_intr_1 = gv11b_pbdma_handle_intr_1,
+        .handle_intr = gm20b_pbdma_handle_intr,
+        .handle_intr_0 = gv11b_pbdma_handle_intr_0,
+        .handle_intr_1 = gv11b_pbdma_handle_intr_1,
         .read_pbdma_data = tu104_pbdma_read_data,
         .reset_pbdma_header = tu104_pbdma_reset_header,
         .device_fatal_0_intr_descs =