Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-23 18:16:01 +03:00)
gpu: nvgpu: add BVEC test for nvgpu_rc_pbdma_fault
Update nvgpu_rc_pbdma_fault with invalid checks and add BVEC test for it.
Make ga10b_fifo_pbdma_isr static.

NVGPU-6772

Change-Id: I5485760c53e1fff1278557a5b25659a1fc0e4eaf
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2551617
(cherry picked from commit e917042d395d07cb902580bad3d5a7d0096cc303)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2623625
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 80efe558b1
parent d8e8eb65d3
committed by mobile promotions
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -42,6 +42,11 @@ bool nvgpu_pbdma_status_is_chsw_valid(struct nvgpu_pbdma_status_info
 {
 	return pbdma_status->chsw_status == NVGPU_PBDMA_CHSW_STATUS_VALID;
 }
 
+bool nvgpu_pbdma_status_ch_not_loaded(struct nvgpu_pbdma_status_info
+		*pbdma_status)
+{
+	return pbdma_status->chsw_status == NVGPU_PBDMA_CHSW_STATUS_INVALID;
+}
+
 bool nvgpu_pbdma_status_is_id_type_tsg(struct nvgpu_pbdma_status_info
 		*pbdma_status)
 {
@@ -621,9 +621,8 @@ void nvgpu_tsg_set_error_notifier(struct gk20a *g, struct nvgpu_tsg *tsg,
 		u32 error_notifier)
 {
 	struct nvgpu_channel *ch = NULL;
-	u32 max_error_notifier_id = NVGPU_ERR_NOTIFIER_PBDMA_PUSHBUFFER_CRC_MISMATCH;
 
-	if (error_notifier > max_error_notifier_id) {
+	if (error_notifier >= NVGPU_ERR_NOTIFIER_INVAL) {
 		return;
 	}
 
@@ -35,6 +35,7 @@
 #include <nvgpu/error_notifier.h>
 #include <nvgpu/pbdma_status.h>
 #include <nvgpu/rc.h>
+#include <nvgpu/nvgpu_init.h>
 
 void nvgpu_rc_fifo_recover(struct gk20a *g, u32 eng_bitmask,
 			u32 hw_id, bool id_is_tsg,
@@ -94,11 +95,18 @@ void nvgpu_rc_ctxsw_timeout(struct gk20a *g, u32 eng_bitmask,
 #endif
 }
 
-void nvgpu_rc_pbdma_fault(struct gk20a *g, u32 pbdma_id, u32 error_notifier,
+int nvgpu_rc_pbdma_fault(struct gk20a *g, u32 pbdma_id, enum nvgpu_err_notif error_notifier,
 		struct nvgpu_pbdma_status_info *pbdma_status)
 {
-	u32 id;
 	u32 id_type = PBDMA_STATUS_ID_TYPE_INVALID;
+	int err = 0;
+	u32 id;
+
+	if (error_notifier >= NVGPU_ERR_NOTIFIER_INVAL) {
+		nvgpu_err(g, "Invalid error notifier %u", error_notifier);
+		err = -EINVAL;
+		goto out;
+	}
 
 	nvgpu_log(g, gpu_dbg_info, "pbdma id %d error notifier %d",
 			pbdma_id, error_notifier);
@@ -111,10 +119,14 @@ void nvgpu_rc_pbdma_fault(struct gk20a *g, u32 pbdma_id, u32 error_notifier,
 			nvgpu_pbdma_status_is_chsw_switch(pbdma_status)) {
 		id = pbdma_status->next_id;
 		id_type = pbdma_status->next_id_type;
-	} else {
+	} else if (nvgpu_pbdma_status_ch_not_loaded(pbdma_status)) {
 		/* Nothing to do here */
-		nvgpu_err(g, "Invalid pbdma_status.id");
-		return;
+		nvgpu_log_info(g, "no channel loaded on pbdma.");
+		goto out;
+	} else {
+		nvgpu_err(g, "pbdma status not valid");
+		err = -EINVAL;
+		goto out;
 	}
 
 	if (id_type == PBDMA_STATUS_ID_TYPE_TSGID) {
@@ -128,7 +140,8 @@ void nvgpu_rc_pbdma_fault(struct gk20a *g, u32 pbdma_id, u32 error_notifier,
 		struct nvgpu_tsg *tsg;
 		if (ch == NULL) {
 			nvgpu_err(g, "channel is not referenceable");
-			return;
+			err = -EINVAL;
+			goto out;
 		}
 
 		tsg = nvgpu_tsg_from_ch(ch);
@@ -138,12 +151,21 @@ void nvgpu_rc_pbdma_fault(struct gk20a *g, u32 pbdma_id, u32 error_notifier,
 				RC_TYPE_PBDMA_FAULT);
 		} else {
 			nvgpu_err(g, "chid: %d is not bound to tsg", ch->chid);
+			err = -EINVAL;
 		}
 
 		nvgpu_channel_put(ch);
 	} else {
-		nvgpu_err(g, "Invalid pbdma_status.id_type");
+		nvgpu_err(g, "Invalid pbdma_status id_type or next_id_type");
+		err = -EINVAL;
 	}
 
+out:
+	if (err != 0) {
+		nvgpu_sw_quiesce(g);
+	}
+
+	return err;
 }
 
 void nvgpu_rc_runlist_update(struct gk20a *g, u32 runlist_id)
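Note: the BVEC (boundary value) test named in the commit title is not shown in the hunks above. The sketch below is only an illustration of the boundaries the new range check enforces, assuming a plain C harness with a usable struct gk20a; the function name test_rc_pbdma_fault_bvec and the report callback are hypothetical, and the invalid-notifier case triggers nvgpu_sw_quiesce(), which the test environment must tolerate.

#include <nvgpu/rc.h>
#include <nvgpu/pbdma_status.h>
#include <nvgpu/error_notifier.h>

/*
 * Illustrative only -- not the test added by this change. Exercises
 * nvgpu_rc_pbdma_fault() on both sides of the NVGPU_ERR_NOTIFIER_INVAL
 * boundary introduced above.
 */
static void test_rc_pbdma_fault_bvec(struct gk20a *g,
		void (*report)(const char *case_name, bool passed))
{
	struct nvgpu_pbdma_status_info status = {0};
	int err;

	/*
	 * First invalid value: NVGPU_ERR_NOTIFIER_INVAL must be rejected
	 * with -EINVAL (this path also calls nvgpu_sw_quiesce()).
	 */
	err = nvgpu_rc_pbdma_fault(g, 0U, NVGPU_ERR_NOTIFIER_INVAL, &status);
	report("notifier == NVGPU_ERR_NOTIFIER_INVAL", err == -EINVAL);

	/*
	 * Last valid value: NVGPU_ERR_NOTIFIER_CE_ERROR passes the range
	 * check; with chsw_status reporting no channel loaded the function
	 * takes the new "nothing to do" path and returns 0.
	 */
	status.chsw_status = NVGPU_PBDMA_CHSW_STATUS_INVALID;
	err = nvgpu_rc_pbdma_fault(g, 0U, NVGPU_ERR_NOTIFIER_CE_ERROR, &status);
	report("notifier == NVGPU_ERR_NOTIFIER_CE_ERROR, channel not loaded",
		err == 0);
}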
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -36,7 +36,6 @@ void ga10b_fifo_intr_0_isr(struct gk20a *g);
 void ga10b_fifo_intr_set_recover_mask(struct gk20a *g);
 void ga10b_fifo_intr_unset_recover_mask(struct gk20a *g);
 
-void ga10b_fifo_pbdma_isr(struct gk20a *g, struct nvgpu_runlist *runlist, u32 pbdma_idx);
 void ga10b_fifo_runlist_intr_retrigger(struct gk20a *g, u32 intr_tree);
 
 #endif /* NVGPU_FIFO_INTR_GA10B_H */
@@ -317,6 +317,36 @@ static void ga10b_fifo_runlist_intr_clear(struct gk20a *g)
 	}
 }
 
+static int ga10b_fifo_pbdma_isr(struct gk20a *g, struct nvgpu_runlist *runlist,
+		u32 pbdma_idx)
+{
+	u32 pbdma_id;
+	const struct nvgpu_pbdma_info *pbdma_info;
+	int err;
+
+	if (pbdma_idx >= PBDMA_PER_RUNLIST_SIZE) {
+		nvgpu_err(g, "pbdma_idx(%d) >= max_pbdmas_per_runlist(%d)",
+			pbdma_idx, PBDMA_PER_RUNLIST_SIZE);
+		return -EINVAL;
+	}
+	pbdma_info = runlist->pbdma_info;
+	pbdma_id = pbdma_info->pbdma_id[pbdma_idx];
+	if (pbdma_id == PBDMA_ID_INVALID) {
+		nvgpu_err(g, "runlist_id(%d), pbdma_idx(%d): invalid PBDMA",
+			runlist->id, pbdma_idx);
+		return -EINVAL;
+	}
+
+	err = g->ops.pbdma.handle_intr(g, pbdma_id, true);
+	if (err != 0) {
+		nvgpu_err(g, "pbdma intr failed id: %u", pbdma_idx);
+		return err;
+	}
+
+	return err;
+}
+
+
 void ga10b_fifo_intr_0_isr(struct gk20a *g)
 {
 	u32 i, intr_0, handled_intr_0 = 0U;
@@ -324,6 +354,7 @@ void ga10b_fifo_intr_0_isr(struct gk20a *g)
 	u32 pbdma_idx = 0U;
 	u32 intr_tree_0 = 0U, intr_tree_1 = 1U;
 	struct nvgpu_runlist *runlist;
+	int err = 0;
 
 	/* TODO: sw_ready is needed only for recovery part */
 	if (!g->fifo.sw_ready) {
@@ -349,7 +380,17 @@ void ga10b_fifo_intr_0_isr(struct gk20a *g)
 				pbdma_idx++) {
 			if (intr_0 &
 				runlist_intr_0_pbdmai_intr_tree_j_pending_f(pbdma_idx, intr_tree_0)) {
-				ga10b_fifo_pbdma_isr(g, runlist, pbdma_idx);
+				/**
+				 * Quiesce is triggered as part of nvgpu_rc_pbdma_fault
+				 * failure case, so -
+				 * 1. Avoid looping through the rest of the PBDMAs by
+				 *    adding a return statement here.
+				 * 2. Avoid re-triggering the PBDMA ISR by returning
+				 *    pbdma_intr field value here in handled_intr_0.
+				 */
+				if (err == 0) {
+					err = ga10b_fifo_pbdma_isr(g, runlist, pbdma_idx);
+				}
 				handled_intr_0 |= runlist_intr_0_pbdmai_intr_tree_j_pending_f(pbdma_idx, intr_tree_0);
 			}
 		}
@@ -456,27 +497,6 @@ void ga10b_fifo_intr_unset_recover_mask(struct gk20a *g)
 
 }
 
-
-void ga10b_fifo_pbdma_isr(struct gk20a *g, struct nvgpu_runlist *runlist, u32 pbdma_idx)
-{
-	u32 pbdma_id;
-	const struct nvgpu_pbdma_info *pbdma_info;
-
-	if (pbdma_idx >= PBDMA_PER_RUNLIST_SIZE) {
-		nvgpu_err(g, "pbdma_idx(%d) >= max_pbdmas_per_runlist(%d)",
-			pbdma_idx, PBDMA_PER_RUNLIST_SIZE);
-		return;
-	}
-	pbdma_info = runlist->pbdma_info;
-	pbdma_id = pbdma_info->pbdma_id[pbdma_idx];
-	if (pbdma_id == PBDMA_ID_INVALID) {
-		nvgpu_err(g, "runlist_id(%d), pbdma_idx(%d): invalid PBDMA",
-			runlist->id, pbdma_idx);
-		return;
-	}
-	g->ops.pbdma.handle_intr(g, pbdma_id, true);
-}
-
 void ga10b_fifo_runlist_intr_retrigger(struct gk20a *g, u32 intr_tree)
 {
 	u32 i = 0U;
@@ -32,6 +32,7 @@
 #include <nvgpu/nvgpu_err.h>
 #include <nvgpu/cic_mon.h>
 #include <nvgpu/engines.h>
+#include <nvgpu/nvgpu_init.h>
 
 #include <hal/fifo/fifo_intr_gk20a.h>
 #include <hal/fifo/mmu_fault_gk20a.h>
@@ -92,12 +93,25 @@ u32 gk20a_fifo_pbdma_isr(struct gk20a *g)
 	u32 pbdma_id;
 	u32 num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
 	u32 pbdma_pending_bitmask = nvgpu_readl(g, fifo_intr_pbdma_id_r());
+	int err;
 
 	for (pbdma_id = 0; pbdma_id < num_pbdma; pbdma_id++) {
 		if (fifo_intr_pbdma_id_status_v(pbdma_pending_bitmask, pbdma_id) != 0U) {
 			nvgpu_log(g, gpu_dbg_intr, "pbdma id %d intr pending",
 				pbdma_id);
-			g->ops.pbdma.handle_intr(g, pbdma_id, true);
+			/**
+			 * Quiesce is triggered as part of nvgpu_rc_pbdma_fault
+			 * failure case, so -
+			 * 1. Avoid looping through the rest of the PBDMAs by
+			 *    adding a return statement here.
+			 * 2. Avoid re-triggering the PBDMA ISR by returning
+			 *    pbdma_intr field value here.
+			 */
+			err = g->ops.pbdma.handle_intr(g, pbdma_id, true);
+			if (err != 0) {
+				nvgpu_err(g, "pbdma intr failed id: %u", pbdma_id);
+				break;
+			}
 		}
 	}
 	return fifo_intr_0_pbdma_intr_pending_f();
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -41,7 +41,7 @@ struct nvgpu_pbdma_status_info;
 struct nvgpu_device;
 
 void ga10b_pbdma_intr_enable(struct gk20a *g, bool enable);
-void ga10b_pbdma_handle_intr(struct gk20a *g, u32 pbdma_id, bool recover);
+int ga10b_pbdma_handle_intr(struct gk20a *g, u32 pbdma_id, bool recover);
 bool ga10b_pbdma_handle_intr_0(struct gk20a *g, u32 pbdma_id, u32 pbdma_intr_0,
 		u32 *error_notifier);
 bool ga10b_pbdma_handle_intr_1(struct gk20a *g, u32 pbdma_id, u32 pbdma_intr_1,
@@ -358,11 +358,11 @@ void ga10b_pbdma_intr_enable(struct gk20a *g, bool enable)
 	}
 }
 
-void ga10b_pbdma_handle_intr(struct gk20a *g, u32 pbdma_id, bool recover)
+int ga10b_pbdma_handle_intr(struct gk20a *g, u32 pbdma_id, bool recover)
 {
 	struct nvgpu_pbdma_status_info pbdma_status;
 	u32 intr_error_notifier = NVGPU_ERR_NOTIFIER_PBDMA_ERROR;
+	int err = 0;
 
 	u32 pbdma_intr_0 = nvgpu_readl(g, pbdma_intr_0_r(pbdma_id));
 	u32 pbdma_intr_1 = nvgpu_readl(g, pbdma_intr_1_r(pbdma_id));
@@ -377,9 +377,12 @@ void ga10b_pbdma_handle_intr(struct gk20a *g, u32 pbdma_id, bool recover)
 		g->ops.pbdma_status.read_pbdma_status_info(g,
 			pbdma_id, &pbdma_status);
 		if (recover) {
-			nvgpu_rc_pbdma_fault(g, pbdma_id,
+			err = nvgpu_rc_pbdma_fault(g, pbdma_id,
 					intr_error_notifier,
 					&pbdma_status);
+			if (err != 0) {
+				nvgpu_err(g, "recovery failed");
+			}
 		}
 	}
 	nvgpu_writel(g, pbdma_intr_0_r(pbdma_id), pbdma_intr_0);
@@ -391,17 +394,22 @@ void ga10b_pbdma_handle_intr(struct gk20a *g, u32 pbdma_id, bool recover)
 			pbdma_id, pbdma_intr_1);
 
 		if (g->ops.pbdma.handle_intr_1(g, pbdma_id, pbdma_intr_1,
-			&intr_error_notifier)) {
+			&intr_error_notifier) && (err == 0)) {
 			g->ops.pbdma_status.read_pbdma_status_info(g,
 				pbdma_id, &pbdma_status);
 			if (recover) {
-				nvgpu_rc_pbdma_fault(g, pbdma_id,
+				err = nvgpu_rc_pbdma_fault(g, pbdma_id,
 						intr_error_notifier,
 						&pbdma_status);
+				if (err != 0) {
+					nvgpu_err(g, "recovery failed");
+				}
 			}
 		}
 		nvgpu_writel(g, pbdma_intr_1_r(pbdma_id), pbdma_intr_1);
 	}
+
+	return err;
 }
 
 static bool ga10b_pbdma_handle_intr_0_legacy(struct gk20a *g, u32 pbdma_id,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -34,7 +34,7 @@ struct nvgpu_device;
 
 bool gm20b_pbdma_handle_intr_0(struct gk20a *g, u32 pbdma_id,
 			u32 pbdma_intr_0, u32 *error_notifier);
-void gm20b_pbdma_handle_intr(struct gk20a *g, u32 pbdma_id, bool recover);
+int gm20b_pbdma_handle_intr(struct gk20a *g, u32 pbdma_id, bool recover);
 
 u32 gm20b_pbdma_read_data(struct gk20a *g, u32 pbdma_id);
 void gm20b_pbdma_reset_header(struct gk20a *g, u32 pbdma_id);
@@ -323,10 +323,11 @@ u32 gm20b_pbdma_restartable_0_intr_descs(void)
 	return restartable_0_intr_descs;
 }
 
-void gm20b_pbdma_handle_intr(struct gk20a *g, u32 pbdma_id, bool recover)
+int gm20b_pbdma_handle_intr(struct gk20a *g, u32 pbdma_id, bool recover)
 {
 	struct nvgpu_pbdma_status_info pbdma_status;
 	u32 intr_error_notifier = NVGPU_ERR_NOTIFIER_PBDMA_ERROR;
+	int err = 0;
 
 	u32 pbdma_intr_0 = nvgpu_readl(g, pbdma_intr_0_r(pbdma_id));
 	u32 pbdma_intr_1 = nvgpu_readl(g, pbdma_intr_1_r(pbdma_id));
@@ -341,9 +342,12 @@ void gm20b_pbdma_handle_intr(struct gk20a *g, u32 pbdma_id, bool recover)
 		g->ops.pbdma_status.read_pbdma_status_info(g,
 			pbdma_id, &pbdma_status);
 		if (recover) {
-			nvgpu_rc_pbdma_fault(g, pbdma_id,
+			err = nvgpu_rc_pbdma_fault(g, pbdma_id,
 					intr_error_notifier,
 					&pbdma_status);
+			if (err != 0) {
+				nvgpu_err(g, "recovery failed");
+			}
 		}
 	}
 	nvgpu_writel(g, pbdma_intr_0_r(pbdma_id), pbdma_intr_0);
@@ -355,18 +359,23 @@ void gm20b_pbdma_handle_intr(struct gk20a *g, u32 pbdma_id, bool recover)
 			pbdma_id, pbdma_intr_1);
 
 		if (g->ops.pbdma.handle_intr_1(g, pbdma_id, pbdma_intr_1,
-			&intr_error_notifier)) {
+			&intr_error_notifier) && (err == 0)) {
 			g->ops.pbdma_status.read_pbdma_status_info(g,
 				pbdma_id, &pbdma_status);
 			if (recover) {
-				nvgpu_rc_pbdma_fault(g, pbdma_id,
+				err = nvgpu_rc_pbdma_fault(g, pbdma_id,
 						intr_error_notifier,
 						&pbdma_status);
+				if (err != 0) {
+					nvgpu_err(g, "recovery failed");
+				}
 			}
 		}
 
 		nvgpu_writel(g, pbdma_intr_1_r(pbdma_id), pbdma_intr_1);
 	}
 
+	return err;
 }
 
 u32 gm20b_pbdma_get_gp_base(u64 gpfifo_base)
@@ -131,7 +131,11 @@ int gv11b_fifo_preempt_poll_pbdma(struct gk20a *g, u32 tsgid,
 		 * reported to SW.
 		 */
 
-		g->ops.pbdma.handle_intr(g, pbdma_id, false);
+		ret = g->ops.pbdma.handle_intr(g, pbdma_id, false);
+		if (ret != 0) {
+			nvgpu_err(g, "pbdma intr failed id: %u %d", pbdma_id, ret);
+			break;
+		}
 
 		g->ops.pbdma_status.read_pbdma_status_info(g,
 				pbdma_id, &pbdma_status);
@@ -27,7 +27,7 @@
 
 struct nvgpu_channel;
 
-enum {
+enum nvgpu_err_notif {
 	NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT = 0,
 	NVGPU_ERR_NOTIFIER_GR_ERROR_SW_METHOD,
 	NVGPU_ERR_NOTIFIER_GR_ERROR_SW_NOTIFY,
@@ -40,6 +40,7 @@ enum {
 	NVGPU_ERR_NOTIFIER_RESETCHANNEL_VERIF_ERROR,
 	NVGPU_ERR_NOTIFIER_PBDMA_PUSHBUFFER_CRC_MISMATCH,
 	NVGPU_ERR_NOTIFIER_CE_ERROR,
+	NVGPU_ERR_NOTIFIER_INVAL,
 };
 
 void nvgpu_set_err_notifier_locked(struct nvgpu_channel *ch, u32 error);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -51,7 +51,7 @@ struct gops_pbdma {
 	bool (*handle_intr_1)(struct gk20a *g,
 			u32 pbdma_id, u32 pbdma_intr_1,
 			u32 *error_notifier);
-	void (*handle_intr)(struct gk20a *g, u32 pbdma_id, bool recover);
+	int (*handle_intr)(struct gk20a *g, u32 pbdma_id, bool recover);
 	u32 (*set_clear_intr_offsets) (struct gk20a *g,
 			u32 set_clear_size);
 	u32 (*get_signature)(struct gk20a *g);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -150,6 +150,17 @@ bool nvgpu_pbdma_status_is_chsw_save(struct nvgpu_pbdma_status_info
  */
 bool nvgpu_pbdma_status_is_chsw_valid(struct nvgpu_pbdma_status_info
 		*pbdma_status);
+/**
+ * @brief Check if chsw_status is set to invalid.
+ *
+ * @param pbdma_status [in] Pointer to struct containing pbdma_status h/w
+ *			reg/field value.
+ *
+ * @return Interprets #pbdma_status and returns true if channel
+ * status is set to #NVGPU_PBDMA_CHSW_STATUS_INVALID else returns false.
+ */
+bool nvgpu_pbdma_status_ch_not_loaded(struct nvgpu_pbdma_status_info
+		*pbdma_status);
 /**
  * @brief Check if id_type is tsg.
  *
@@ -103,6 +103,7 @@ struct nvgpu_tsg;
 struct nvgpu_channel;
 struct nvgpu_pbdma_status_info;
 struct mmu_fault_info;
+enum nvgpu_err_notif;
 
 static inline const char *nvgpu_rc_type_to_str(unsigned int rc_type)
 {
@@ -172,8 +173,16 @@ void nvgpu_rc_ctxsw_timeout(struct gk20a *g, u32 eng_bitmask,
  *
  * Do PBDMA fault recovery. Set error notifier as per \a error_notifier and call
  * \a nvgpu_rc_tsg_and_related_engines to do the recovery.
+ *
+ * @return 0 in case of success, < 0 in case of failure.
+ * @retval -EINVAL in case of following cases:
+ * 1. the error_notifier is invalid.
+ * 2. the pbdma status is invalid.
+ * 3. the channel is not referenceable.
+ * 4. the channel is not bound to tsg.
+ * 5. the id type or next_id type are not indicating channel id type or tsg id type.
  */
-void nvgpu_rc_pbdma_fault(struct gk20a *g, u32 pbdma_id, u32 error_notifier,
+int nvgpu_rc_pbdma_fault(struct gk20a *g, u32 pbdma_id, enum nvgpu_err_notif error_notifier,
 		struct nvgpu_pbdma_status_info *pbdma_status);
 
 /**