gpu: nvgpu: move .preempt_trigger/.is_preempt_pending to IDs
.preempt_tsg uses .preempt_trigger/.is_preempt_pending, so they both have
to use runlist_id and tsgid too.

Jira GVSCI-15770

Change-Id: Ida24d160c362ea1348d7c19e6d0352bb390d0a64
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2863442
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Ramesh Mylavarapu <rmylavarapu@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Committed by: mobile promotions
Parent: 8f5adab299
Commit: da1da8f563
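In short, the two FIFO HAL ops change as shown in the gops_fifo hunk further
below; this sketch just restates the before/after signatures for quick
reference:

	/* Before: a single id plus id_type. */
	void (*preempt_trigger)(struct gk20a *g, u32 id, unsigned int id_type);
	int (*is_preempt_pending)(struct gk20a *g, u32 id,
			unsigned int id_type, bool preempt_retries_left);

	/* After: callers pass the runlist and the TSG id explicitly. */
	void (*preempt_trigger)(struct gk20a *g,
			u32 runlist_id, u32 tsgid, unsigned int id_type);
	int (*is_preempt_pending)(struct gk20a *g, u32 runlist_id, u32 id,
			unsigned int id_type, bool preempt_retries_left);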
@@ -72,7 +72,7 @@ int nvgpu_fifo_preempt_tsg(struct gk20a *g, u32 runlist_id, u32 tsgid)
 	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
 			PMU_MUTEX_ID_FIFO, &token);
 #endif
-	g->ops.fifo.preempt_trigger(g, tsgid, ID_TYPE_TSG);
+	g->ops.fifo.preempt_trigger(g, runlist_id, tsgid, ID_TYPE_TSG);
 
 	/*
 	 * Poll for preempt done. if stalling interrupts are pending
@@ -83,7 +83,7 @@ int nvgpu_fifo_preempt_tsg(struct gk20a *g, u32 runlist_id, u32 tsgid)
 	 * the engines hung and set the runlist reset_eng_bitmask
 	 * and mark preemption completion.
 	 */
-	ret = g->ops.fifo.is_preempt_pending(g, tsgid,
+	ret = g->ops.fifo.is_preempt_pending(g, runlist_id, tsgid,
 			ID_TYPE_TSG, preempt_retry_count > 1U);
 
 #ifdef CONFIG_NVGPU_LS_PMU
@@ -198,7 +198,7 @@ void nvgpu_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlists_bitmask)
 			continue;
 		}
 		/* issue runlist preempt */
-		g->ops.fifo.preempt_trigger(g, runlist->id,
+		g->ops.fifo.preempt_trigger(g, runlist->id, INVAL_ID,
 			ID_TYPE_RUNLIST);
 #ifdef CONFIG_NVGPU_RECOVERY
 		/*
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -26,6 +26,7 @@
 
 struct gk20a;
 
-void ga10b_fifo_preempt_trigger(struct gk20a *g, u32 id, unsigned int id_type);
+void ga10b_fifo_preempt_trigger(struct gk20a *g,
+		u32 runlist_id, u32 id, unsigned int id_type);
 
 #endif /* FIFO_PREEMPT_GA10B_H */
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -31,22 +31,24 @@
 
 #include <nvgpu/hw/ga10b/hw_runlist_ga10b.h>
 
-void ga10b_fifo_preempt_trigger(struct gk20a *g, u32 id, unsigned int id_type)
+void ga10b_fifo_preempt_trigger(struct gk20a *g,
+		u32 runlist_id, u32 tsgid, unsigned int id_type)
 {
-	struct nvgpu_runlist *runlist = NULL;
+	struct nvgpu_runlist *runlist;
 
-	if (id == INVAL_ID) {
+	if (runlist_id == INVAL_ID ||
+			(tsgid == INVAL_ID && id_type == ID_TYPE_TSG)) {
 		nvgpu_log(g, gpu_dbg_info, "Invalid id, cannot preempt");
 		return;
 	}
 
+	runlist = g->fifo.runlists[runlist_id];
+
 	if (id_type == ID_TYPE_TSG) {
-		struct nvgpu_tsg *tsg = &g->fifo.tsg[id];
-		nvgpu_runlist_writel(g, tsg->runlist, runlist_preempt_r(),
-				runlist_preempt_id_f(id) |
+		nvgpu_runlist_writel(g, runlist, runlist_preempt_r(),
+				runlist_preempt_id_f(tsgid) |
 				runlist_preempt_type_tsg_f());
 	} else if (id_type == ID_TYPE_RUNLIST) {
-		runlist = g->fifo.runlists[id];
 		nvgpu_runlist_writel(g, runlist, runlist_preempt_r(),
 				runlist_preempt_type_runlist_f());
 	} else {
@@ -40,15 +40,16 @@
 
 #include <nvgpu/hw/gk20a/hw_fifo_gk20a.h>
 
-void gk20a_fifo_preempt_trigger(struct gk20a *g, u32 id, unsigned int id_type)
+void gk20a_fifo_preempt_trigger(struct gk20a *g,
+		u32 runlist_id, u32 tsgid, unsigned int id_type)
 {
 	if (id_type == ID_TYPE_TSG) {
 		nvgpu_writel(g, fifo_preempt_r(),
-			fifo_preempt_id_f(id) |
+			fifo_preempt_id_f(tsgid) |
 			fifo_preempt_type_tsg_f());
 	} else {
 		nvgpu_writel(g, fifo_preempt_r(),
-			fifo_preempt_chid_f(id) |
+			fifo_preempt_chid_f(runlist_id) |
 			fifo_preempt_type_channel_f());
 	}
 }
@@ -58,14 +59,14 @@ static int gk20a_fifo_preempt_locked(struct gk20a *g, u32 id,
 {
 	nvgpu_log_fn(g, "id: %d id_type: %d", id, id_type);
 
-	/* issue preempt */
-	g->ops.fifo.preempt_trigger(g, id, id_type);
+	/* issue preempt, runlist_id not used for gm20b and prior */
+	g->ops.fifo.preempt_trigger(g, INVAL_ID, id, id_type);
 
-	/* wait for preempt */
-	return g->ops.fifo.is_preempt_pending(g, id, id_type, false);
+	/* wait for preempt, runlist_id not used for gm20b and prior */
+	return g->ops.fifo.is_preempt_pending(g, INVAL_ID, id, id_type, false);
 }
 
-int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
+int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 runlist_id, u32 id,
 		unsigned int id_type, bool preempt_retries_left)
 {
 	struct nvgpu_timeout timeout;
@@ -73,6 +74,7 @@ int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 	int ret;
 
 	(void)preempt_retries_left;
+	(void)runlist_id;
 
 	nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_preempt_get_timeout(g));
 
@@ -28,10 +28,11 @@ struct gk20a;
 struct nvgpu_channel;
 struct nvgpu_tsg;
 
-void gk20a_fifo_preempt_trigger(struct gk20a *g, u32 id, unsigned int id_type);
+void gk20a_fifo_preempt_trigger(struct gk20a *g,
+		u32 runlist_id, u32 tsgid, unsigned int id_type);
 int gk20a_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch);
 int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 runlist_id, u32 tsgid);
-int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
+int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 runlist_id, u32 id,
 		unsigned int id_type, bool preempt_retries_left);
 
 #endif /* FIFO_PREEMPT_GK20A_H */
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -30,9 +30,10 @@ struct gk20a;
 struct nvgpu_channel;
 struct nvgpu_tsg;
 
-void gv11b_fifo_preempt_trigger(struct gk20a *g, u32 id, unsigned int id_type);
+void gv11b_fifo_preempt_trigger(struct gk20a *g,
+		u32 runlist_id, u32 id, unsigned int id_type);
 int gv11b_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch);
-int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
+int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 runlist_id, u32 id,
 		unsigned int id_type, bool preempt_retries_left);
 int gv11b_fifo_preempt_poll_pbdma(struct gk20a *g, u32 tsgid, u32 pbdma_id);
 
@@ -44,7 +44,7 @@
 #include <nvgpu/hw/gv11b/hw_fifo_gv11b.h>
 
 
-void gv11b_fifo_preempt_trigger(struct gk20a *g, u32 id, unsigned int id_type)
+void gv11b_fifo_preempt_trigger(struct gk20a *g, u32 runlist_id, u32 id, unsigned int id_type)
 {
 	if (id_type == ID_TYPE_TSG) {
 		nvgpu_writel(g, fifo_preempt_r(),
@@ -54,7 +54,7 @@ void gv11b_fifo_preempt_trigger(struct gk20a *g, u32 id, unsigned int id_type)
 		u32 reg_val;
 
 		reg_val = nvgpu_readl(g, fifo_runlist_preempt_r());
-		reg_val |= BIT32(id);
+		reg_val |= BIT32(runlist_id);
 		nvgpu_writel(g, fifo_runlist_preempt_r(), reg_val);
 	} else {
 		nvgpu_log_info(g, "channel preempt is noop");
@@ -312,7 +312,7 @@ static int gv11b_fifo_preempt_poll_eng(struct gk20a *g, u32 id,
 	return ret;
 }
 
-int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
+int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 runlist_id, u32 id,
 		unsigned int id_type, bool preempt_retries_left)
 {
 	struct nvgpu_fifo *f = &g->fifo;
@@ -325,13 +325,11 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 	int err, ret = 0;
 	u32 tsgid;
 
-	if (id_type == ID_TYPE_TSG) {
-		rl = f->tsg[id].runlist;
-		tsgid = id;
-	} else {
-		rl = f->channel[id].runlist;
-		tsgid = f->channel[id].tsgid;
-	}
+	/* GV11B onward, the function only supports tsg preemption */
+	nvgpu_assert(id_type == ID_TYPE_TSG);
+
+	rl = f->runlists[runlist_id];
+	tsgid = id;
 
 	nvgpu_log_info(g, "Check preempt pending for tsgid = %u", tsgid);
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -100,7 +100,8 @@ int ga10b_fifo_reschedule_preempt_next(struct nvgpu_channel *ch,
 		return 0;
 	}
 
-	g->ops.fifo.preempt_trigger(g, preempt_id, preempt_type != 0U);
+	g->ops.fifo.preempt_trigger(g,
+			runlist->id, preempt_id, preempt_type != 0U);
 #ifdef TRACEPOINTS_ENABLED
 	trace_gk20a_reschedule_preempt_next(ch->chid, fecsstat0,
 		engine_status.reg_data, fecsstat1,
@@ -109,7 +110,7 @@ int ga10b_fifo_reschedule_preempt_next(struct nvgpu_channel *ch,
 		nvgpu_runlist_readl(g, runlist, runlist_preempt_r());
 #endif
 	if (wait_preempt) {
-		if (g->ops.fifo.is_preempt_pending(g, preempt_id,
+		if (g->ops.fifo.is_preempt_pending(g, runlist->id, preempt_id,
 				preempt_type, false) != 0) {
 			nvgpu_err(g, "fifo preempt timed out");
 			/*
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -100,7 +100,7 @@ int gk20a_fifo_reschedule_preempt_next(struct nvgpu_channel *ch,
 		return ret;
 	}
 
-	g->ops.fifo.preempt_trigger(g, preempt_id, preempt_type != 0U);
+	g->ops.fifo.preempt_trigger(g, runlist->id, preempt_id, preempt_type != 0U);
#ifdef TRACEPOINTS_ENABLED
 	trace_gk20a_reschedule_preempt_next(ch->chid, fecsstat0,
 		engine_status.reg_data, fecsstat1,
@@ -109,7 +109,7 @@ int gk20a_fifo_reschedule_preempt_next(struct nvgpu_channel *ch,
 		nvgpu_readl(g, fifo_preempt_r()));
 #endif
 	if (wait_preempt) {
-		if (g->ops.fifo.is_preempt_pending(g, preempt_id,
+		if (g->ops.fifo.is_preempt_pending(g, runlist->id, preempt_id,
 				preempt_type, false) != 0) {
 			nvgpu_err(g, "fifo preempt timed out");
 			/*
@@ -1,7 +1,7 @@
 /*
  * GP10B GPU GR
  *
- * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -417,7 +417,8 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct nvgpu_channel
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 			"CILP: tsgid: 0x%x", tsg->tsgid);
 
-	g->ops.fifo.preempt_trigger(g, tsg->tsgid, ID_TYPE_TSG);
+	g->ops.fifo.preempt_trigger(g,
+			tsg->runlist->id, tsg->tsgid, ID_TYPE_TSG);
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 			"CILP: preempted tsg");
 	return ret;
@@ -209,10 +209,11 @@ struct gops_fifo {
 	 *
 	 * @return: None
 	 */
-	void (*preempt_trigger)(struct gk20a *g, u32 id, unsigned int id_type);
+	void (*preempt_trigger)(struct gk20a *g,
+			u32 runlist_id, u32 tsgid, unsigned int id_type);
 	int (*preempt_poll_pbdma)(struct gk20a *g, u32 tsgid,
 			u32 pbdma_id);
-	int (*is_preempt_pending)(struct gk20a *g, u32 id,
+	int (*is_preempt_pending)(struct gk20a *g, u32 runlist_id, u32 id,
 			unsigned int id_type, bool preempt_retries_left);
 	void (*intr_set_recover_mask)(struct gk20a *g);
 	void (*intr_unset_recover_mask)(struct gk20a *g);
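A minimal caller sketch against the updated ops above; the wrapper name
example_preempt_tsg is illustrative only, and the real trigger-then-poll flow
lives in nvgpu_fifo_preempt_tsg() in the first hunk of this change:

	/* Illustrative only: trigger a TSG preempt on its runlist, then poll
	 * for completion, mirroring what nvgpu_fifo_preempt_tsg() does. */
	static int example_preempt_tsg(struct gk20a *g, u32 runlist_id, u32 tsgid)
	{
		g->ops.fifo.preempt_trigger(g, runlist_id, tsgid, ID_TYPE_TSG);
		return g->ops.fifo.is_preempt_pending(g, runlist_id, tsgid,
				ID_TYPE_TSG, false);
	}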
@@ -95,14 +95,14 @@ int test_gv11b_fifo_preempt_trigger(struct unit_module *m, struct gk20a *g,
 		__func__, branches_str(branches, f_preempt_trigger));
 
 	if (branches & F_PREEMPT_TRIGGER_TSG) {
-		gv11b_fifo_preempt_trigger(g, 5U, ID_TYPE_TSG);
+		gv11b_fifo_preempt_trigger(g, 0U, 5U, ID_TYPE_TSG);
 		expected_reg_val = fifo_preempt_id_f(5U) |
 				fifo_preempt_type_tsg_f();
 		unit_assert(expected_reg_val ==
 			nvgpu_readl(g, fifo_preempt_r()), goto done);
 		nvgpu_writel(g, fifo_preempt_r(), orig_reg_val);
 	} else {
-		gv11b_fifo_preempt_trigger(g, 5U, ID_TYPE_CHANNEL);
+		gv11b_fifo_preempt_trigger(g, 0U, 5U, ID_TYPE_CHANNEL);
 		unit_assert(orig_reg_val ==
 			nvgpu_readl(g, fifo_preempt_r()), goto done);
 	}
@@ -199,20 +199,22 @@ done:
 	return ret;
 }
 
-static void stub_fifo_preempt_trigger(struct gk20a *g, u32 id,
+static void stub_fifo_preempt_trigger(struct gk20a *g, u32 runlist_id, u32 id,
 					unsigned int id_type)
 {
 
 }
 
-static int stub_fifo_is_preempt_pending_ebusy(struct gk20a *g, u32 id,
+static int stub_fifo_is_preempt_pending_ebusy(struct gk20a *g,
+					u32 runlist_id, u32 id,
 					unsigned int id_type,
 					bool preempt_retries_left)
 {
 	return -EBUSY;
 }
 
-static int stub_fifo_is_preempt_pending_pass(struct gk20a *g, u32 id,
+static int stub_fifo_is_preempt_pending_pass(struct gk20a *g,
+					u32 runlist_id, u32 id,
 					unsigned int id_type,
 					bool preempt_retries_left)
 {
@@ -457,7 +459,7 @@ int test_gv11b_fifo_is_preempt_pending(struct unit_module *m, struct gk20a *g,
 	/* Modify eng_stat for engine 0 */
 	nvgpu_writel(g, fifo_engine_status_r(0U), stub.eng_stat);
 
-	err = gv11b_fifo_is_preempt_pending(g, 0U, id_type, false);
+	err = gv11b_fifo_is_preempt_pending(g, 0U, 0U, id_type, false);
 
 	if (branches & F_PREEMPT_PENDING_POLL_PBDMA_FAIL) {
 		unit_assert(err == -ETIMEDOUT, goto done);