gpu: nvgpu: move generic preempt HALs to common

- Move the fifo.preempt_runlists_for_rc and fifo.preempt_tsg HALs to the
  common source file as nvgpu_fifo_preempt_runlists_for_rc and
  nvgpu_fifo_preempt_tsg.

Jira NVGPU-4881

Change-Id: I31f7973276c075130d8a0ac684c6c99e35be6017
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2323866
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:     Vedashree Vidwans
Authored:   2020-04-10 08:59:46 -07:00
Committed:  Alex Waterman
Parent:     fa73b73ba7
Commit:     c6908922e5

16 changed files with 215 additions and 189 deletions
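Both HALs being moved are chip-agnostic: they drive the preempt entirely through the g->ops.fifo.preempt_trigger HAL (and, for the TSG path, g->ops.fifo.is_preempt_pending), which is what makes them safe to host in common code. As a rough before/after sketch of the call sites (illustrative only, not part of the diff):

    /* Before: both entry points lived in the per-chip ops table. */
    g->ops.fifo.preempt_tsg(g, tsg);
    g->ops.fifo.preempt_runlists_for_rc(g, runlist_mask);

    /* After: nvgpu_fifo_preempt_tsg() is common code and is still
     * installed as .preempt_tsg; the .preempt_runlists_for_rc HAL is
     * removed and callers invoke the common function directly. */
    g->ops.fifo.preempt_tsg(g, tsg);
    nvgpu_fifo_preempt_runlists_for_rc(g, runlist_mask);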


@@ -27,6 +27,7 @@
 #include <nvgpu/fifo.h>
 #include <nvgpu/engines.h>
 #include <nvgpu/runlist.h>
+#include <nvgpu/preempt.h>
 #include <nvgpu/gk20a.h>
 #include <nvgpu/channel.h>
 #include <nvgpu/pbdma.h>
@@ -288,6 +289,6 @@ void nvgpu_fifo_sw_quiesce(struct gk20a *g)
 	g->ops.runlist.write_state(g, runlist_mask, RUNLIST_DISABLED);
 
 	/* Preempt all runlists */
-	g->ops.fifo.preempt_runlists_for_rc(g, runlist_mask);
+	nvgpu_fifo_preempt_runlists_for_rc(g, runlist_mask);
 }
 #endif


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -20,6 +20,7 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
+#include <nvgpu/soc.h>
 #include <nvgpu/gk20a.h>
 #include <nvgpu/runlist.h>
 #include <nvgpu/types.h>
@@ -27,13 +28,75 @@
 #include <nvgpu/tsg.h>
 #include <nvgpu/preempt.h>
 #include <nvgpu/nvgpu_err.h>
+#include <nvgpu/rc.h>
+#ifdef CONFIG_NVGPU_LS_PMU
+#include <nvgpu/pmu/mutex.h>
+#endif
 
 u32 nvgpu_preempt_get_timeout(struct gk20a *g)
 {
 	return g->ctxsw_timeout_period_ms;
 }
 
+int nvgpu_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
+{
+	struct nvgpu_fifo *f = &g->fifo;
+	int ret = 0;
+#ifdef CONFIG_NVGPU_LS_PMU
+	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
+	int mutex_ret = 0;
+#endif
+	u32 runlist_id;
+
+	nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
+
+	runlist_id = tsg->runlist_id;
+	if (runlist_id == NVGPU_INVALID_RUNLIST_ID) {
+		return 0;
+	}
+
+	nvgpu_mutex_acquire(&f->runlist_info[runlist_id]->runlist_lock);
+
+	/* WAR for Bug 2065990 */
+	nvgpu_tsg_disable_sched(g, tsg);
+
+#ifdef CONFIG_NVGPU_LS_PMU
+	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
+			PMU_MUTEX_ID_FIFO, &token);
+#endif
+	nvgpu_log_fn(g, "preempt id: %d", tsg->tsgid);
+	g->ops.fifo.preempt_trigger(g, tsg->tsgid, ID_TYPE_TSG);
+
+	/* poll for preempt done */
+	ret = g->ops.fifo.is_preempt_pending(g, tsg->tsgid, ID_TYPE_TSG);
+
+#ifdef CONFIG_NVGPU_LS_PMU
+	if (mutex_ret == 0) {
+		int err = nvgpu_pmu_lock_release(g, g->pmu, PMU_MUTEX_ID_FIFO,
+				&token);
+		if (err != 0) {
+			nvgpu_err(g, "PMU_MUTEX_ID_FIFO not released err=%d",
+				err);
+		}
+	}
+#endif
+
+	/* WAR for Bug 2065990 */
+	nvgpu_tsg_enable_sched(g, tsg);
+
+	nvgpu_mutex_release(&f->runlist_info[runlist_id]->runlist_lock);
+
+	if (ret != 0) {
+		if (nvgpu_platform_is_silicon(g)) {
+			nvgpu_err(g, "preempt timed out for tsgid: %u, "
+				"ctxsw timeout will trigger recovery if needed",
+				tsg->tsgid);
+		} else {
+			nvgpu_rc_preempt_timeout(g, tsg);
+		}
+	}
+	return ret;
+}
+
 int nvgpu_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch)
 {
 	int err;
@@ -85,3 +148,56 @@ void nvgpu_preempt_poll_tsg_on_pbdma(struct gk20a *g,
 		}
 	}
 }
+
+/*
+ * This should be called with runlist_lock held for all the
+ * runlists set in runlists_mask
+ */
+void nvgpu_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlists_bitmask)
+{
+	struct nvgpu_fifo *f = &g->fifo;
+	u32 i;
+#ifdef CONFIG_NVGPU_LS_PMU
+	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
+	int mutex_ret = 0;
+#endif
+
+	/* runlist_lock are locked by teardown and sched are disabled too */
+	nvgpu_log_fn(g, "preempt runlists_bitmask:0x%08x", runlists_bitmask);
+
+#ifdef CONFIG_NVGPU_LS_PMU
+	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
+			PMU_MUTEX_ID_FIFO, &token);
+#endif
+	for (i = 0U; i < f->num_runlists; i++) {
+		struct nvgpu_runlist_info *runlist;
+
+		runlist = &f->active_runlist_info[i];
+		if ((BIT32(runlist->runlist_id) & runlists_bitmask) == 0U) {
+			continue;
+		}
+		/* issue runlist preempt */
+		g->ops.fifo.preempt_trigger(g, runlist->runlist_id,
+				ID_TYPE_RUNLIST);
+#ifdef CONFIG_NVGPU_RECOVERY
+		/*
+		 * Preemption will never complete in RC due to some
+		 * fatal condition. Do not poll for preemption to
+		 * complete. Reset engines served by runlists.
+		 */
+		runlist->reset_eng_bitmask = runlist->eng_bitmask;
+#endif
+	}
+
+#ifdef CONFIG_NVGPU_LS_PMU
+	if (mutex_ret == 0) {
+		int err = nvgpu_pmu_lock_release(g, g->pmu, PMU_MUTEX_ID_FIFO,
+				&token);
+		if (err != 0) {
+			nvgpu_err(g, "PMU_MUTEX_ID_FIFO not released err=%d",
+				err);
+		}
+	}
+#endif
+}
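A possible caller shape for the new common helper, sketched under the assumption that the caller already holds every runlist_lock in the mask and has disabled scheduling (example_preempt_all_runlists is a hypothetical name, not part of this change):

    static void example_preempt_all_runlists(struct gk20a *g)
    {
        struct nvgpu_fifo *f = &g->fifo;
        u32 runlists_bitmask = 0U;
        u32 i;

        /* Build the bitmask the same way the helper consumes it:
         * one BIT32(runlist_id) per active runlist. */
        for (i = 0U; i < f->num_runlists; i++) {
            runlists_bitmask |= BIT32(f->active_runlist_info[i].runlist_id);
        }

        /* Triggers the preempt and flags engines for reset; by design it
         * does not poll for completion. */
        nvgpu_fifo_preempt_runlists_for_rc(g, runlists_bitmask);
    }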


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -32,11 +32,8 @@ struct nvgpu_tsg;
 void gv11b_fifo_preempt_trigger(struct gk20a *g, u32 id, unsigned int id_type);
 int gv11b_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch);
-int gv11b_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg);
 int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 		unsigned int id_type);
-void gv11b_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlists_mask);
 int gv11b_fifo_preempt_poll_pbdma(struct gk20a *g, u32 tsgid, u32 pbdma_id);
 
 #endif /* FIFO_PREEMPT_GV11B_H */


@@ -27,7 +27,6 @@
#include <nvgpu/ptimer.h> #include <nvgpu/ptimer.h>
#include <nvgpu/io.h> #include <nvgpu/io.h>
#include <nvgpu/fifo.h> #include <nvgpu/fifo.h>
#include <nvgpu/rc.h>
#include <nvgpu/runlist.h> #include <nvgpu/runlist.h>
#include <nvgpu/gk20a.h> #include <nvgpu/gk20a.h>
#include <nvgpu/channel.h> #include <nvgpu/channel.h>
@@ -52,87 +51,17 @@ void gv11b_fifo_preempt_trigger(struct gk20a *g, u32 id, unsigned int id_type)
nvgpu_writel(g, fifo_preempt_r(), nvgpu_writel(g, fifo_preempt_r(),
fifo_preempt_id_f(id) | fifo_preempt_id_f(id) |
fifo_preempt_type_tsg_f()); fifo_preempt_type_tsg_f());
} else if (id_type == ID_TYPE_RUNLIST) {
u32 reg_val;
reg_val = nvgpu_readl(g, fifo_runlist_preempt_r());
reg_val |= BIT32(id);
nvgpu_writel(g, fifo_runlist_preempt_r(), reg_val);
} else { } else {
nvgpu_log_info(g, "channel preempt is noop"); nvgpu_log_info(g, "channel preempt is noop");
} }
} }
static void gv11b_fifo_issue_runlist_preempt(struct gk20a *g,
u32 runlists_mask)
{
u32 reg_val;
/* issue runlist preempt */
reg_val = nvgpu_readl(g, fifo_runlist_preempt_r());
reg_val |= runlists_mask;
nvgpu_writel(g, fifo_runlist_preempt_r(), reg_val);
}
static int gv11b_fifo_preempt_locked(struct gk20a *g, u32 id,
unsigned int id_type)
{
nvgpu_log_fn(g, "preempt id: %d id_type: %d", id, id_type);
g->ops.fifo.preempt_trigger(g, id, id_type);
/* poll for preempt done */
return g->ops.fifo.is_preempt_pending(g, id, id_type);
}
/*
* This should be called with runlist_lock held for all the
* runlists set in runlists_mask
*/
void gv11b_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlists_mask)
{
#ifdef CONFIG_NVGPU_LS_PMU
u32 token = PMU_INVALID_MUTEX_OWNER_ID;
int mutex_ret = 0;
#endif
#ifdef CONFIG_NVGPU_RECOVERY
struct nvgpu_fifo *f = &g->fifo;
u32 i;
#endif
/* runlist_lock are locked by teardown and sched are disabled too */
nvgpu_log_fn(g, "preempt runlists_mask:0x%08x", runlists_mask);
#ifdef CONFIG_NVGPU_LS_PMU
mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
PMU_MUTEX_ID_FIFO, &token);
#endif
/* issue runlist preempt */
gv11b_fifo_issue_runlist_preempt(g, runlists_mask);
#ifdef CONFIG_NVGPU_RECOVERY
/*
* Preemption will never complete in RC due to some fatal condition.
* Do not poll for preemption to complete. Reset engines served by
* runlists.
*/
for (i = 0U; i < f->num_runlists; i++) {
struct nvgpu_runlist_info *runlist;
runlist = &f->active_runlist_info[i];
if ((fifo_runlist_preempt_runlist_m(runlist->runlist_id) &
runlists_mask) != 0U) {
runlist->reset_eng_bitmask = runlist->eng_bitmask;
}
}
#endif
#ifdef CONFIG_NVGPU_LS_PMU
if (mutex_ret == 0) {
int err = nvgpu_pmu_lock_release(g, g->pmu, PMU_MUTEX_ID_FIFO,
&token);
if (err != 0) {
nvgpu_err(g, "PMU_MUTEX_ID_FIFO not released err=%d",
err);
}
}
#endif
}
static int fifo_preempt_check_tsg_on_pbdma(u32 tsgid, static int fifo_preempt_check_tsg_on_pbdma(u32 tsgid,
struct nvgpu_pbdma_status_info *pbdma_status) struct nvgpu_pbdma_status_info *pbdma_status)
{ {
@@ -446,58 +375,3 @@ int gv11b_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch)
/* Preempt tsg. Channel preempt is NOOP */ /* Preempt tsg. Channel preempt is NOOP */
return g->ops.fifo.preempt_tsg(g, tsg); return g->ops.fifo.preempt_tsg(g, tsg);
} }
int gv11b_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
{
struct nvgpu_fifo *f = &g->fifo;
int ret = 0;
#ifdef CONFIG_NVGPU_LS_PMU
u32 token = PMU_INVALID_MUTEX_OWNER_ID;
int mutex_ret = 0;
#endif
u32 runlist_id;
nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
runlist_id = tsg->runlist_id;
nvgpu_log_fn(g, "runlist_id: %d", runlist_id);
if (runlist_id == NVGPU_INVALID_RUNLIST_ID) {
return 0;
}
nvgpu_mutex_acquire(&f->runlist_info[runlist_id]->runlist_lock);
/* WAR for Bug 2065990 */
nvgpu_tsg_disable_sched(g, tsg);
#ifdef CONFIG_NVGPU_LS_PMU
mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
PMU_MUTEX_ID_FIFO, &token);
#endif
ret = gv11b_fifo_preempt_locked(g, tsg->tsgid, ID_TYPE_TSG);
#ifdef CONFIG_NVGPU_LS_PMU
if (mutex_ret == 0) {
int err = nvgpu_pmu_lock_release(g, g->pmu, PMU_MUTEX_ID_FIFO,
&token);
if (err != 0) {
nvgpu_err(g, "PMU_MUTEX_ID_FIFO not released err=%d",
err);
}
}
#endif
/* WAR for Bug 2065990 */
nvgpu_tsg_enable_sched(g, tsg);
nvgpu_mutex_release(&f->runlist_info[runlist_id]->runlist_lock);
if (ret != 0) {
if (nvgpu_platform_is_silicon(g)) {
nvgpu_err(g, "preempt timed out for tsgid: %u, "
"ctxsw timeout will trigger recovery if needed",
tsg->tsgid);
} else {
nvgpu_rc_preempt_timeout(g, tsg);
}
}
return ret;
}


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -35,19 +35,17 @@
 #define FECS_MAILBOX_0_ACK_RESTORE 0x4U
 
-#ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
-int gk20a_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next)
-{
-	return nvgpu_runlist_reschedule(ch, preempt_next, true);
-}
-#endif
-
 u32 gk20a_runlist_count_max(void)
 {
 	return fifo_eng_runlist_base__size_1_v();
 }
 
 #ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
+int gk20a_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next)
+{
+	return nvgpu_runlist_reschedule(ch, preempt_next, true);
+}
+
 /* trigger host preempt of GR pending load ctx if that ctx is not for ch */
 int gk20a_fifo_reschedule_preempt_next(struct nvgpu_channel *ch,
 		bool wait_preempt)


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -30,11 +30,11 @@ struct nvgpu_tsg;
 struct gk20a;
 
 #ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
+int gk20a_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next);
 int gk20a_fifo_reschedule_preempt_next(struct nvgpu_channel *ch,
 		bool wait_preempt);
 #endif
 
 #ifdef CONFIG_NVGPU_HAL_NON_FUSA
-int gk20a_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next);
 u32 gk20a_runlist_count_max(void);
 #endif


@@ -35,6 +35,7 @@
 #include <nvgpu/fuse.h>
 #include <nvgpu/pbdma.h>
 #include <nvgpu/engines.h>
+#include <nvgpu/preempt.h>
 #include <nvgpu/regops.h>
 #include <nvgpu/gr/gr_falcon.h>
 #include <nvgpu/gr/gr.h>
@@ -936,9 +937,8 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 8_7))
 		.fifo_suspend = nvgpu_fifo_suspend,
 		.init_fifo_setup_hw = gv11b_init_fifo_setup_hw,
 		.preempt_channel = gv11b_fifo_preempt_channel,
-		.preempt_tsg = gv11b_fifo_preempt_tsg,
+		.preempt_tsg = nvgpu_fifo_preempt_tsg,
 		.preempt_trigger = gv11b_fifo_preempt_trigger,
-		.preempt_runlists_for_rc = gv11b_fifo_preempt_runlists_for_rc,
 		.preempt_poll_pbdma = gv11b_fifo_preempt_poll_pbdma,
 		.init_pbdma_map = gk20a_fifo_init_pbdma_map,
 		.is_preempt_pending = gv11b_fifo_is_preempt_pending,
@@ -1072,9 +1072,10 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 8_7))
 		.set_eng_method_buffer = gv11b_ramin_set_eng_method_buffer,
 	},
 	.runlist = {
-#ifdef NVGPU_CHANNEL_TSG_SCHEULING
+#ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
 		.reschedule = gv11b_runlist_reschedule,
-		.reschedule_preempt_next_locked = gk20a_fifo_reschedule_preempt_next,
+		.reschedule_preempt_next_locked =
+			gk20a_fifo_reschedule_preempt_next,
 #endif
 		.update_for_channel = nvgpu_runlist_update_for_channel,
 		.reload = nvgpu_runlist_reload,


@@ -21,6 +21,7 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
+#include <nvgpu/preempt.h>
 
 #include "hal/mm/mm_gm20b.h"
 #include "hal/mm/mm_gp10b.h"
@@ -961,9 +962,8 @@ static const struct gpu_ops tu104_ops = {
 		.fifo_suspend = nvgpu_fifo_suspend,
 		.init_fifo_setup_hw = tu104_init_fifo_setup_hw,
 		.preempt_channel = gv11b_fifo_preempt_channel,
-		.preempt_tsg = gv11b_fifo_preempt_tsg,
+		.preempt_tsg = nvgpu_fifo_preempt_tsg,
 		.preempt_trigger = gv11b_fifo_preempt_trigger,
-		.preempt_runlists_for_rc = gv11b_fifo_preempt_runlists_for_rc,
 		.preempt_poll_pbdma = gv11b_fifo_preempt_poll_pbdma,
 		.init_pbdma_map = gk20a_fifo_init_pbdma_map,
 		.is_preempt_pending = gv11b_fifo_is_preempt_pending,


@@ -218,7 +218,7 @@ void gv11b_fifo_recover(struct gk20a *g, u32 act_eng_bitmask,
	 * that all PBDMAs serving the engine are not loaded when engine is
	 * reset.
	 */
-	g->ops.fifo.preempt_runlists_for_rc(g, runlists_mask);
+	nvgpu_fifo_preempt_runlists_for_rc(g, runlists_mask);
	/*
	 * For each PBDMA which serves the runlist, poll to verify the TSG is no
	 * longer on the PBDMA and the engine phase of the preempt has started.


@@ -205,6 +205,10 @@
  * H/w defined value for Tsg ID type
  */
 #define ID_TYPE_TSG 1U
+/**
+ * S/w defined value for Runlist ID type
+ */
+#define ID_TYPE_RUNLIST 2U
 /**
  * S/w defined value for unknown ID type.
  */


@@ -92,25 +92,6 @@ struct gops_fifo {
	 */
	int (*preempt_tsg)(struct gk20a *g, struct nvgpu_tsg *tsg);

-	/**
-	 * @brief Preempt a set of runlists.
-	 *
-	 * @param g [in] Pointer to GPU driver struct.
-	 * @param runlists_mask [in] Bitmask of runlists to preempt.
-	 *
-	 * Preempt runlists in \a runlists_mask:
-	 * - Write h/w register to trigger preempt on runlists.
-	 * - All TSGs in those runlists are preempted.
-	 *
-	 * @note This HAL is called in case of critical error, and does
-	 * not poll PBDMAs or engines to wait for preempt completion.
-	 *
-	 * @note This HAL should be called with runlist lock held for all
-	 * the runlists in \a runlists_mask.
-	 */
-	void (*preempt_runlists_for_rc)(struct gk20a *g,
-			u32 runlists_bitmask);
-
	/**
	 * @brief Enable and configure FIFO.
	 *
@@ -179,8 +160,21 @@ struct gops_fifo {
	void (*cleanup_sw)(struct gk20a *g);
	int (*init_fifo_setup_hw)(struct gk20a *g);
	int (*preempt_channel)(struct gk20a *g, struct nvgpu_channel *ch);
-	void (*preempt_trigger)(struct gk20a *g,
-			u32 id, unsigned int id_type);
+	/**
+	 * @brief Preempt requested channel, TSG or runlist.
+	 *
+	 * @param g [in] Pointer to GPU driver struct.
+	 * @param id [in] TSG, channel or hardware runlist id.
+	 * @param id_type [in] Channel, TSG or runlist ID type.
+	 *
+	 * Depending on given \a id_type:
+	 * - Preempt channel
+	 * - Preempt TSG
+	 * - Preempt runlist
+	 *
+	 * @return None
+	 */
+	void (*preempt_trigger)(struct gk20a *g, u32 id, unsigned int id_type);
	int (*preempt_poll_pbdma)(struct gk20a *g, u32 tsgid,
			u32 pbdma_id);
	void (*init_pbdma_map)(struct gk20a *g,
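The id_type parameter is what keeps the common helpers chip-agnostic: each chip's preempt_trigger decides which register to write for each ID type. A minimal sketch of such a dispatch, modeled on gv11b_fifo_preempt_trigger from this change (example_fifo_preempt_trigger is a hypothetical name, register writes elided):

    void example_fifo_preempt_trigger(struct gk20a *g, u32 id,
            unsigned int id_type)
    {
        if (id_type == ID_TYPE_TSG) {
            /* write fifo_preempt_r() with fifo_preempt_id_f(id) */
        } else if (id_type == ID_TYPE_RUNLIST) {
            /* OR BIT32(id) into fifo_runlist_preempt_r() */
        } else {
            /* channel preempt is a noop on gv11b-class chips */
        }
    }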


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -72,4 +72,48 @@ int nvgpu_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch);
  */
 void nvgpu_preempt_poll_tsg_on_pbdma(struct gk20a *g,
 		struct nvgpu_tsg *tsg);
+
+/**
+ * @brief Preempt a set of runlists.
+ *
+ * @param g [in] Pointer to GPU driver struct.
+ * @param runlists_bitmask [in] Bitmask of runlists to preempt.
+ *
+ * Preempt runlists in \a runlists_bitmask:
+ * - Write h/w register to trigger preempt on runlists.
+ * - All TSGs in those runlists are preempted.
+ *
+ * @note This function is called in case of error recovery, and does
+ * not poll PBDMAs or engines to wait for preempt completion.
+ *
+ * @note This function should be called with the runlist lock held for
+ * all the runlists in \a runlists_bitmask.
+ */
+void nvgpu_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlists_bitmask);
+
+/**
+ * @brief Preempt TSG.
+ *
+ * @param g [in] Pointer to GPU driver struct.
+ * @param tsg [in] Pointer to TSG struct.
+ *
+ * Preempt TSG:
+ * - Acquire lock for active runlist.
+ * - Write h/w register to trigger TSG preempt for \a tsg.
+ * - Preemption mode (e.g. CTA or WFI) depends on the preemption
+ *   mode configured in the GR context.
+ * - Poll PBDMAs and engines status until preemption is complete,
+ *   or poll timeout occurs.
+ * - Release lock acquired for active runlist.
+ *
+ * On some chips, it is also necessary to disable scheduling
+ * before preempting the TSG.
+ *
+ * @see nvgpu_preempt_get_timeout
+ * @see nvgpu_gr_ctx::compute_preempt_mode
+ *
+ * @return 0 in case preemption succeeded, < 0 in case of failure.
+ * @retval -ETIMEDOUT when preemption was triggered, but did not
+ * complete within the preemption poll timeout.
+ */
+int nvgpu_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg);
+
 #endif /* NVGPU_PREEMPT_H */
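A hedged usage sketch for the newly exported nvgpu_fifo_preempt_tsg (the wrapper name and logging are illustrative; the real callers are g->ops.fifo.preempt_tsg sites such as gv11b_fifo_preempt_channel):

    static int example_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
    {
        int err = nvgpu_fifo_preempt_tsg(g, tsg);

        /* Per the doc comment above, -ETIMEDOUT means the preempt was
         * triggered but did not complete within the poll timeout; on
         * silicon the ctxsw timeout handler drives recovery, elsewhere
         * nvgpu_rc_preempt_timeout() has already been invoked. */
        if (err == -ETIMEDOUT) {
            nvgpu_err(g, "preempt timed out for tsgid: %u", tsg->tsgid);
        }
        return err;
    }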


@@ -153,9 +153,7 @@ gv11b_fifo_is_preempt_pending
 gv11b_fifo_mmu_fault_id_to_pbdma_id
 gv11b_fifo_preempt_channel
 gv11b_fifo_preempt_poll_pbdma
-gv11b_fifo_preempt_runlists_for_rc
 gv11b_fifo_preempt_trigger
-gv11b_fifo_preempt_tsg
 gv11b_get_litter_value
 gv11b_gpu_phys_addr
 gv11b_init_fifo_reset_enable_hw
@@ -411,6 +409,8 @@ nvgpu_fbp_remove_support
 nvgpu_fifo_cleanup_sw_common
 nvgpu_fifo_decode_pbdma_ch_eng_status
 nvgpu_fifo_init_support
+nvgpu_fifo_preempt_runlists_for_rc
+nvgpu_fifo_preempt_tsg
 nvgpu_fifo_suspend
 nvgpu_fifo_sw_quiesce
 nvgpu_finalize_poweron


@@ -32,6 +32,7 @@
 #include <nvgpu/gk20a.h>
 #include <nvgpu/fifo.h>
 #include <nvgpu/runlist.h>
+#include <nvgpu/preempt.h>
 #include <nvgpu/soc.h>
 #include <nvgpu/pbdma_status.h>
 #include <nvgpu/hw/gv11b/hw_fifo_gv11b.h>
@@ -135,7 +136,7 @@ int test_gv11b_fifo_preempt_runlists_for_rc(struct unit_module *m,
 			0U, 0U);
 	reg_val = nvgpu_readl(g, fifo_runlist_preempt_r());

-	gv11b_fifo_preempt_runlists_for_rc(g, runlist_mask);
+	nvgpu_fifo_preempt_runlists_for_rc(g, runlist_mask);

 	unit_assert(nvgpu_readl(g, fifo_runlist_preempt_r()) ==
 			(reg_val | runlist_mask), goto done);
@@ -345,7 +346,7 @@ done:
 }

 static void stub_fifo_preempt_trigger(struct gk20a *g, u32 id,
 		unsigned int id_type)
 {
 }
@@ -419,7 +420,7 @@ int test_gv11b_fifo_preempt_tsg(struct unit_module *m, struct gk20a *g,
 		branches & F_PREEMPT_TSG_PLATFORM_SILICON ?
 			true : false;

-	err = EXPECT_BUG(gv11b_fifo_preempt_tsg(g, tsg));
+	err = EXPECT_BUG(nvgpu_fifo_preempt_tsg(g, tsg));

 	if (branches & F_PREEMPT_TSG_PREEMPT_LOCKED_FAIL) {
 		if (branches & F_PREEMPT_TSG_PLATFORM_SILICON) {


@@ -699,10 +699,6 @@ static void mock_runlist_write_state(struct gk20a *g, u32 runlist_mask,
 {
 }

-static void mock_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlist_mask)
-{
-}
-
 int test_quiesce(struct unit_module *m, struct gk20a *g, void *args)
 {
 	int ret = UNIT_SUCCESS;
@@ -733,7 +729,6 @@ int test_quiesce(struct unit_module *m, struct gk20a *g, void *args)
 	/* mock out fifo HALs called during quiesce */
 	g->ops.runlist.write_state = mock_runlist_write_state;
-	g->ops.fifo.preempt_runlists_for_rc = mock_fifo_preempt_runlists_for_rc;

 	nvgpu_sw_quiesce(g);

 	/* wait for quiesce thread to complete */


@@ -36,6 +36,7 @@
 #include <nvgpu/vm.h>
 #include <nvgpu/tsg.h>
 #include <nvgpu/engines.h>
+#include <nvgpu/preempt.h>
 #include <nvgpu/nvgpu_init.h>
 #include <nvgpu/hw/gv11b/hw_fb_gv11b.h>
 #include <nvgpu/hw/gv11b/hw_gmmu_gv11b.h>
@@ -645,7 +646,7 @@ int test_handle_mmu_fault_common(struct unit_module *m,
 	g->ops.channel.unbind = gv11b_channel_unbind;
 	g->ops.channel.free_inst = nvgpu_channel_free_inst;
 	g->ops.tsg.disable = nvgpu_tsg_disable;
-	g->ops.fifo.preempt_tsg = gv11b_fifo_preempt_tsg;
+	g->ops.fifo.preempt_tsg = nvgpu_fifo_preempt_tsg;
 	g->aggressive_sync_destroy_thresh = 0U;
 	g->fifo.g = g;