Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
Synced 2025-12-22 17:36:20 +03:00
gpu: nvgpu: .preempt_tsg move to use runlist_id/tsgid
Pass runlist_id/tsgid instead of a TSG pointer so that .preempt_tsg can be reused on the server side.

Jira GVSCI-15770

Change-Id: Id9f477baa29cb63fb0e1d1650f4b1e6a2fa248c0
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2863441
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Ramesh Mylavarapu <rmylavarapu@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Committed by: mobile promotions
Parent: f20a5b412c
Commit: 8f5adab299
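At a glance, the change drops the struct nvgpu_tsg dependency from the .preempt_tsg HAL hook so that a caller holding only IDs (such as a vgpu server) can trigger preemption, and routes the existing pointer-based call sites through a new wrapper. A condensed view, assembled from the hunks below rather than introduced here:

/* Before: the hook needed the TSG object to find its runlist. */
int (*preempt_tsg)(struct gk20a *g, struct nvgpu_tsg *tsg);

/* After: plain IDs only. */
int (*preempt_tsg)(struct gk20a *g, u32 runlist_id, u32 tsgid);

/* New wrapper for pointer-based callers; INVAL_ID marks a TSG that is
 * not bound to a runlist, which the common path treats as a no-op. */
int nvgpu_tsg_preempt(struct gk20a *g, struct nvgpu_tsg *tsg)
{
        u32 runlist_id = tsg->runlist == NULL ? INVAL_ID : tsg->runlist->id;

        return g->ops.fifo.preempt_tsg(g, runlist_id, tsg->tsgid);
}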
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -40,8 +40,9 @@ u32 nvgpu_preempt_get_timeout(struct gk20a *g)
         return g->ctxsw_timeout_period_ms;
 }
 
-int nvgpu_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
+int nvgpu_fifo_preempt_tsg(struct gk20a *g, u32 runlist_id, u32 tsgid)
 {
+        struct nvgpu_runlist *runlist;
         int ret = 0;
         u32 preempt_retry_count = 10U;
         u32 preempt_retry_timeout =
@@ -51,17 +52,19 @@ int nvgpu_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
         int mutex_ret = 0;
 #endif
 
-        nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
+        nvgpu_log_fn(g, "tsgid: %d", tsgid);
 
-        if (tsg->runlist == NULL) {
+        if (runlist_id == INVAL_ID) {
                 return 0;
         }
 
+        runlist = g->fifo.runlists[runlist_id];
+
         do {
-                nvgpu_mutex_acquire(&tsg->runlist->runlist_lock);
+                nvgpu_mutex_acquire(&runlist->runlist_lock);
 
                 if (nvgpu_is_errata_present(g, NVGPU_ERRATA_2016608)) {
-                        nvgpu_runlist_set_state(g, BIT32(tsg->runlist->id),
+                        nvgpu_runlist_set_state(g, BIT32(runlist_id),
                                         RUNLIST_DISABLED);
                 }
 
@@ -69,7 +72,7 @@ int nvgpu_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
                 mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
                                 PMU_MUTEX_ID_FIFO, &token);
 #endif
-                g->ops.fifo.preempt_trigger(g, tsg->tsgid, ID_TYPE_TSG);
+                g->ops.fifo.preempt_trigger(g, tsgid, ID_TYPE_TSG);
 
                 /*
                  * Poll for preempt done. if stalling interrupts are pending
@@ -80,7 +83,7 @@ int nvgpu_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
                  * the engines hung and set the runlist reset_eng_bitmask
                  * and mark preemption completion.
                  */
-                ret = g->ops.fifo.is_preempt_pending(g, tsg->tsgid,
+                ret = g->ops.fifo.is_preempt_pending(g, tsgid,
                                 ID_TYPE_TSG, preempt_retry_count > 1U);
 
 #ifdef CONFIG_NVGPU_LS_PMU
@@ -93,11 +96,11 @@ int nvgpu_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
                 }
 #endif
                 if (nvgpu_is_errata_present(g, NVGPU_ERRATA_2016608)) {
-                        nvgpu_runlist_set_state(g, BIT32(tsg->runlist->id),
+                        nvgpu_runlist_set_state(g, BIT32(runlist_id),
                                         RUNLIST_ENABLED);
                 }
 
-                nvgpu_mutex_release(&tsg->runlist->runlist_lock);
+                nvgpu_mutex_release(&runlist->runlist_lock);
 
                 if (ret != -EAGAIN) {
                         break;
@@ -113,9 +116,9 @@ int nvgpu_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
                 if (nvgpu_platform_is_silicon(g)) {
                         nvgpu_err(g, "preempt timed out for tsgid: %u, "
                                 "ctxsw timeout will trigger recovery if needed",
-                                tsg->tsgid);
+                                tsgid);
                 } else {
-                        nvgpu_rc_preempt_timeout(g, tsg);
+                        nvgpu_rc_preempt_timeout(g, &g->fifo.tsg[tsgid]);
                 }
         }
         return ret;
@@ -127,7 +130,7 @@ int nvgpu_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch)
         struct nvgpu_tsg *tsg = nvgpu_tsg_from_ch(ch);
 
         if (tsg != NULL) {
-                err = g->ops.fifo.preempt_tsg(ch->g, tsg);
+                err = nvgpu_tsg_preempt(ch->g, tsg);
         } else {
                 err = g->ops.fifo.preempt_channel(ch->g, ch);
         }
@@ -574,7 +574,7 @@ static int nvgpu_tsg_unbind_channel_common(struct nvgpu_tsg *tsg,
         /* Disable TSG and examine status before unbinding channel */
         g->ops.tsg.disable(tsg);
 
-        err = g->ops.fifo.preempt_tsg(g, tsg);
+        err = nvgpu_tsg_preempt(g, tsg);
         if (err != 0) {
                 goto fail_enable_tsg;
         }
@@ -1604,7 +1604,7 @@ void nvgpu_tsg_abort(struct gk20a *g, struct nvgpu_tsg *tsg, bool preempt)
                  * operation will print the error and ctxsw timeout may trigger
                  * a recovery if needed.
                  */
-                (void)g->ops.fifo.preempt_tsg(g, tsg);
+                (void)nvgpu_tsg_preempt(g, tsg);
         }
 
         nvgpu_rwsem_down_read(&tsg->ch_list_lock);
@@ -1741,3 +1741,10 @@ int nvgpu_tsg_set_sched_exit_wait_for_errbar(struct nvgpu_channel *ch, bool enab
         return err;
 }
 #endif
+
+int nvgpu_tsg_preempt(struct gk20a *g, struct nvgpu_tsg *tsg)
+{
+        u32 runlist_id = tsg->runlist == NULL ? INVAL_ID : tsg->runlist->id;
+
+        return g->ops.fifo.preempt_tsg(g, runlist_id, tsg->tsgid);
+}
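With the wrapper in place, migrating a pointer-based call site is mechanical, as the remaining hunks show:

/* Old call shape, removed throughout this change: */
err = g->ops.fifo.preempt_tsg(g, tsg);

/* New call shape: */
err = nvgpu_tsg_preempt(g, tsg);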
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -195,7 +195,7 @@ int nvgpu_gr_setup_alloc_obj_ctx(struct nvgpu_channel *c, u32 class_num,
 
         g->ops.tsg.disable(tsg);
 
-        err = g->ops.fifo.preempt_tsg(g, tsg);
+        err = nvgpu_tsg_preempt(g, tsg);
         if (err != 0) {
                 nvgpu_err(g, "preempt failed %d", err);
                 goto enable_tsg;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -60,7 +60,7 @@ int vgpu_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch)
         return err;
 }
 
-int vgpu_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
+int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 runlist_id, u32 tsgid)
 {
         struct tegra_vgpu_cmd_msg msg;
         struct tegra_vgpu_tsg_preempt_params *p =
@@ -71,13 +71,14 @@ int vgpu_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
 
         msg.cmd = TEGRA_VGPU_CMD_TSG_PREEMPT;
         msg.handle = vgpu_get_handle(g);
-        p->tsg_id = tsg->tsgid;
+        p->runlist_id = runlist_id;
+        p->tsg_id = tsgid;
         err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
         err = err ? err : msg.ret;
 
         if (err) {
                 nvgpu_err(g,
-                        "preempt tsg %u failed", tsg->tsgid);
+                        "preempt tsg %u failed", tsgid);
         }
 
         return err;
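On the server side, the runlist_id now carried in the RPC params lets the TSG preempt command be serviced without reconstructing an nvgpu_tsg. A hypothetical sketch; only tegra_vgpu_tsg_preempt_params and the .preempt_tsg signature come from this change, the handler itself is illustrative:

/* Hypothetical server-side handler for TEGRA_VGPU_CMD_TSG_PREEMPT. */
static int handle_tsg_preempt(struct gk20a *g,
                struct tegra_vgpu_tsg_preempt_params *p)
{
        /* No TSG pointer needed: forward the IDs straight to the HAL. */
        return g->ops.fifo.preempt_tsg(g, p->runlist_id, p->tsg_id);
}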
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -28,6 +28,6 @@ struct nvgpu_channel;
 struct nvgpu_tsg;
 
 int vgpu_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch);
-int vgpu_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg);
+int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 runlist_id, u32 tsgid);
 
 #endif
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -144,14 +144,17 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch)
         return ret;
 }
 
-int gk20a_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
+int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 runlist_id, u32 tsgid)
 {
         int ret = 0;
 #ifdef CONFIG_NVGPU_LS_PMU
         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
         int mutex_ret = 0;
 #endif
-        nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
+
+        (void)runlist_id;
+
+        nvgpu_log_fn(g, "tsgid: %d", tsgid);
 
         /* we have no idea which runlist we are using. lock all */
         nvgpu_runlist_lock_active_runlists(g);
@@ -159,7 +162,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
         mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
                         PMU_MUTEX_ID_FIFO, &token);
 #endif
-        ret = gk20a_fifo_preempt_locked(g, tsg->tsgid, ID_TYPE_TSG);
+        ret = gk20a_fifo_preempt_locked(g, tsgid, ID_TYPE_TSG);
 #ifdef CONFIG_NVGPU_LS_PMU
         if (mutex_ret == 0) {
                 if (nvgpu_pmu_lock_release(g, g->pmu,
@@ -174,10 +177,10 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
                 if (nvgpu_platform_is_silicon(g)) {
                         nvgpu_err(g, "preempt timed out for tsgid: %u, "
                                 "ctxsw timeout will trigger recovery if needed",
-                                tsg->tsgid);
+                                tsgid);
                 } else {
-                        nvgpu_err(g, "preempt TSG %d timeout", tsg->tsgid);
-                        nvgpu_rc_preempt_timeout(g, tsg);
+                        nvgpu_err(g, "preempt TSG %d timeout", tsgid);
+                        nvgpu_rc_preempt_timeout(g, &g->fifo.tsg[tsgid]);
                 }
         }
 
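Note that the gk20a implementation accepts runlist_id but does not use it: as its in-code comment says, it has no idea which runlist is in use and therefore locks all active runlists, so the (void)runlist_id cast exists only to mark the parameter as intentionally unused.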
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -30,7 +30,7 @@ struct nvgpu_tsg;
 
 void gk20a_fifo_preempt_trigger(struct gk20a *g, u32 id, unsigned int id_type);
 int gk20a_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch);
-int gk20a_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg);
+int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 runlist_id, u32 tsgid);
 int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
                 unsigned int id_type, bool preempt_retries_left);
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -376,5 +376,5 @@ int gv11b_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch)
         nvgpu_log_info(g, "chid:%d tsgid:%d", ch->chid, tsg->tsgid);
 
         /* Preempt tsg. Channel preempt is NOOP */
-        return g->ops.fifo.preempt_tsg(g, tsg);
+        return nvgpu_tsg_preempt(g, tsg);
 }
@@ -323,7 +323,7 @@ int gv11b_tsg_add_subctx_channel_hw(struct nvgpu_channel *ch, bool replayable)
         nvgpu_mutex_acquire(&tsg->ctx_init_lock);
 
         g->ops.tsg.disable(tsg);
-        err = g->ops.fifo.preempt_tsg(g, tsg);
+        err = nvgpu_tsg_preempt(g, tsg);
         if (err != 0) {
                 g->ops.tsg.enable(tsg);
                 nvgpu_mutex_release(&tsg->ctx_init_lock);
@@ -1,7 +1,7 @@
 /*
  * GK20A Graphics
  *
- * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -67,7 +67,7 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
 
         g->ops.tsg.disable(tsg);
 
-        ret = g->ops.fifo.preempt_tsg(g, tsg);
+        ret = nvgpu_tsg_preempt(g, tsg);
         if (ret != 0) {
                 nvgpu_err(g, "failed to preempt TSG");
                 goto out;
@@ -130,7 +130,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 
         g->ops.tsg.disable(tsg);
 
-        ret = g->ops.fifo.preempt_tsg(g, tsg);
+        ret = nvgpu_tsg_preempt(g, tsg);
         if (ret != 0) {
                 nvgpu_err(g, "failed to preempt TSG");
                 goto out;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -70,7 +70,8 @@ struct gops_fifo {
          * @brief Preempt TSG.
          *
          * @param g [in]  Pointer to GPU driver struct.
-         * @param tsg [in]  Pointer to TSG struct.
+         * @param runlist_id [in]  Runlist ID.
+         * @param tsgid [in]  TSG ID.
          *
          * - Acquire lock for active runlist.
          * - Write h/w register to trigger TSG preempt for \a tsg.
@@ -90,7 +91,7 @@ struct gops_fifo {
          * @retval -ETIMEDOUT when preemption was triggered, but did not
          *         complete within preemption poll timeout.
          */
-        int (*preempt_tsg)(struct gk20a *g, struct nvgpu_tsg *tsg);
+        int (*preempt_tsg)(struct gk20a *g, u32 runlist_id, u32 tsgid);
 
         /**
          * @brief Enable and configure FIFO.
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -94,7 +94,8 @@ void nvgpu_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlists_bitmask);
  * @brief Preempt TSG.
  *
  * @param g [in]  Pointer to GPU driver struct.
- * @param tsg [in]  Pointer to TSG struct.
+ * @param runlist_id [in]  Runlist ID.
+ * @param tsgid [in]  TSG ID.
  *
  * Preempt TSG:
  * - Acquire lock for active runlist.
@@ -115,5 +116,5 @@ void nvgpu_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlists_bitmask);
  * @retval -ETIMEDOUT when preemption was triggered, but did not
  *         complete within preemption poll timeout.
  */
-int nvgpu_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg);
+int nvgpu_fifo_preempt_tsg(struct gk20a *g, u32 runlist_id, u32 tsgid);
 #endif /* NVGPU_PREEMPT_H */
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -941,4 +941,15 @@ void nvgpu_tsg_reset_faulted_eng_pbdma(struct gk20a *g, struct nvgpu_tsg *tsg,
 int nvgpu_tsg_set_mmu_debug_mode(struct nvgpu_channel *ch, bool enable);
 int nvgpu_tsg_set_sched_exit_wait_for_errbar(struct nvgpu_channel *ch, bool enable);
 #endif
+
+/**
+ * @brief Preempt a tsg
+ *
+ * @param g [in]  The GPU driver struct.
+ * @param tsg [in]  Pointer to TSG struct.
+ *
+ * Preempt the tsg.
+ */
+int nvgpu_tsg_preempt(struct gk20a *g, struct nvgpu_tsg *tsg);
+
 #endif /* NVGPU_TSG_H */
@@ -385,6 +385,7 @@ struct tegra_vgpu_tsg_bind_unbind_channel_params {
 };
 
 struct tegra_vgpu_tsg_preempt_params {
+        u32 runlist_id;
         u32 tsg_id;
 };
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -1203,7 +1203,7 @@ long nvgpu_ioctl_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
                         return err;
                 }
                 /* preempt TSG */
-                err = g->ops.fifo.preempt_tsg(g, tsg);
+                err = nvgpu_tsg_preempt(g, tsg);
                 gk20a_idle(g);
                 break;
         }
@@ -1536,9 +1536,9 @@ static const char *f_channel_suspend_resume[] = {
         "work_completion_cancel_sync",
 };
 
-static int stub_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
+static int stub_fifo_preempt_tsg(struct gk20a *g, u32 runlist_id, u32 tsgid)
 {
-        stub[0].tsgid = tsg->tsgid;
+        stub[0].tsgid = tsgid;
         return 0;
 }
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -149,7 +149,7 @@ static int stub_pbdma_handle_intr(struct gk20a *g, u32 pbdma_id, bool recover)
         return 0;
 }
 
-static int stub_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
+static int stub_fifo_preempt_tsg(struct gk20a *g, u32 runlist_id, u32 tsgid)
 {
         return 1;
 }
@@ -276,7 +276,8 @@ int test_gv11b_fifo_preempt_tsg(struct unit_module *m, struct gk20a *g,
                         branches & F_PREEMPT_TSG_PLATFORM_SILICON ?
                         true : false;
 
-                err = EXPECT_BUG(nvgpu_fifo_preempt_tsg(g, tsg));
+                err = EXPECT_BUG(nvgpu_fifo_preempt_tsg(g,
+                                tsg->runlist->id, tsg->tsgid));
 
                 if (branches & F_PREEMPT_TSG_PREEMPT_LOCKED_FAIL) {
                         if (branches & F_PREEMPT_TSG_PLATFORM_SILICON) {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -106,9 +106,9 @@ static int stub_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch)
         return 0;
 }
 
-static int stub_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
+static int stub_fifo_preempt_tsg(struct gk20a *g, u32 runlist_id, u32 tsgid)
 {
-        stub[0].tsgid = tsg->tsgid;
+        stub[0].tsgid = tsgid;
         return 0;
 }
 
@@ -418,7 +418,7 @@ done:
 #define F_TSG_UNBIND_CHANNEL_LAST       BIT(9)
 
 static int stub_fifo_preempt_tsg_EINVAL(
-                struct gk20a *g, struct nvgpu_tsg *tsg)
+                struct gk20a *g, u32 runlist_id, u32 tsgid)
 {
         return -EINVAL;
 }
@@ -1305,9 +1305,9 @@ static const char *f_tsg_abort[] = {
         "non_referenceable"
 };
 
-static int stub_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
+static int stub_fifo_preempt_tsg(struct gk20a *g, u32 runlist_id, u32 tsgid)
 {
-        stub[0].tsgid = tsg->tsgid;
+        stub[0].tsgid = tsgid;
         return 0;
 }
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -75,8 +75,7 @@ struct gr_gops_org {
         int (*wait_idle)(struct gk20a *g);
         int (*ctrl_ctxsw)(struct gk20a *g, u32 fecs_method,
                         u32 data, u32 *ret_val);
-        int (*fifo_preempt_tsg)(struct gk20a *g,
-                        struct nvgpu_tsg *tsg);
+        int (*fifo_preempt_tsg)(struct gk20a *g, u32 runlist_id, u32 tsgid);
         bool (*is_valid)(u32 class_num);
         bool (*is_valid_compute)(u32 class_num);
 };
@@ -129,7 +128,7 @@ static int stub_gr_falcon_ctrl_ctxsw(struct gk20a *g, u32 fecs_method,
         return 0;
 }
 
-static int stub_gr_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
+static int stub_gr_fifo_preempt_tsg(struct gk20a *g, u32 runlist_id, u32 tsgid)
 {
         return -1;
 }