gpu: nvgpu: replace input parameter tsgid with pointer to struct tsg_gk20a
gv11b_fifo_preempt_tsg needs to access the runlist_id of the tsg as
well as pass the tsg pointer on to other public functions such as
gk20a_fifo_disable_tsg_sched. This qualifies preempt_tsg to take a
pointer to a struct tsg_gk20a instead of just the tsgid.

Jira NVGPU-1461

Change-Id: I01fbd2370b5746c2a597a0351e0301b0f7d25175
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1959068
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by: mobile promotions
parent: e5bebd880f
commit: 1e78d47f15
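At a glance, the diff below is a pure interface migration: every preempt_tsg implementation and call site moves from a raw tsgid to the TSG object itself. A minimal sketch of the before/after signatures, using hypothetical stand-in types rather than the driver's real definitions:

/* Hypothetical stand-ins for the nvgpu types -- just enough context
 * to show the signature change, not the driver's definitions. */
typedef unsigned int u32;

struct gk20a;                    /* opaque device state */
struct tsg_gk20a {
	u32 tsgid;               /* the id callers used to pass */
	u32 runlist_id;          /* now reachable without f->tsg[tsgid] */
};

/* Before: implementations had to index back into f->tsg[] to reach
 * per-TSG state such as runlist_id. */
int preempt_tsg_by_id(struct gk20a *g, u32 tsgid);

/* After: the pointer carries tsgid and runlist_id directly and can be
 * forwarded to helpers like gk20a_fifo_disable_tsg_sched(). */
int preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg);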
@@ -1462,7 +1462,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, struct tsg_gk20a *tsg, bool preempt)
 	g->ops.fifo.disable_tsg(tsg);
 
 	if (preempt) {
-		g->ops.fifo.preempt_tsg(g, tsg->tsgid);
+		g->ops.fifo.preempt_tsg(g, tsg);
 	}
 
 	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
@@ -2099,7 +2099,7 @@ int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch)
 	/* Disable TSG and examine status before unbinding channel */
 	g->ops.fifo.disable_tsg(tsg);
 
-	err = g->ops.fifo.preempt_tsg(g, tsg->tsgid);
+	err = g->ops.fifo.preempt_tsg(g, tsg);
 	if (err != 0) {
 		goto fail_enable_tsg;
 	}
@@ -2886,7 +2886,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
 	return ret;
 }
 
-int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
+int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 {
 	struct fifo_gk20a *f = &g->fifo;
 	int ret = 0;
@@ -2894,10 +2894,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	int mutex_ret = 0;
 	u32 i;
 
-	nvgpu_log_fn(g, "tsgid: %d", tsgid);
-	if (tsgid == FIFO_INVAL_TSG_ID) {
-		return 0;
-	}
+	nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
 
 	/* we have no idea which runlist we are using. lock all */
 	for (i = 0; i < g->fifo.max_runlists; i++) {
@@ -2906,7 +2903,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-	ret = __locked_fifo_preempt(g, tsgid, true);
+	ret = __locked_fifo_preempt(g, tsg->tsgid, true);
 
 	if (mutex_ret == 0) {
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -2919,9 +2916,11 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	if (ret != 0) {
 		if (nvgpu_platform_is_silicon(g)) {
 			nvgpu_err(g, "preempt timed out for tsgid: %u, "
-			"ctxsw timeout will trigger recovery if needed", tsgid);
+			"ctxsw timeout will trigger recovery if needed",
+			tsg->tsgid);
 		} else {
-			gk20a_fifo_preempt_timeout_rc(g, tsgid, ID_TYPE_TSG);
+			gk20a_fifo_preempt_timeout_rc(g,
+				tsg->tsgid, ID_TYPE_TSG);
 		}
 	}
 
@@ -2931,9 +2930,10 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch)
 {
 	int err;
+	struct tsg_gk20a *tsg = tsg_gk20a_from_ch(ch);
 
-	if (gk20a_is_channel_marked_as_tsg(ch)) {
-		err = g->ops.fifo.preempt_tsg(ch->g, ch->tsgid);
+	if (tsg != NULL) {
+		err = g->ops.fifo.preempt_tsg(ch->g, tsg);
 	} else {
 		err = g->ops.fifo.preempt_channel(ch->g, ch->chid);
 	}
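Worth noting in the hunk above: the gk20a_is_channel_marked_as_tsg() predicate disappears because tsg_gk20a_from_ch() already encodes the answer in its return value. A compilable sketch of that dispatch pattern, with hypothetical stand-ins for the nvgpu types and helpers:

/* Sketch only: stub types/functions stand in for the nvgpu ones. */
#include <stddef.h>

struct tsg { unsigned int tsgid; };
struct channel {
	unsigned int chid;
	struct tsg *bound_tsg;   /* NULL for a bare channel (assumption) */
};

static int preempt_tsg(struct tsg *tsg) { (void)tsg; return 0; }
static int preempt_channel(unsigned int chid) { (void)chid; return 0; }

/* Resolving the TSG once replaces the old "is this channel marked as
 * TSG?" predicate: a non-NULL pointer both answers the question and is
 * already the argument the TSG path needs. */
static int preempt(struct channel *ch)
{
	struct tsg *tsg = ch->bound_tsg;

	if (tsg != NULL)
		return preempt_tsg(tsg);
	return preempt_channel(ch->chid);
}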
@@ -254,7 +254,7 @@ void gk20a_fifo_isr(struct gk20a *g);
 u32 gk20a_fifo_nonstall_isr(struct gk20a *g);
 
 int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid);
-int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);
+int gk20a_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
 int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch);
 
 int gk20a_fifo_enable_engine_activity(struct gk20a *g,
@@ -22,6 +22,7 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
+#include <nvgpu/bug.h>
 #include <nvgpu/semaphore.h>
 #include <nvgpu/timers.h>
 #include <nvgpu/log.h>
@@ -841,17 +842,22 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 int gv11b_fifo_preempt_channel(struct gk20a *g, u32 chid)
 {
 	struct fifo_gk20a *f = &g->fifo;
-	u32 tsgid;
+	struct tsg_gk20a *tsg = NULL;
 
 	if (chid == FIFO_INVAL_CHANNEL_ID) {
 		return 0;
 	}
 
-	tsgid = f->channel[chid].tsgid;
-	nvgpu_log_info(g, "chid:%d tsgid:%d", chid, tsgid);
+	tsg = tsg_gk20a_from_ch(&f->channel[chid]);
+
+	if (tsg == NULL) {
+		return 0;
+	}
+
+	nvgpu_log_info(g, "chid:%d tsgid:%d", chid, tsg->tsgid);
 
 	/* Preempt tsg. Channel preempt is NOOP */
-	return g->ops.fifo.preempt_tsg(g, tsgid);
+	return g->ops.fifo.preempt_tsg(g, tsg);
 }
 
 /* TSG enable sequence applicable for Volta and onwards */
@@ -875,7 +881,7 @@ int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg)
 	return 0;
 }
 
-int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
+int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 {
 	struct fifo_gk20a *f = &g->fifo;
 	int ret = 0;
@@ -883,12 +889,9 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	u32 mutex_ret = 0;
 	u32 runlist_id;
 
-	nvgpu_log_fn(g, "tsgid: %d", tsgid);
-	if (tsgid == FIFO_INVAL_TSG_ID) {
-		return 0;
-	}
+	nvgpu_log_fn(g, "tsgid: %d", tsg->tsgid);
 
-	runlist_id = f->tsg[tsgid].runlist_id;
+	runlist_id = tsg->runlist_id;
 	nvgpu_log_fn(g, "runlist_id: %d", runlist_id);
 	if (runlist_id == FIFO_INVAL_RUNLIST_ID) {
 		return 0;
@@ -897,27 +900,27 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
 
 	/* WAR for Bug 2065990 */
-	gk20a_fifo_disable_tsg_sched(g, &f->tsg[tsgid]);
+	gk20a_fifo_disable_tsg_sched(g, tsg);
 
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-	ret = __locked_fifo_preempt(g, tsgid, true);
+	ret = __locked_fifo_preempt(g, tsg->tsgid, true);
 
 	if (mutex_ret == 0U) {
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 	}
 
 	/* WAR for Bug 2065990 */
-	gk20a_fifo_enable_tsg_sched(g, &f->tsg[tsgid]);
+	gk20a_fifo_enable_tsg_sched(g, tsg);
 
 	nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);
 
 	if (ret != 0) {
 		if (nvgpu_platform_is_silicon(g)) {
 			nvgpu_err(g, "preempt timed out for tsgid: %u, "
-			"ctxsw timeout will trigger recovery if needed", tsgid);
+			"ctxsw timeout will trigger recovery if needed", tsg->tsgid);
 		} else {
-			gk20a_fifo_preempt_timeout_rc(g, tsgid, true);
+			gk20a_fifo_preempt_timeout_rc(g, tsg->tsgid, true);
 		}
 	}
 
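The gv11b hunk above is the payoff for the signature change: with tsg->runlist_id in hand, the preempt locks a single runlist instead of all of them as the gk20a path does, and the Bug 2065990 WAR takes the pointer directly. A compilable sketch of the bracketing order, using pthread mutexes as hypothetical stand-ins for the nvgpu locking primitives (and assuming runlist_id < 2 for the toy array):

#include <pthread.h>

struct tsg { unsigned int tsgid; unsigned int runlist_id; };

/* Hypothetical stand-ins for the nvgpu primitives. */
static pthread_mutex_t runlist_lock[2] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};
static void sched_disable(struct tsg *tsg) { (void)tsg; }
static void sched_enable(struct tsg *tsg) { (void)tsg; }
static int do_preempt(unsigned int tsgid) { (void)tsgid; return 0; }

/* One runlist lock scopes the whole operation, and the WAR
 * disable/enable pair wraps the preempt symmetrically, even when the
 * preempt itself fails. */
static int preempt_tsg(struct tsg *tsg)
{
	int ret;

	pthread_mutex_lock(&runlist_lock[tsg->runlist_id]);
	sched_disable(tsg);            /* WAR: keep scheduler off the TSG */
	ret = do_preempt(tsg->tsgid);  /* the actual preempt request */
	sched_enable(tsg);             /* WAR undo, regardless of ret */
	pthread_mutex_unlock(&runlist_lock[tsg->runlist_id]);
	return ret;
}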
@@ -84,7 +84,7 @@ int gv11b_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next);
 int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 		unsigned int id_type);
 int gv11b_fifo_preempt_channel(struct gk20a *g, u32 chid);
-int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);
+int gv11b_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
 int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg);
 void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 		u32 id, unsigned int id_type, unsigned int rc_type,
@@ -651,7 +651,7 @@ struct gpu_ops {
 			u32 flags);
 		int (*resetup_ramfc)(struct channel_gk20a *c);
 		int (*preempt_channel)(struct gk20a *g, u32 chid);
-		int (*preempt_tsg)(struct gk20a *g, u32 tsgid);
+		int (*preempt_tsg)(struct gk20a *g, struct tsg_gk20a *tsg);
 		int (*enable_tsg)(struct tsg_gk20a *tsg);
 		int (*disable_tsg)(struct tsg_gk20a *tsg);
 		int (*tsg_verify_channel_status)(struct channel_gk20a *ch);
@@ -699,7 +699,7 @@ long nvgpu_ioctl_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
 			return err;
 		}
 		/* preempt TSG */
-		err = g->ops.fifo.preempt_tsg(g, tsg->tsgid);
+		err = g->ops.fifo.preempt_tsg(g, tsg);
 		gk20a_idle(g);
 		break;
 	}
@@ -473,7 +473,7 @@ int vgpu_fifo_preempt_channel(struct gk20a *g, u32 chid)
 	return err;
 }
 
-int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
+int vgpu_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
 {
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_tsg_preempt_params *p =
@@ -484,13 +484,13 @@ int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 
 	msg.cmd = TEGRA_VGPU_CMD_TSG_PREEMPT;
 	msg.handle = vgpu_get_handle(g);
-	p->tsg_id = tsgid;
+	p->tsg_id = tsg->tsgid;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	err = err ? err : msg.ret;
 
 	if (err) {
 		nvgpu_err(g,
-			"preempt tsg %u failed", tsgid);
+			"preempt tsg %u failed", tsg->tsgid);
 	}
 
 	return err;
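In the vgpu backend the RPC ABI still carries a raw id, so the pointer is simply dereferenced once when the message is marshaled. A small sketch of that boundary, with illustrative types in place of the tegra_vgpu ABI:

/* Sketch only: the message layout and names here are assumptions, not
 * the tegra_vgpu wire format. */
#include <string.h>

struct tsg { unsigned int tsgid; };
struct preempt_msg { unsigned int cmd; unsigned int tsg_id; };

enum { CMD_TSG_PREEMPT = 1 };

static int send_msg(const struct preempt_msg *msg) { (void)msg; return 0; }

static int vgpu_preempt_tsg(struct tsg *tsg)
{
	struct preempt_msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = CMD_TSG_PREEMPT;
	msg.tsg_id = tsg->tsgid;   /* dereference at the RPC boundary */
	return send_msg(&msg);
}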
@@ -42,7 +42,7 @@ int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base,
 		unsigned long acquire_timeout, u32 flags);
 int vgpu_fifo_init_engine_info(struct fifo_gk20a *f);
 int vgpu_fifo_preempt_channel(struct gk20a *g, u32 chid);
-int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);
+int vgpu_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
 int vgpu_fifo_update_runlist(struct gk20a *g, u32 runlist_id,
 		u32 chid, bool add, bool wait_for_finish);
 int vgpu_fifo_wait_engine_idle(struct gk20a *g);