mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: runlist_lock released before preempt timeout recovery
Release runlist_lock and then initiate recovery if preempt timed out.
Also do not issue preempt if ch, tsg or runlist id is invalid.

tsgid could be invalid for the call trace below:
gk20a_prepare_poweroff -> gk20a_channel_suspend ->
*_fifo_preempt_channel -> *_fifo_preempt_tsg

Bug 2065990
Bug 2043838

Change-Id: Ia1e3c134f06743e1258254a4a6f7256831706185
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1662656
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 4654d9abd1
parent 4d63729ac8
committed by mobile promotions
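The diff below renames __locked_fifo_preempt_timeout_rc to gk20a_fifo_preempt_timeout_rc and moves the call out of the locked preempt path into the callers, after they release runlist_lock. As a rough illustration of that flow, here is a simplified C sketch assuming the surrounding nvgpu driver context (the function name fifo_preempt_tsg_sketch is illustrative only; PMU mutex handling and error logging are omitted): invalid tsg or runlist ids bail out before any preempt is issued, and timeout recovery runs only once the lock has been dropped.

/*
 * Simplified sketch of the pattern this change introduces; not the
 * actual driver code. Uses only names that appear in the diff below.
 */
static int fifo_preempt_tsg_sketch(struct gk20a *g, u32 tsgid)
{
        struct fifo_gk20a *f = &g->fifo;
        u32 runlist_id;
        int ret;

        /* do not issue preempt for an invalid tsg or runlist id */
        if (tsgid == FIFO_INVAL_TSG_ID)
                return 0;

        runlist_id = f->tsg[tsgid].runlist_id;
        if (runlist_id == FIFO_INVAL_RUNLIST_ID)
                return 0;

        /* issue and poll the preempt while holding the runlist lock */
        nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
        ret = __locked_fifo_preempt(g, tsgid, true);
        nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);

        /* on timeout, recovery starts only after the lock is released */
        if (ret)
                gk20a_fifo_preempt_timeout_rc(g, tsgid, true);

        return ret;
}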
@@ -2721,7 +2721,7 @@ int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
         return ret;
 }
 
-void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
+void gk20a_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
                 unsigned int id_type)
 {
         if (id_type == ID_TYPE_TSG) {
@@ -2764,7 +2764,7 @@ int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
         int ret;
         unsigned int id_type;
 
-        nvgpu_log_fn(g, "%d", id);
+        nvgpu_log_fn(g, "id: %d is_tsg: %d", id, is_tsg);
 
         /* issue preempt */
         gk20a_fifo_issue_preempt(g, id, is_tsg);
@@ -2774,10 +2774,6 @@ int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
         /* wait for preempt */
         ret = g->ops.fifo.is_preempt_pending(g, id, id_type,
                         PREEMPT_TIMEOUT_RC);
-
-        if (ret)
-                __locked_fifo_preempt_timeout_rc(g, id, id_type);
 
         return ret;
 }
 
@@ -2789,7 +2785,9 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
         u32 mutex_ret = 0;
         u32 i;
 
-        nvgpu_log_fn(g, "%d", chid);
+        nvgpu_log_fn(g, "chid: %d", chid);
+        if (chid == FIFO_INVAL_CHANNEL_ID)
+                return 0;
 
         /* we have no idea which runlist we are using. lock all */
         for (i = 0; i < g->fifo.max_runlists; i++)
@@ -2805,6 +2803,9 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
         for (i = 0; i < g->fifo.max_runlists; i++)
                 nvgpu_mutex_release(&f->runlist_info[i].runlist_lock);
 
+        if (ret)
+                gk20a_fifo_preempt_timeout_rc(g, chid, false);
+
         return ret;
 }
 
@@ -2816,7 +2817,9 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
         u32 mutex_ret = 0;
         u32 i;
 
-        nvgpu_log_fn(g, "%d", tsgid);
+        nvgpu_log_fn(g, "tsgid: %d", tsgid);
+        if (tsgid == FIFO_INVAL_TSG_ID)
+                return 0;
 
         /* we have no idea which runlist we are using. lock all */
         for (i = 0; i < g->fifo.max_runlists; i++)
@@ -2832,6 +2835,9 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
         for (i = 0; i < g->fifo.max_runlists; i++)
                 nvgpu_mutex_release(&f->runlist_info[i].runlist_lock);
 
+        if (ret)
+                gk20a_fifo_preempt_timeout_rc(g, tsgid, true);
+
         return ret;
 }
 
@@ -44,6 +44,7 @@ enum {
 #define FIFO_INVAL_ENGINE_ID            ((u32)~0)
 #define FIFO_INVAL_CHANNEL_ID           ((u32)~0)
 #define FIFO_INVAL_TSG_ID               ((u32)~0)
+#define FIFO_INVAL_RUNLIST_ID           ((u32)~0)
 
 #define ID_TYPE_CHANNEL 0
 #define ID_TYPE_TSG 1
@@ -374,7 +375,7 @@ u32 gk20a_fifo_intr_0_error_mask(struct gk20a *g);
 int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id, unsigned int id_type,
                 unsigned int timeout_rc_type);
 int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg);
-void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
+void gk20a_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
                 unsigned int id_type);
 int gk20a_fifo_setup_ramfc(struct channel_gk20a *c,
                 u64 gpfifo_base, u32 gpfifo_entries,
@@ -757,6 +757,9 @@ int gv11b_fifo_preempt_channel(struct gk20a *g, u32 chid)
         struct fifo_gk20a *f = &g->fifo;
         u32 tsgid;
 
+        if (chid == FIFO_INVAL_CHANNEL_ID)
+                return 0;
+
         tsgid = f->channel[chid].tsgid;
         nvgpu_log_info(g, "chid:%d tsgid:%d", chid, tsgid);
 
@@ -813,10 +816,14 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
         u32 mutex_ret = 0;
         u32 runlist_id;
 
-        nvgpu_log_fn(g, "%d", tsgid);
+        nvgpu_log_fn(g, "tsgid: %d", tsgid);
+        if (tsgid == FIFO_INVAL_TSG_ID)
+                return 0;
 
         runlist_id = f->tsg[tsgid].runlist_id;
-        nvgpu_log_fn(g, "runlist_id %d", runlist_id);
+        nvgpu_log_fn(g, "runlist_id: %d", runlist_id);
+        if (runlist_id == FIFO_INVAL_RUNLIST_ID)
+                return 0;
 
         nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
 
@@ -829,6 +836,9 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 
         nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);
 
+        if (ret)
+                gk20a_fifo_preempt_timeout_rc(g, tsgid, true);
+
         return ret;
 }
 
@@ -888,7 +898,7 @@ static int __locked_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
                         timeout_rc_type);
 
         if (ret && (timeout_rc_type == PREEMPT_TIMEOUT_RC))
-                __locked_fifo_preempt_timeout_rc(g, id, id_type);
+                gk20a_fifo_preempt_timeout_rc(g, id, id_type);
 
         return ret;
 }