Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: use TSG recover API
Use the TSG-specific API gk20a_fifo_recover_tsg() in the following cases:

- IOCTL_CHANNEL_FORCE_RESET: to force-reset a channel that is part of a TSG,
  reset all of the TSG's channels
- pbdma intr handling: if a pbdma interrupt requires a reset and the channel
  is part of a TSG, recover the entire TSG
- TSG preempt failure: when a TSG preempt times out, use the TSG recover API

Use the preempt_tsg() API to preempt if the channel is part of a TSG.

Add the two generic APIs below, which take care of preempting/recovering
either the channel or the TSG as required:
  gk20a_fifo_preempt()
  gk20a_fifo_force_reset_ch()

Bug 1470692

Change-Id: I8d46e252af79136be85a9a2accf8b51bd924ca8c
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/497875
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Committed by: Dan Willemsen
Parent: 2f232348e6
Commit: 1c7dcfdeef
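Illustration (not part of the commit): a minimal caller-side sketch of how the two new generic helpers are meant to be used. The wrapper function and its name below are hypothetical; only gk20a_fifo_preempt() and gk20a_fifo_force_reset_ch() come from this change, and both dispatch internally between the bare-channel and TSG paths, as the diff below shows.

/* Hypothetical caller, for illustration only; assumes the usual gk20a
 * driver context (struct channel_gk20a and friends from gk20a.h). */
static int example_reset_faulted_channel(struct channel_gk20a *ch)
{
	int err;

	/* Preempts the channel, or its whole TSG if it is bound to one. */
	err = gk20a_fifo_preempt(ch->g, ch);
	if (err) {
		/* Escalate: set the error notifier and recover either the
		 * single channel or the entire TSG, as decided inside
		 * gk20a_fifo_force_reset_ch(). */
		err = gk20a_fifo_force_reset_ch(ch, true);
	}

	return err;
}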
@@ -2293,8 +2293,6 @@ long gk20a_channel_ioctl(struct file *filp,
 		gk20a_idle(dev);
 		break;
 	case NVHOST_IOCTL_CHANNEL_PREEMPT:
-		if (gk20a_is_channel_marked_as_tsg(ch))
-			return -EINVAL;
 		err = gk20a_busy(dev);
 		if (err) {
 			dev_err(&dev->dev,
@@ -2302,8 +2300,7 @@ long gk20a_channel_ioctl(struct file *filp,
 				__func__, cmd);
 			return err;
 		}
-		/* preempt channel */
-		err = gk20a_fifo_preempt_channel(ch->g, ch->hw_chid);
+		err = gk20a_fifo_preempt(ch->g, ch);
 		gk20a_idle(dev);
 		break;
 	case NVHOST_IOCTL_CHANNEL_FORCE_RESET:
@@ -2314,9 +2311,7 @@ long gk20a_channel_ioctl(struct file *filp,
 				__func__, cmd);
 			return err;
 		}
-		gk20a_set_error_notifier(ch,
-			NVHOST_CHANNEL_RESETCHANNEL_VERIF_ERROR);
-		gk20a_fifo_recover_ch(ch->g, ch->hw_chid, true);
+		err = gk20a_fifo_force_reset_ch(ch, true);
 		gk20a_idle(dev);
 		break;
 	default:
@@ -1291,6 +1291,29 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
 	g->ops.fifo.trigger_mmu_fault(g, engine_ids);
 }
 
+int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch, bool verbose)
+{
+	struct tsg_gk20a *tsg = NULL;
+	struct channel_gk20a *ch_tsg = NULL;
+
+	if (gk20a_is_channel_marked_as_tsg(ch)) {
+		tsg = &ch->g->fifo.tsg[ch->hw_chid];
+
+		mutex_lock(&tsg->ch_list_lock);
+		list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) {
+			gk20a_set_error_notifier(ch_tsg,
+				NVHOST_CHANNEL_RESETCHANNEL_VERIF_ERROR);
+		}
+		mutex_unlock(&tsg->ch_list_lock);
+		gk20a_fifo_recover_tsg(ch->g, ch->tsgid, verbose);
+	} else {
+		gk20a_set_error_notifier(ch,
+			NVHOST_CHANNEL_RESETCHANNEL_VERIF_ERROR);
+		gk20a_fifo_recover_ch(ch->g, ch->hw_chid, verbose);
+	}
+
+	return 0;
+}
 
 static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
 {
@@ -1482,13 +1505,26 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
 	if (reset) {
 		/* Remove the channel from runlist */
 		u32 status = gk20a_readl(g, fifo_pbdma_status_r(pbdma_id));
-		u32 hw_chid = fifo_pbdma_status_id_v(status);
+		u32 id = fifo_pbdma_status_id_v(status);
 		if (fifo_pbdma_status_id_type_v(status)
 				== fifo_pbdma_status_id_type_chid_v()) {
-			struct channel_gk20a *ch = &f->channel[hw_chid];
+			struct channel_gk20a *ch = &f->channel[id];
+
 			gk20a_set_error_notifier(ch,
 				NVHOST_CHANNEL_PBDMA_ERROR);
-			gk20a_fifo_recover_ch(g, hw_chid, true);
+			gk20a_fifo_recover_ch(g, id, true);
+		} else if (fifo_pbdma_status_id_type_v(status)
+				== fifo_pbdma_status_id_type_tsgid_v()) {
+			struct tsg_gk20a *tsg = &f->tsg[id];
+			struct channel_gk20a *ch = NULL;
+
+			mutex_lock(&tsg->ch_list_lock);
+			list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
+				gk20a_set_error_notifier(ch,
+					NVHOST_CHANNEL_PBDMA_ERROR);
+			}
+			mutex_unlock(&tsg->ch_list_lock);
+			gk20a_fifo_recover_tsg(g, id, true);
 		}
 	}
 
@@ -1606,9 +1642,19 @@ static int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
 
 	if (ret) {
 		if (is_tsg) {
-			/* TODO: recovery for TSG */
+			struct tsg_gk20a *tsg = &g->fifo.tsg[id];
+			struct channel_gk20a *ch = NULL;
+
 			gk20a_err(dev_from_gk20a(g),
 				"preempt TSG %d timeout\n", id);
+
+			mutex_lock(&tsg->ch_list_lock);
+			list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
+				gk20a_set_error_notifier(ch,
+					NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
+			}
+			mutex_unlock(&tsg->ch_list_lock);
+			gk20a_fifo_recover_tsg(g, id, true);
 		} else {
 			struct channel_gk20a *ch = &g->fifo.channel[id];
 
@@ -1678,6 +1724,18 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	return ret;
 }
 
+int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch)
+{
+	int err;
+
+	if (gk20a_is_channel_marked_as_tsg(ch))
+		err = gk20a_fifo_preempt_tsg(ch->g, ch->tsgid);
+	else
+		err = gk20a_fifo_preempt_channel(ch->g, ch->hw_chid);
+
+	return err;
+}
+
 int gk20a_fifo_enable_engine_activity(struct gk20a *g,
 		struct fifo_engine_info_gk20a *eng_info)
 {
@@ -147,6 +147,7 @@ void gk20a_fifo_nonstall_isr(struct gk20a *g);
 
 int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid);
 int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);
+int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch);
 
 int gk20a_fifo_enable_engine_activity(struct gk20a *g,
 		struct fifo_engine_info_gk20a *eng_info);
@@ -164,6 +165,7 @@ bool gk20a_fifo_mmu_fault_pending(struct gk20a *g);
 void gk20a_fifo_recover(struct gk20a *g, u32 engine_ids, bool verbose);
 void gk20a_fifo_recover_ch(struct gk20a *g, u32 hw_chid, bool verbose);
 void gk20a_fifo_recover_tsg(struct gk20a *g, u32 tsgid, bool verbose);
+int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch, bool verbose);
 int gk20a_init_fifo_reset_enable_hw(struct gk20a *g);
 void gk20a_init_fifo(struct gpu_ops *gops);
 