diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index aada30656..22dc1d60a 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -2721,7 +2721,7 @@ int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 	return ret;
 }
 
-void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
+void gk20a_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
 		unsigned int id_type)
 {
 	if (id_type == ID_TYPE_TSG) {
@@ -2764,7 +2764,7 @@ int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
 	int ret;
 	unsigned int id_type;
 
-	nvgpu_log_fn(g, "%d", id);
+	nvgpu_log_fn(g, "id: %d is_tsg: %d", id, is_tsg);
 
 	/* issue preempt */
 	gk20a_fifo_issue_preempt(g, id, is_tsg);
@@ -2774,10 +2774,6 @@ int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
 	/* wait for preempt */
 	ret = g->ops.fifo.is_preempt_pending(g, id, id_type,
 					PREEMPT_TIMEOUT_RC);
-
-	if (ret)
-		__locked_fifo_preempt_timeout_rc(g, id, id_type);
-
 	return ret;
 }
 
@@ -2789,7 +2785,9 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
 	u32 mutex_ret = 0;
 	u32 i;
 
-	nvgpu_log_fn(g, "%d", chid);
+	nvgpu_log_fn(g, "chid: %d", chid);
+	if (chid == FIFO_INVAL_CHANNEL_ID)
+		return 0;
 
 	/* we have no idea which runlist we are using. lock all */
 	for (i = 0; i < g->fifo.max_runlists; i++)
@@ -2805,6 +2803,9 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
 	for (i = 0; i < g->fifo.max_runlists; i++)
 		nvgpu_mutex_release(&f->runlist_info[i].runlist_lock);
 
+	if (ret)
+		gk20a_fifo_preempt_timeout_rc(g, chid, false);
+
 	return ret;
 }
 
@@ -2816,7 +2817,9 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	u32 mutex_ret = 0;
 	u32 i;
 
-	nvgpu_log_fn(g, "%d", tsgid);
+	nvgpu_log_fn(g, "tsgid: %d", tsgid);
+	if (tsgid == FIFO_INVAL_TSG_ID)
+		return 0;
 
 	/* we have no idea which runlist we are using. lock all */
 	for (i = 0; i < g->fifo.max_runlists; i++)
@@ -2832,6 +2835,9 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	for (i = 0; i < g->fifo.max_runlists; i++)
 		nvgpu_mutex_release(&f->runlist_info[i].runlist_lock);
 
+	if (ret)
+		gk20a_fifo_preempt_timeout_rc(g, tsgid, true);
+
 	return ret;
 }
 
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index 7216302cd..5866dd1b5 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -44,6 +44,7 @@ enum {
 #define FIFO_INVAL_ENGINE_ID		((u32)~0)
 #define FIFO_INVAL_CHANNEL_ID		((u32)~0)
 #define FIFO_INVAL_TSG_ID		((u32)~0)
+#define FIFO_INVAL_RUNLIST_ID		((u32)~0)
 
 #define ID_TYPE_CHANNEL			0
 #define ID_TYPE_TSG			1
@@ -374,7 +375,7 @@ u32 gk20a_fifo_intr_0_error_mask(struct gk20a *g);
 int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 		unsigned int id_type, unsigned int timeout_rc_type);
 int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg);
-void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
+void gk20a_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
 		unsigned int id_type);
 int gk20a_fifo_setup_ramfc(struct channel_gk20a *c, u64 gpfifo_base,
 		u32 gpfifo_entries,
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index 9843c7de2..30e030929 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -757,6 +757,9 @@ int gv11b_fifo_preempt_channel(struct gk20a *g, u32 chid)
 	struct fifo_gk20a *f = &g->fifo;
 	u32 tsgid;
 
+	if (chid == FIFO_INVAL_CHANNEL_ID)
+		return 0;
+
 	tsgid = f->channel[chid].tsgid;
 	nvgpu_log_info(g, "chid:%d tsgid:%d", chid, tsgid);
 
@@ -813,10 +816,14 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	u32 mutex_ret = 0;
 	u32 runlist_id;
 
-	nvgpu_log_fn(g, "%d", tsgid);
+	nvgpu_log_fn(g, "tsgid: %d", tsgid);
+	if (tsgid == FIFO_INVAL_TSG_ID)
+		return 0;
 
 	runlist_id = f->tsg[tsgid].runlist_id;
-	nvgpu_log_fn(g, "runlist_id %d", runlist_id);
+	nvgpu_log_fn(g, "runlist_id: %d", runlist_id);
+	if (runlist_id == FIFO_INVAL_RUNLIST_ID)
+		return 0;
 
 	nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
 
@@ -829,6 +836,9 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 
 	nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);
 
+	if (ret)
+		gk20a_fifo_preempt_timeout_rc(g, tsgid, true);
+
 	return ret;
 }
 
@@ -888,7 +898,7 @@ static int __locked_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
 					timeout_rc_type);
 
 	if (ret && (timeout_rc_type == PREEMPT_TIMEOUT_RC))
-		__locked_fifo_preempt_timeout_rc(g, id, id_type);
+		gk20a_fifo_preempt_timeout_rc(g, id, id_type);
 
 	return ret;
 }