diff --git a/drivers/gpu/host1x/fence.c b/drivers/gpu/host1x/fence.c
index c77ce207..ac6e07c1 100644
--- a/drivers/gpu/host1x/fence.c
+++ b/drivers/gpu/host1x/fence.c
@@ -37,8 +37,7 @@ static bool host1x_syncpt_fence_enable_signaling(struct dma_fence *f)
 	if (host1x_syncpt_is_expired(sf->sp, sf->threshold))
 		return false;
 
-	/* One reference for interrupt path, one for timeout path. */
-	dma_fence_get(f);
+	/* Reference for interrupt path. */
 	dma_fence_get(f);
 
 	/*
@@ -46,11 +45,15 @@ static bool host1x_syncpt_fence_enable_signaling(struct dma_fence *f)
 	 * reference to any fences for which 'enable_signaling' has been
 	 * called (and that have not been signalled).
 	 *
-	 * We cannot (for now) normally guarantee that all fences get signalled.
-	 * As such, setup a timeout, so that long-lasting fences will get
-	 * reaped eventually.
+	 * We cannot currently always guarantee that all fences get signalled
+	 * or cancelled. As such, for such situations, set up a timeout, so
+	 * that long-lasting fences will get reaped eventually.
 	 */
-	schedule_delayed_work(&sf->timeout_work, msecs_to_jiffies(30000));
+	if (sf->timeout) {
+		/* Reference for timeout path. */
+		dma_fence_get(f);
+		schedule_delayed_work(&sf->timeout_work, msecs_to_jiffies(30000));
+	}
 
 	host1x_intr_add_fence_locked(sf->sp->host, sf);
 
@@ -80,7 +83,7 @@ void host1x_fence_signal(struct host1x_syncpt_fence *f)
 		return;
 	}
 
-	if (cancel_delayed_work(&f->timeout_work)) {
+	if (f->timeout && cancel_delayed_work(&f->timeout_work)) {
 		/*
 		 * We know that the timeout path will not be entered.
 		 * Safe to drop the timeout path's reference now.
@@ -99,8 +102,9 @@ static void do_fence_timeout(struct work_struct *work)
 		container_of(dwork, struct host1x_syncpt_fence, timeout_work);
 
 	if (atomic_xchg(&f->signaling, 1)) {
-		/* Already on interrupt path, drop timeout path reference. */
-		dma_fence_put(&f->base);
+		/* Already on interrupt path, drop timeout path reference if any. */
+		if (f->timeout)
+			dma_fence_put(&f->base);
 		return;
 	}
 
@@ -114,12 +118,12 @@ static void do_fence_timeout(struct work_struct *work)
 
 	dma_fence_set_error(&f->base, -ETIMEDOUT);
 	dma_fence_signal(&f->base);
-
-	/* Drop timeout path reference. */
-	dma_fence_put(&f->base);
+	if (f->timeout)
+		dma_fence_put(&f->base);
 }
 
-struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold)
+struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold,
+				      bool timeout)
 {
 	struct host1x_syncpt_fence *fence;
 
@@ -129,6 +133,7 @@ struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold)
 
 	fence->sp = sp;
 	fence->threshold = threshold;
+	fence->timeout = timeout;
 
 	dma_fence_init(&fence->base, &host1x_syncpt_fence_ops, &sp->fences.lock,
 		       dma_fence_context_alloc(1), 0);
@@ -154,3 +159,12 @@ int host1x_fence_extract(struct dma_fence *fence, u32 *id, u32 *threshold)
 	return 0;
 }
 EXPORT_SYMBOL(host1x_fence_extract);
+
+void host1x_fence_cancel(struct dma_fence *f)
+{
+	struct host1x_syncpt_fence *sf = to_host1x_fence(f);
+
+	schedule_delayed_work(&sf->timeout_work, 0);
+	flush_delayed_work(&sf->timeout_work);
+}
+EXPORT_SYMBOL(host1x_fence_cancel);
diff --git a/drivers/gpu/host1x/fence.h b/drivers/gpu/host1x/fence.h
index 4352d046..f3c644c7 100644
--- a/drivers/gpu/host1x/fence.h
+++ b/drivers/gpu/host1x/fence.h
@@ -13,6 +13,7 @@ struct host1x_syncpt_fence {
 
 	struct host1x_syncpt *sp;
 	u32 threshold;
+	bool timeout;
 
 	struct delayed_work timeout_work;
 
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index 00afe8fb..d8dff41e 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -328,7 +328,7 @@ static int channel_submit(struct host1x_job *job)
 	 * Create fence before submitting job to HW to avoid job completing
 	 * before the fence is set up.
 	 */
-	job->fence = host1x_fence_create(sp, syncval);
+	job->fence = host1x_fence_create(sp, syncval, true);
 	if (WARN(IS_ERR(job->fence), "Failed to create submit complete fence")) {
 		job->fence = NULL;
 	} else {
diff --git a/drivers/gpu/host1x/include/linux/host1x-next.h b/drivers/gpu/host1x/include/linux/host1x-next.h
index f4b986cb..6fc0bb28 100644
--- a/drivers/gpu/host1x/include/linux/host1x-next.h
+++ b/drivers/gpu/host1x/include/linux/host1x-next.h
@@ -226,8 +226,10 @@ u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
 void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
 					      u32 syncpt_id);
 
-struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold);
+struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold,
+				      bool timeout);
 int host1x_fence_extract(struct dma_fence *fence, u32 *id, u32 *threshold);
+void host1x_fence_cancel(struct dma_fence *fence);
 
 /*
  * host1x channel
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index 75f58ec2..2d200776 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -236,11 +236,13 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
 	else if (timeout == 0)
 		return -EAGAIN;
 
-	fence = host1x_fence_create(sp, thresh);
+	fence = host1x_fence_create(sp, thresh, false);
 	if (IS_ERR(fence))
 		return PTR_ERR(fence);
 
 	wait_err = dma_fence_wait_timeout(fence, true, timeout);
+	if (wait_err == 0)
+		host1x_fence_cancel(fence);
 	dma_fence_put(fence);
 
 	if (value)
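
Note (not part of the patch): a minimal sketch of the calling convention this change introduces. With timeout == false, host1x_fence_create() no longer arms the 30-second reaper work, so a caller that waits on such a fence must call host1x_fence_cancel() itself when the wait gives up, as the syncpt.c hunk above does. The function name example_wait_syncpt, the include lines, and the return-value handling below are illustrative assumptions, not code from this series.

/* Illustrative only; mirrors the pattern used in host1x_syncpt_wait() above. */
#include <linux/dma-fence.h>
#include <linux/err.h>
#include <linux/host1x-next.h>	/* assumes this tree's host1x-next include path */

static long example_wait_syncpt(struct host1x_syncpt *sp, u32 thresh, long timeout)
{
	struct dma_fence *fence;
	long wait_err;

	/* timeout == false: no automatic reaper, the caller owns cancellation. */
	fence = host1x_fence_create(sp, thresh, false);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	wait_err = dma_fence_wait_timeout(fence, true, timeout);
	if (wait_err == 0) {
		/*
		 * The wait timed out before the fence signalled. Cancel it so
		 * its timeout path runs now and signals it with -ETIMEDOUT,
		 * instead of leaving the fence armed indefinitely.
		 */
		host1x_fence_cancel(fence);
	}

	dma_fence_put(fence);

	/* <0: interrupted or error, 0: timed out, >0: remaining jiffies. */
	return wait_err;
}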