author		Mikko Perttunen <mperttunen@nvidia.com>	2023-01-19 16:09:21 +0300
committer	Thierry Reding <treding@nvidia.com>	2023-01-26 17:55:38 +0300
commit		d5179020f5ce44fd449790a9c12ef6c1a90a2ca7 (patch)
tree		b23507f69bf4ea56a40f7261a67ad8a8a04c5f71 /drivers/gpu/host1x/fence.c
parent		625d4ffb438cacc9b1ebaa48748cdc7171587cdc (diff)
gpu: host1x: External timeout/cancellation for fences
Currently all fences have a 30 second timeout to ensure they are
cleaned up if the fence never completes otherwise. However, this
one-size-fits-all solution doesn't actually fit every case, such as
syncpoint waiting, where we want to be able to have timeouts longer
than 30 seconds. As such, we want to be able to give control over
fence cancellation to the caller (and maybe eventually get rid of the
internal timeout altogether).

Here we add this cancellation mechanism by essentially adding a
function for entering the timeout path by function call, and changing
the syncpoint wait function to use it.

Signed-off-by: Mikko Perttunen <mperttunen@nvidia.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
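For illustration, a minimal sketch of the caller-side pattern this
patch enables: create a fence without the internal 30 second timeout,
wait on it for a caller-chosen period, and cancel it explicitly if the
wait fails. example_syncpt_wait() and its error handling are
assumptions for the sketch, not the actual host1x_syncpt_wait() change.

	#include <linux/dma-fence.h>
	#include <linux/err.h>
	#include <linux/host1x.h>

	static int example_syncpt_wait(struct host1x_syncpt *sp, u32 threshold,
				       long timeout_jiffies)
	{
		struct dma_fence *fence;
		long ret;

		/* 'false': skip the internal 30 second timeout path. */
		fence = host1x_fence_create(sp, threshold, false);
		if (IS_ERR(fence))
			return PTR_ERR(fence);

		/* Wait with a caller-chosen timeout instead. */
		ret = dma_fence_wait_timeout(fence, true, timeout_jiffies);
		if (ret <= 0) {
			/* Enter the timeout path by function call. */
			host1x_fence_cancel(fence);
		}

		dma_fence_put(fence);

		if (ret == 0)
			return -ETIMEDOUT;
		return ret < 0 ? ret : 0;
	}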
Diffstat (limited to 'drivers/gpu/host1x/fence.c')
-rw-r--r--	drivers/gpu/host1x/fence.c | 40 +++++++++++++++++++++++++++-------------
1 file changed, 27 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/host1x/fence.c b/drivers/gpu/host1x/fence.c
index df5b56692d2c..139ad1afd935 100644
--- a/drivers/gpu/host1x/fence.c
+++ b/drivers/gpu/host1x/fence.c
@@ -37,8 +37,7 @@ static bool host1x_syncpt_fence_enable_signaling(struct dma_fence *f)
if (host1x_syncpt_is_expired(sf->sp, sf->threshold))
return false;
- /* One reference for interrupt path, one for timeout path. */
- dma_fence_get(f);
+ /* Reference for interrupt path. */
dma_fence_get(f);
/*
@@ -46,11 +45,15 @@ static bool host1x_syncpt_fence_enable_signaling(struct dma_fence *f)
* reference to any fences for which 'enable_signaling' has been
* called (and that have not been signalled).
*
- * We cannot (for now) normally guarantee that all fences get signalled.
- * As such, setup a timeout, so that long-lasting fences will get
- * reaped eventually.
+ * We cannot currently always guarantee that all fences get signalled
+ * or cancelled. As such, for such situations, set up a timeout, so
+ * that long-lasting fences will get reaped eventually.
*/
- schedule_delayed_work(&sf->timeout_work, msecs_to_jiffies(30000));
+ if (sf->timeout) {
+ /* Reference for timeout path. */
+ dma_fence_get(f);
+ schedule_delayed_work(&sf->timeout_work, msecs_to_jiffies(30000));
+ }
host1x_intr_add_fence_locked(sf->sp->host, sf);
@@ -80,7 +83,7 @@ void host1x_fence_signal(struct host1x_syncpt_fence *f)
return;
}
- if (cancel_delayed_work(&f->timeout_work)) {
+ if (f->timeout && cancel_delayed_work(&f->timeout_work)) {
/*
* We know that the timeout path will not be entered.
* Safe to drop the timeout path's reference now.
@@ -99,8 +102,9 @@ static void do_fence_timeout(struct work_struct *work)
container_of(dwork, struct host1x_syncpt_fence, timeout_work);
if (atomic_xchg(&f->signaling, 1)) {
- /* Already on interrupt path, drop timeout path reference. */
- dma_fence_put(&f->base);
+ /* Already on interrupt path, drop timeout path reference if any. */
+ if (f->timeout)
+ dma_fence_put(&f->base);
return;
}
@@ -114,12 +118,12 @@ static void do_fence_timeout(struct work_struct *work)
dma_fence_set_error(&f->base, -ETIMEDOUT);
dma_fence_signal(&f->base);
-
- /* Drop timeout path reference. */
- dma_fence_put(&f->base);
+ if (f->timeout)
+ dma_fence_put(&f->base);
}
-struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold)
+struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold,
+ bool timeout)
{
struct host1x_syncpt_fence *fence;
@@ -129,6 +133,7 @@ struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold)
fence->sp = sp;
fence->threshold = threshold;
+ fence->timeout = timeout;
dma_fence_init(&fence->base, &host1x_syncpt_fence_ops, &sp->fences.lock,
dma_fence_context_alloc(1), 0);
@@ -138,3 +143,12 @@ struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold)
return &fence->base;
}
EXPORT_SYMBOL(host1x_fence_create);
+
+void host1x_fence_cancel(struct dma_fence *f)
+{
+ struct host1x_syncpt_fence *sf = to_host1x_fence(f);
+
+ schedule_delayed_work(&sf->timeout_work, 0);
+ flush_delayed_work(&sf->timeout_work);
+}
+EXPORT_SYMBOL(host1x_fence_cancel);
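A note on the design: the cancellation path deliberately reuses
do_fence_timeout() by scheduling timeout_work with zero delay and then
flushing it, which makes the call synchronous. When
host1x_fence_cancel() returns, the work has run, so the fence has
either been signalled with -ETIMEDOUT or was already claimed by the
interrupt path. A minimal teardown sketch under that assumption
(example_teardown() is illustrative; it assumes the caller created the
fence with timeout == false and holds the last reference):

	#include <linux/dma-fence.h>
	#include <linux/host1x.h>

	static void example_teardown(struct dma_fence *fence)
	{
		/*
		 * Synchronous: schedules timeout_work immediately and
		 * flushes it, so do_fence_timeout() has completed (or
		 * the interrupt path won the atomic_xchg() race) before
		 * this returns.
		 */
		host1x_fence_cancel(fence);
		dma_fence_put(fence);
	}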