author     Chris Wilson <chris@chris-wilson.co.uk>  2020-07-31 11:50:13 +0300
committer  Joonas Lahtinen <joonas.lahtinen@linux.intel.com>  2020-09-07 13:18:55 +0300
commit     e28860ae21da258a1f983b52941a3f581281d114 (patch)
tree       f13793280f4263a919e9683d90b478ee2687985e /drivers/gpu/drm/i915/i915_active.c
parent     99a7f4dae7ee6081df04741e0c4ea5e49b139540 (diff)
drm/i915: Make the stale cached active node available for any timeline
Rather than require the next timeline after idling to match the MRU before idling, reset the index on the node and allow it to match the first request. However, this requires cmpxchg(u64) and so is not trivial on 32b, so for compatibility we just fall back to keeping the cached node pointing to the MRU timeline.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Thomas Hellström <thomas.hellstrom@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200731085015.32368-5-chris@chris-wilson.co.uk
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
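The claim-once scheme the message describes can be sketched in userspace with C11 atomics (a hedged illustration only: the kernel's cmpxchg() is its own primitive, and struct cache_slot and try_claim below are hypothetical names, not identifiers from this patch):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the cached active_node; only the
 * 64-bit timeline index matters here. 0 == unclaimed/reserved. */
struct cache_slot {
	_Atomic uint64_t timeline;
};

/* Returns nonzero if the slot may serve @idx: either it already
 * belongs to @idx, or it was reset to 0 on idle and we win the
 * one-time compare-and-swap to claim it for @idx. */
static int try_claim(struct cache_slot *slot, uint64_t idx)
{
	uint64_t expected = 0;

	if (atomic_load_explicit(&slot->timeline,
				 memory_order_relaxed) == idx)
		return 1;

	/* Only the winner of the race sees the old value 0. */
	return atomic_compare_exchange_strong(&slot->timeline,
					      &expected, idx);
}

int main(void)
{
	struct cache_slot slot = { .timeline = 0 };

	printf("%d\n", try_claim(&slot, 42)); /* 1: wins the claim */
	printf("%d\n", try_claim(&slot, 43)); /* 0: owned by 42 now */
	printf("%d\n", try_claim(&slot, 42)); /* 1: cache hit */
	return 0;
}

The need for an atomic 8-byte compare-and-swap is what makes this non-trivial on 32b, and why the patch gates both the reset-to-zero and the claim path on CONFIG_64BIT.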
Diffstat (limited to 'drivers/gpu/drm/i915/i915_active.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_active.c | 30 ++++++++++++++++++++++++++++--
1 file changed, 28 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index e63578c80f53..89c34a69a2ea 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -157,6 +157,10 @@ __active_retire(struct i915_active *ref)
 		rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
 		rb_insert_color(&ref->cache->node, &ref->tree);
 		GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);
+
+		/* Make the cached node available for reuse with any timeline */
+		if (IS_ENABLED(CONFIG_64BIT))
+			ref->cache->timeline = 0; /* needs cmpxchg(u64) */
 	}
 
 	spin_unlock_irqrestore(&ref->tree_lock, flags);
@@ -235,6 +239,8 @@ static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
 {
 	struct active_node *it;
 
+	GEM_BUG_ON(idx == 0); /* 0 is the unordered timeline, rsvd for cache */
+
 	/*
 	 * We track the most recently used timeline to skip a rbtree search
 	 * for the common case, under typical loads we never need the rbtree
@@ -243,8 +249,28 @@ static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
 	 * current timeline.
 	 */
 	it = READ_ONCE(ref->cache);
-	if (it && it->timeline == idx)
-		return it;
+	if (it) {
+		u64 cached = READ_ONCE(it->timeline);
+
+		/* Once claimed, this slot will only belong to this idx */
+		if (cached == idx)
+			return it;
+
+#ifdef CONFIG_64BIT /* for cmpxchg(u64) */
+		/*
+		 * An unclaimed cache [.timeline=0] can only be claimed once.
+		 *
+		 * If the value is already non-zero, some other thread has
+		 * claimed the cache and we know that it does not match our
+		 * idx. If, and only if, the timeline is currently zero is it
+		 * worth competing to claim it atomically for ourselves (for
+		 * only the winner of that race will see cmpxchg return the
+		 * old value of 0).
+		 */
+		if (!cached && !cmpxchg(&it->timeline, 0, idx))
+			return it;
+#endif
+	}
 
 	BUILD_BUG_ON(offsetof(typeof(*it), node));
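For the 32b fallback the message mentions, a hedged sketch of how the lookup degrades when no u64-capable cmpxchg is available (HAVE_CMPXCHG64 and slot_lookup are illustrative stand-ins, not kernel identifiers): the cache then only ever hits when idx matches the timeline it was left pointing at, i.e. the MRU.

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

struct cache_slot {
	_Atomic uint64_t timeline;	/* 0 == unclaimed (64b only) */
};

/* Returns the slot on a cache hit, or NULL to fall back to the
 * rbtree walk, mirroring the shape of __active_lookup() above. */
static struct cache_slot *slot_lookup(struct cache_slot *slot, uint64_t idx)
{
	uint64_t cached = atomic_load_explicit(&slot->timeline,
					       memory_order_relaxed);

	/* Once claimed, the slot belongs to exactly one idx. */
	if (cached == idx)
		return slot;

#ifdef HAVE_CMPXCHG64	/* stand-in for the patch's CONFIG_64BIT gate */
	/* Compete for an unclaimed slot; the compare-and-swap moves
	 * it from 0 to our idx for exactly one caller. */
	if (cached == 0 &&
	    atomic_compare_exchange_strong(&slot->timeline, &cached, idx))
		return slot;
#endif
	return NULL;	/* 32b: miss unless idx is the MRU timeline */
}

As a side note on the trailing context line: BUILD_BUG_ON(offsetof(typeof(*it), node)) is a compile-time assertion that node sits at offset 0 of struct active_node, so a pointer to the embedded rb_node and a pointer to its containing node stay interchangeable.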