author     Chris Wilson <chris@chris-wilson.co.uk>  2017-06-09 14:03:49 +0300
committer  Chris Wilson <chris@chris-wilson.co.uk>  2017-06-14 12:55:11 +0300
commit     290271de34f6c22ec2337e3293224575459747d6 (patch)
tree       f96d17399bd8f949a07034ac82ada002f995624a
parent     0f6ab55d7ab6847e9b799022ed3b5511c756e512 (diff)
download   linux-290271de34f6c22ec2337e3293224575459747d6.tar.xz
drm/i915: Spin for struct_mutex inside shrinker
Having resolved whether or not we would deadlock upon a call to
mutex_lock(&dev->struct_mutex), we can then spin for the contended
struct_mutex if we are not the owner. We cannot afford to simply block
and wait for the mutex, as the owner may itself be waiting for the
allocator -- i.e. a cyclic deadlock. This should significantly improve
the chance of running the shrinker for other processes whilst the GPU is
busy.

A more balanced approach would be to optimistically spin whilst the
mutex owner was on the cpu and there was an opportunity to acquire the
mutex for ourselves quickly. However, that requires support from
kernel/locking/ and a new mutex_spin_trylock() primitive.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170609110350.1767-4-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
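As a minimal sketch of the bounded spin described above (not part of the
patch, and not the proposed mutex_spin_trylock() primitive): keep retrying
mutex_trylock() until the scheduler asks for the CPU back via need_resched().
The helper name is hypothetical; mutex_trylock(), cpu_relax() and
need_resched() are the real kernel primitives the patch uses.

	#include <linux/mutex.h>
	#include <linux/sched.h>

	/*
	 * Hypothetical helper, for illustration only: spin on a contended
	 * mutex, but give up as soon as the scheduler wants to run
	 * something else on this CPU.
	 */
	static bool trylock_or_spin(struct mutex *lock)
	{
		do {
			if (mutex_trylock(lock))
				return true;
			cpu_relax();
		} while (!need_resched());

		return false;
	}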
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c  19
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 58f27369183c..1032f98add11 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -38,16 +38,21 @@
 static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
 {
 	switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) {
-	case MUTEX_TRYLOCK_FAILED:
-		return false;
-
-	case MUTEX_TRYLOCK_SUCCESS:
-		*unlock = true;
-		return true;
-
 	case MUTEX_TRYLOCK_RECURSIVE:
 		*unlock = false;
 		return true;
+
+	case MUTEX_TRYLOCK_FAILED:
+		do {
+			cpu_relax();
+			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
+	case MUTEX_TRYLOCK_SUCCESS:
+				*unlock = true;
+				return true;
+			}
+		} while (!need_resched());
+
+		return false;
 	}
 
 	BUG();