author | Dave Airlie <airlied@redhat.com> | 2012-12-30 07:54:12 +0400
committer | Dave Airlie <airlied@redhat.com> | 2012-12-30 07:54:12 +0400
commit | 8be0e5c427c18a59ce261c496ae2193cbcbafffd (patch)
tree | 26dac80685ba46246642b901460a5042ac4e187c /drivers/gpu/drm/i915/i915_gem.c
parent | b1d778b970ce52e02ca6a7f34ba167fe95bc1cc4 (diff)
parent | da494d7ca5e0a1afca3480826b5060e15c951e80 (diff)
download | linux-8be0e5c427c18a59ce261c496ae2193cbcbafffd.tar.xz
Merge branch 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
Some fixes for 3.8:
- Watermark fixups from Chris Wilson (4 pieces).
- Two snb workarounds that seem to have been added to our internal DB only recently.
- Workaround for the infamous i830/i845 hang, which now finally seems solid!
  Based on Chris' fix for SNA, now also covering UXA/mesa & old SNA.
- Some more fixlets for shrinker-pulls-the-rug issues (Chris & me); see the
  sketch after this list.
- Fix dma-buf flags when exporting (you).
- Disable the VGA plane if it's enabled on lid open - similar in spirit to
  the fix I sent you last week; BIOSes really like to mess with the display
  when closing the lid (awesome debug work from Krzysztof Mazur).
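
The shrinker fixlets work by letting a critical section opt out of the shrinker's mutex "lock stealing" via a flag, as the i915_gem.c diff further down shows (dev_priv->mm.shrinker_no_lock_stealing). The following is a minimal, self-contained userspace sketch of that pattern; all names in it (mm_state, shrink, create_mmap_offset) are illustrative stand-ins, not the kernel API:

#include <stdbool.h>
#include <stdio.h>

struct mm_state {
	bool locked_by_me;              /* stand-in for mutex_is_locked_by() */
	bool shrinker_no_lock_stealing; /* set while lock stealing is unsafe */
};

static int shrink(struct mm_state *mm)
{
	/* The shrinker would normally "steal" a mutex the current thread
	 * already holds instead of blocking on it; honour the opt-out. */
	if (mm->locked_by_me && mm->shrinker_no_lock_stealing)
		return 0; /* back off: report nothing freeable */
	printf("shrinking caches\n");
	return 1;
}

static int create_mmap_offset(struct mm_state *mm)
{
	int freed;

	/* Critical section: forbid lock stealing for its whole duration,
	 * mirroring the flag-set/goto-out/flag-clear structure in the diff. */
	mm->shrinker_no_lock_stealing = true;
	freed = shrink(mm); /* models direct reclaim re-entering us */
	mm->shrinker_no_lock_stealing = false;
	return freed;
}

int main(void)
{
	struct mm_state mm = { .locked_by_me = true };
	printf("freed: %d\n", create_mmap_offset(&mm)); /* prints freed: 0 */
	return 0;
}

The point of the flag is that direct reclaim entered from inside the critical section would otherwise re-enter the GEM code through the stolen lock; backing off and reporting nothing freeable is the safe answer.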
* 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel:
drm/i915: disable shrinker lock stealing for create_mmap_offset
drm/i915: optionally disable shrinker lock stealing
drm/i915: fix flags in dma buf exporting
i915: ensure that VGA plane is disabled
drm/i915: Preallocate the drm_mm_node prior to manipulating the GTT drm_mm manager
drm: Export routines for inserting preallocated nodes into the mm manager
drm/i915: don't disable disconnected outputs
drm/i915: Implement workaround for broken CS tlb on i830/845
drm/i915: Implement WaSetupGtModeTdRowDispatch
drm/i915: Implement WaDisableHiZPlanesWhenMSAAEnabled
drm/i915: Prefer CRTC 'active' rather than 'enabled' during WM computations
drm/i915: Clear self-refresh watermarks when disabled
drm/i915: Double the cursor self-refresh latency on Valleyview
drm/i915: Fixup cursor latency used for IVB lp3 watermarks
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 77
1 file changed, 37 insertions(+), 40 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 742206e45103..da3c82e301b1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1517,9 +1517,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 	if (obj->base.map_list.map)
 		return 0;
 
+	dev_priv->mm.shrinker_no_lock_stealing = true;
+
 	ret = drm_gem_create_mmap_offset(&obj->base);
 	if (ret != -ENOSPC)
-		return ret;
+		goto out;
 
 	/* Badly fragmented mmap space? The only way we can recover
 	 * space is by destroying unwanted objects. We can't randomly release
@@ -1531,10 +1533,14 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
 	ret = drm_gem_create_mmap_offset(&obj->base);
 	if (ret != -ENOSPC)
-		return ret;
+		goto out;
 
 	i915_gem_shrink_all(dev_priv);
-	return drm_gem_create_mmap_offset(&obj->base);
+	ret = drm_gem_create_mmap_offset(&obj->base);
+out:
+	dev_priv->mm.shrinker_no_lock_stealing = false;
+
+	return ret;
 }
 
 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
@@ -2890,7 +2896,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_mm_node *free_space;
+	struct drm_mm_node *node;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
 	bool mappable, fenceable;
 	int ret;
@@ -2936,66 +2942,54 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
 	i915_gem_object_pin_pages(obj);
 
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (node == NULL) {
+		i915_gem_object_unpin_pages(obj);
+		return -ENOMEM;
+	}
+
 search_free:
 	if (map_and_fenceable)
-		free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
-							       size, alignment, obj->cache_level,
-							       0, dev_priv->mm.gtt_mappable_end,
-							       false);
+		ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+							  size, alignment, obj->cache_level,
+							  0, dev_priv->mm.gtt_mappable_end);
 	else
-		free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
-						      size, alignment, obj->cache_level,
-						      false);
-
-	if (free_space != NULL) {
-		if (map_and_fenceable)
-			free_space =
-				drm_mm_get_block_range_generic(free_space,
-							       size, alignment, obj->cache_level,
-							       0, dev_priv->mm.gtt_mappable_end,
-							       false);
-		else
-			free_space =
-				drm_mm_get_block_generic(free_space,
-							 size, alignment, obj->cache_level,
-							 false);
-	}
-	if (free_space == NULL) {
+		ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
+						 size, alignment, obj->cache_level);
+	if (ret) {
 		ret = i915_gem_evict_something(dev, size, alignment,
 					       obj->cache_level,
 					       map_and_fenceable,
 					       nonblocking);
-		if (ret) {
-			i915_gem_object_unpin_pages(obj);
-			return ret;
-		}
+		if (ret == 0)
+			goto search_free;
 
-		goto search_free;
+		i915_gem_object_unpin_pages(obj);
+		kfree(node);
+		return ret;
 	}
-	if (WARN_ON(!i915_gem_valid_gtt_space(dev,
-					      free_space,
-					      obj->cache_level))) {
+	if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
 		i915_gem_object_unpin_pages(obj);
-		drm_mm_put_block(free_space);
+		drm_mm_put_block(node);
 		return -EINVAL;
 	}
 
 	ret = i915_gem_gtt_prepare_object(obj);
 	if (ret) {
 		i915_gem_object_unpin_pages(obj);
-		drm_mm_put_block(free_space);
+		drm_mm_put_block(node);
 		return ret;
 	}
 
 	list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
 	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-	obj->gtt_space = free_space;
-	obj->gtt_offset = free_space->start;
+	obj->gtt_space = node;
+	obj->gtt_offset = node->start;
 
 	fenceable =
-		free_space->size == fence_size &&
-		(free_space->start & (fence_alignment - 1)) == 0;
+		node->size == fence_size &&
+		(node->start & (fence_alignment - 1)) == 0;
 
 	mappable =
 		obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
@@ -4392,6 +4386,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 		if (!mutex_is_locked_by(&dev->struct_mutex, current))
 			return 0;
 
+		if (dev_priv->mm.shrinker_no_lock_stealing)
+			return 0;
+
 		unlock = false;
 	}
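
For context on the bind_to_gtt rework above: the old flow searched for a hole and then grabbed an allocator-owned block (drm_mm_search_free_*/drm_mm_get_block_*), while the new flow preallocates a drm_mm_node and asks the manager to insert it directly, so the only allocation that can fail with -ENOMEM happens before the GTT is touched. A rough userspace sketch of that ownership change, assuming a trivial bump allocator; mm_node and mm_insert_node are illustrative stand-ins, not the real drm_mm API:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct mm_node {
	unsigned long start;
	unsigned long size;
};

/* Stand-in for drm_mm_insert_node_generic(): fills in a node the caller
 * owns, and fails with -ENOSPC instead of returning a NULL block. */
static int mm_insert_node(struct mm_node *node, unsigned long size,
			  unsigned long *next_free, unsigned long total)
{
	if (*next_free + size > total)
		return -ENOSPC;
	node->start = *next_free;
	node->size = size;
	*next_free += size;
	return 0;
}

int main(void)
{
	unsigned long next_free = 0, total = 1UL << 20;

	/* Preallocate the node up front (the kzalloc in the diff): after
	 * this point the bind step can only fail with -ENOSPC, which the
	 * caller answers with eviction and a retry, never with -ENOMEM. */
	struct mm_node *node = calloc(1, sizeof(*node));
	if (node == NULL)
		return 1;

	if (mm_insert_node(node, 4096, &next_free, total) == 0)
		printf("bound at offset %lu, size %lu\n", node->start, node->size);

	free(node);
	return 0;
}

Returning -ENOSPC from the insert, rather than NULL from a search, is what lets the kernel caller fall through to i915_gem_evict_something() and jump back to the search_free label, as the hunk above shows.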