author     Dave Airlie <airlied@redhat.com>    2018-06-28 06:10:37 +0300
committer  Dave Airlie <airlied@redhat.com>    2018-06-28 06:10:44 +0300
commit     b4d4b0b7defbc226cc2237e08ced62c1c806e301 (patch)
tree       a47b03c37a7ee2ef5fc78f7b12f6a61d2bef508d /drivers/gpu/drm/i915/i915_vma.c
parent     3c8daa7db46dd50c95778b4fbca5c7da6edaac9b (diff)
parent     e1cacec9d50d7299893eeab2d895189f3db625da (diff)
download   linux-b4d4b0b7defbc226cc2237e08ced62c1c806e301.tar.xz
Merge tag 'drm-intel-next-2018-06-20' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
Chris is doing many reworks that allow us to get full-ppgtt support
on all platforms back to HSW, as well as many other fixes and
improvements, including:
- Use GEM suspend when aborting initialization (Chris)
- Change i915_gem_fault to return vm_fault_t (Chris)
- Expand VMA to non-GEM-object entities (Chris)
- Improve logs for load failure, but quiet logging on fault injection to avoid noise on CI (Chris)
- Other page directory handling fixes and improvements for gen6 (Chris)
- Other gtt clean-up removing redundancies and unused checks (Chris)
- Reorder aliasing ppgtt fini (Chris)
- Refactor of unsetting obj->mm.pages (Chris)
- Apply batch location restrictions before pinning (Chris)
- Ringbuffer fixes for context restore (Chris)
- Execlist fixes on freeing error pointer on allocation error (Chris)
- Make closing request flush mandatory (Chris)
- Move GEM sanitize from resume_early to resume (Chris)
- Improve debug dumps (Chris)
- Silence compiler warnings in selftests (Chris)
- Other execlists changes to improve hangcheck and reset
- Many gtt page directory fixes and improvements (Chris)
- Reorg context workarounds (Chris)
- Avoid ERR_PTR dereference on selftest (Chris)
Other GEM-related work:
- Stop trying to reset GPU if reset failed (Mika)
- Add HW workaround for KBL to fix GPU reset (Mika)
- Fix context ban and hang accounting for client (Mika)
- Fixes on OA perf (Michel, Jani)
- Refactor on GuC log mechanisms (Piotr)
- Enable provoking vertex fix on Gen9 systems (Kenneth)
More ICL patches for Display enabling:
- ICL - 10-bit support for HDMI (RK)
- ICL - Start adding TBT PLL (Paulo)
- ICL - DDI HDMI level selection (Manasi)
- ICL - GMBUS GPIO pin mapping fix (Mahesh)
- ICL - Adding DP_AUX_E support (James)
- ICL - Display interrupts handling (DK)
Other display fixes and improvements:
- Fix sprite destination color keying on SKL+ (Ville)
- Fixes and improvements on PCH detection, especially for non-PCH systems (Jani)
- Document PCH_NOP (Lucas)
- Allow DBLSCAN user modes with eDP/LVDS/DSI (Ville)
- Opregion and ACPI cleanup and organization (Jani)
- Kill delays when activating PSR (Rodrigo)
- ...and a consequent fix of the PSR activation flow (DK)
- Fix HDMI infoframe setting (Imre)
- Fix Display interrupts and modes on old gens (Ville)
- Start switching to kernel unsigned int types (Jani)
- Introduction of Amber Lake and Whiskey Lake platforms (Jose)
- Audio clock fixes for HBR3 (RK)
- Standardize i915_reg.h definitions according to our doc and checkpatch (Paulo)
- Remove unused timespec_to_jiffies_timeout function (Arnd)
- Increase the scope of PSR wake fix for other VBTs out there (Vathsala)
- Improve debug msgs with prop name/id (Ville)
- Other cleanup of unnecessary cursor size defines (Ville)
- Enforce max hdisplay/hblank_start limits on HSW/BDW (Ville)
- Make ELD pointers constant (Jani)
- Fix for PSR VBT parse (Colin)
- Add warn about unsupported CDCLK rates (Imre)
Signed-off-by: Dave Airlie <airlied@redhat.com>
# gpg: Signature made Thu 21 Jun 2018 07:12:10 AM AEST
# gpg: using RSA key FA625F640EEB13CA
# gpg: Good signature from "Rodrigo Vivi <rodrigo.vivi@intel.com>"
# gpg: aka "Rodrigo Vivi <rodrigo.vivi@gmail.com>"
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg: There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 6D20 7068 EEDD 6509 1C2C E2A3 FA62 5F64 0EEB 13CA
Link: https://patchwork.freedesktop.org/patch/msgid/20180625165622.GA21761@intel.com
Diffstat (limited to 'drivers/gpu/drm/i915/i915_vma.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 107
1 file changed, 62 insertions(+), 45 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 912f16ffe7ee..e82aa804cdba 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -95,6 +95,7 @@ vma_create(struct drm_i915_gem_object *obj,
                 init_request_active(&vma->last_read[i], i915_vma_retire);
         init_request_active(&vma->last_fence, NULL);
         vma->vm = vm;
+        vma->ops = &vm->vma_ops;
         vma->obj = obj;
         vma->resv = obj->resv;
         vma->size = obj->base.size;
@@ -280,7 +281,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
         GEM_BUG_ON(!vma->pages);
 
         trace_i915_vma_bind(vma, bind_flags);
-        ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
+        ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
         if (ret)
                 return ret;
 
@@ -345,7 +346,7 @@ void i915_vma_flush_writes(struct i915_vma *vma)
 
 void i915_vma_unpin_iomap(struct i915_vma *vma)
 {
-        lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 
         GEM_BUG_ON(vma->iomap == NULL);
 
@@ -365,6 +366,7 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma)
                 return;
 
         obj = vma->obj;
+        GEM_BUG_ON(!obj);
 
         i915_vma_unpin(vma);
         i915_vma_close(vma);
@@ -489,7 +491,7 @@ static int
 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
         struct drm_i915_private *dev_priv = vma->vm->i915;
-        struct drm_i915_gem_object *obj = vma->obj;
+        unsigned int cache_level;
         u64 start, end;
         int ret;
 
@@ -524,20 +526,25 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
          * aperture has, reject it early before evicting everything in a vain
          * attempt to find space.
          */
         if (size > end) {
-                DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
-                          size, obj->base.size,
-                          flags & PIN_MAPPABLE ? "mappable" : "total",
+                DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
+                          size, flags & PIN_MAPPABLE ? "mappable" : "total",
                           end);
                 return -ENOSPC;
         }
 
-        ret = i915_gem_object_pin_pages(obj);
-        if (ret)
-                return ret;
+        if (vma->obj) {
+                ret = i915_gem_object_pin_pages(vma->obj);
+                if (ret)
+                        return ret;
+
+                cache_level = vma->obj->cache_level;
+        } else {
+                cache_level = 0;
+        }
 
         GEM_BUG_ON(vma->pages);
 
-        ret = vma->vm->set_pages(vma);
+        ret = vma->ops->set_pages(vma);
         if (ret)
                 goto err_unpin;
@@ -550,7 +557,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
                 }
 
                 ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
-                                           size, offset, obj->cache_level,
+                                           size, offset, cache_level,
                                            flags);
                 if (ret)
                         goto err_clear;
@@ -589,7 +596,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
                 }
 
                 ret = i915_gem_gtt_insert(vma->vm, &vma->node,
-                                          size, alignment, obj->cache_level,
+                                          size, alignment, cache_level,
                                           start, end, flags);
                 if (ret)
                         goto err_clear;
@@ -598,23 +605,28 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
                 GEM_BUG_ON(vma->node.start < start);
                 GEM_BUG_ON(vma->node.start + vma->node.size > end);
         }
         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-        GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
+        GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
 
         list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 
-        spin_lock(&dev_priv->mm.obj_lock);
-        list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
-        obj->bind_count++;
-        spin_unlock(&dev_priv->mm.obj_lock);
+        if (vma->obj) {
+                struct drm_i915_gem_object *obj = vma->obj;
+
+                spin_lock(&dev_priv->mm.obj_lock);
+                list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
+                obj->bind_count++;
+                spin_unlock(&dev_priv->mm.obj_lock);
 
-        assert_bind_count(obj);
+                assert_bind_count(obj);
+        }
 
         return 0;
 
 err_clear:
-        vma->vm->clear_pages(vma);
+        vma->ops->clear_pages(vma);
 err_unpin:
-        i915_gem_object_unpin_pages(obj);
+        if (vma->obj)
+                i915_gem_object_unpin_pages(vma->obj);
         return ret;
 }
@@ -622,30 +634,35 @@ static void
 i915_vma_remove(struct i915_vma *vma)
 {
         struct drm_i915_private *i915 = vma->vm->i915;
-        struct drm_i915_gem_object *obj = vma->obj;
 
         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
         GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 
-        vma->vm->clear_pages(vma);
+        vma->ops->clear_pages(vma);
 
         drm_mm_remove_node(&vma->node);
         list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
 
-        /* Since the unbound list is global, only move to that list if
+        /*
+         * Since the unbound list is global, only move to that list if
          * no more VMAs exist.
          */
-        spin_lock(&i915->mm.obj_lock);
-        if (--obj->bind_count == 0)
-                list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
-        spin_unlock(&i915->mm.obj_lock);
-
-        /* And finally now the object is completely decoupled from this vma,
-         * we can drop its hold on the backing storage and allow it to be
-         * reaped by the shrinker.
-         */
-        i915_gem_object_unpin_pages(obj);
-        assert_bind_count(obj);
+        if (vma->obj) {
+                struct drm_i915_gem_object *obj = vma->obj;
+
+                spin_lock(&i915->mm.obj_lock);
+                if (--obj->bind_count == 0)
+                        list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
+                spin_unlock(&i915->mm.obj_lock);
+
+                /*
+                 * And finally now the object is completely decoupled from this
+                 * vma, we can drop its hold on the backing storage and allow
+                 * it to be reaped by the shrinker.
+                 */
+                i915_gem_object_unpin_pages(obj);
+                assert_bind_count(obj);
+        }
 }
 
 int __i915_vma_do_pin(struct i915_vma *vma,
@@ -670,7 +687,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
         }
         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
-        ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
+        ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
         if (ret)
                 goto err_remove;
 
@@ -727,6 +744,7 @@ void i915_vma_reopen(struct i915_vma *vma)
 
 static void __i915_vma_destroy(struct i915_vma *vma)
 {
+        struct drm_i915_private *i915 = vma->vm->i915;
         int i;
 
         GEM_BUG_ON(vma->node.allocated);
@@ -738,12 +756,13 @@ static void __i915_vma_destroy(struct i915_vma *vma)
 
         list_del(&vma->obj_link);
         list_del(&vma->vm_link);
-        rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+        if (vma->obj)
+                rb_erase(&vma->obj_node, &vma->obj->vma_tree);
 
         if (!i915_vma_is_ggtt(vma))
                 i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
 
-        kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+        kmem_cache_free(i915->vmas, vma);
 }
 
 void i915_vma_destroy(struct i915_vma *vma)
@@ -809,13 +828,13 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
 
 int i915_vma_unbind(struct i915_vma *vma)
 {
-        struct drm_i915_gem_object *obj = vma->obj;
         unsigned long active;
         int ret;
 
-        lockdep_assert_held(&obj->base.dev->struct_mutex);
+        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 
-        /* First wait upon any activity as retiring the request may
+        /*
+         * First wait upon any activity as retiring the request may
          * have side-effects such as unpinning or even unbinding this vma.
          */
         might_sleep();
@@ -823,7 +842,8 @@ int i915_vma_unbind(struct i915_vma *vma)
         if (active) {
                 int idx;
 
-                /* When a closed VMA is retired, it is unbound - eek.
+                /*
+                 * When a closed VMA is retired, it is unbound - eek.
                  * In order to prevent it from being recursively closed,
                  * take a pin on the vma so that the second unbind is
                  * aborted.
@@ -861,9 +881,6 @@ int i915_vma_unbind(struct i915_vma *vma)
         if (!drm_mm_node_allocated(&vma->node))
                 return 0;
 
-        GEM_BUG_ON(obj->bind_count == 0);
-        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
         if (i915_vma_is_map_and_fenceable(vma)) {
                 /*
                  * Check that we have flushed all writes through the GGTT
@@ -890,7 +907,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 
         if (likely(!vma->vm->closed)) {
                 trace_i915_vma_unbind(vma);
-                vma->vm->unbind_vma(vma);
+                vma->ops->unbind_vma(vma);
         }
         vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
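For readers skimming the diff, the sketch below is a minimal, self-contained userspace C illustration of the two patterns it applies to i915_vma.c: bind/unbind is dispatched through an ops table stored on the VMA at creation time (vma->ops = &vm->vma_ops) rather than through callbacks on the address space, and vma->obj may now be NULL, with object-derived state such as cache_level falling back to 0. All structure and function names here are invented for the example; they are not the kernel's definitions.

/*
 * Illustrative userspace sketch only -- not kernel code. It mirrors the two
 * changes above: per-VM bind/unbind ops are copied onto the vma at creation
 * (vma->ops), and vma->obj may be NULL, so object-derived state needs a
 * fallback. All identifiers here are invented for the example.
 */
#include <stdio.h>
#include <stddef.h>

struct vma;

struct vma_ops {
        /* dispatch table, analogous to vma->ops->bind_vma()/unbind_vma() */
        int  (*bind_vma)(struct vma *vma, unsigned int cache_level);
        void (*unbind_vma)(struct vma *vma);
};

struct gem_object {
        size_t size;
        unsigned int cache_level;
};

struct addr_space {
        const char *name;
        struct vma_ops vma_ops;         /* source of vma->ops for new vmas */
};

struct vma {
        struct addr_space *vm;
        const struct vma_ops *ops;      /* &vm->vma_ops, set once at creation */
        struct gem_object *obj;         /* may be NULL: not object-backed */
        size_t size;
};

static int ggtt_bind_vma(struct vma *vma, unsigned int cache_level)
{
        printf("bind %zu bytes into %s (cache_level=%u)\n",
               vma->size, vma->vm->name, cache_level);
        return 0;
}

static void ggtt_unbind_vma(struct vma *vma)
{
        printf("unbind %zu bytes from %s\n", vma->size, vma->vm->name);
}

static struct vma vma_create(struct addr_space *vm, struct gem_object *obj,
                             size_t size)
{
        struct vma vma = {
                .vm   = vm,
                .ops  = &vm->vma_ops,   /* ops decoupled from the vm callbacks */
                .obj  = obj,            /* NULL is now a valid state */
                .size = obj ? obj->size : size,
        };
        return vma;
}

static int vma_bind(struct vma *vma)
{
        /* as in the __i915_vma_do_pin() hunk: no object means cache_level 0 */
        unsigned int cache_level = vma->obj ? vma->obj->cache_level : 0;

        return vma->ops->bind_vma(vma, cache_level);
}

int main(void)
{
        struct addr_space ggtt = {
                .name = "ggtt",
                .vma_ops = {
                        .bind_vma   = ggtt_bind_vma,
                        .unbind_vma = ggtt_unbind_vma,
                },
        };
        struct gem_object obj = { .size = 4096, .cache_level = 1 };
        struct vma backed   = vma_create(&ggtt, &obj, 0);
        struct vma unbacked = vma_create(&ggtt, NULL, 8192);   /* no GEM object */

        vma_bind(&backed);
        vma_bind(&unbacked);
        unbacked.ops->unbind_vma(&unbacked);
        backed.ops->unbind_vma(&backed);
        return 0;
}

Keeping the ops pointer on the VMA itself means that paths such as i915_vma_bind(), i915_vma_insert() and i915_vma_unbind() no longer have to assume a GEM-object-backed VMA or reach through vma->vm, which is what allows VMAs to be expanded to non-GEM-object entities as listed in the changelog above.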