author | Chris Wilson <chris@chris-wilson.co.uk> | 2020-01-10 17:44:18 +0300
committer | Chris Wilson <chris@chris-wilson.co.uk> | 2020-01-10 23:30:40 +0300
commit | c0e60347d44d5e06869a253de326373e976be1c7 (patch)
tree | 9ec946219384465103acd8cb75f8ecb97649e394 /drivers/gpu/drm/i915
parent | 8cbf89db294166cc13d90a89422605e0c9f8bbc2 (diff)
download | linux-c0e60347d44d5e06869a253de326373e976be1c7.tar.xz
drm/i915/gt: Hold rpm wakeref before taking ggtt->vm.mutex
We need to hold the runtime-pm wakeref to update the global PTEs (as
they exist behind a PCI BAR). However, some systems invoke ACPI during
runtime resume and so require allocations, which is verboten inside the
vm->mutex. Ergo, we must not use intel_runtime_pm_get() inside the
mutex, but lift the call outside.
Closes: https://gitlab.freedesktop.org/drm/intel/issues/958
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200110144418.1415639-1-chris@chris-wilson.co.uk
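
In outline, the patch lifts the wakeref acquisition out of the bind/unbind callbacks (which run under ggtt->vm.mutex) and into the callers. A condensed sketch of the resulting ordering on the pin side, based on the i915_vma.c hunks below; error handling is simplified and the err_rpm label is invented for this sketch (the real function funnels through its existing error labels):

	int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
	{
		intel_wakeref_t wakeref = 0;
		int err;

		/* Take the wakeref first: runtime resume may call into ACPI
		 * and allocate, which must never happen under vm->mutex.
		 */
		if (flags & PIN_GLOBAL)
			wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

		/* No more allocations allowed once we hold vm->mutex */
		err = mutex_lock_interruptible(&vma->vm->mutex);
		if (err)
			goto err_rpm;

		/* ... reserve the node and insert the PTEs under the mutex ... */

		mutex_unlock(&vma->vm->mutex);
	err_rpm:
		if (wakeref)
			intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
		return err;
	}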
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r-- | drivers/gpu/drm/i915/gt/intel_ggtt.c | 28
-rw-r--r-- | drivers/gpu/drm/i915/i915_vma.c | 14
2 files changed, 19 insertions, 23 deletions
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index c8ba38e7cda7..79096722ce16 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -430,9 +430,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 			 enum i915_cache_level cache_level,
 			 u32 flags)
 {
-	struct drm_i915_private *i915 = vma->vm->i915;
 	struct drm_i915_gem_object *obj = vma->obj;
-	intel_wakeref_t wakeref;
 	u32 pte_flags;
 
 	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
@@ -440,8 +438,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 	if (i915_gem_object_is_readonly(obj))
 		pte_flags |= PTE_READ_ONLY;
 
-	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-		vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
 
 	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
 
@@ -457,11 +454,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 
 static void ggtt_unbind_vma(struct i915_vma *vma)
 {
-	struct drm_i915_private *i915 = vma->vm->i915;
-	intel_wakeref_t wakeref;
-
-	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-		vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
 }
 
 static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
@@ -571,7 +564,6 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 				 enum i915_cache_level cache_level,
 				 u32 flags)
 {
-	struct drm_i915_private *i915 = vma->vm->i915;
 	u32 pte_flags;
 	int ret;
 
@@ -599,28 +591,18 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 					   cache_level, pte_flags);
 	}
 
-	if (flags & I915_VMA_GLOBAL_BIND) {
-		intel_wakeref_t wakeref;
-
-		with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-			vma->vm->insert_entries(vma->vm, vma,
-						cache_level, pte_flags);
-		}
-	}
+	if (flags & I915_VMA_GLOBAL_BIND)
+		vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
 
 	return 0;
 }
 
 static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
 {
-	struct drm_i915_private *i915 = vma->vm->i915;
-
 	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
 		struct i915_address_space *vm = vma->vm;
-		intel_wakeref_t wakeref;
 
-		with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-			vm->clear_range(vm, vma->node.start, vma->size);
+		vm->clear_range(vm, vma->node.start, vma->size);
 	}
 
 	if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 43d5c270bdb0..17d7c525ea5c 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -858,6 +858,7 @@ static void vma_unbind_pages(struct i915_vma *vma)
 int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
 	struct i915_vma_work *work = NULL;
+	intel_wakeref_t wakeref = 0;
 	unsigned int bound;
 	int err;
 
@@ -883,6 +884,9 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		}
 	}
 
+	if (flags & PIN_GLOBAL)
+		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
+
 	/* No more allocations allowed once we hold vm->mutex */
 	err = mutex_lock_interruptible(&vma->vm->mutex);
 	if (err)
@@ -946,6 +950,8 @@ err_unlock:
 err_fence:
 	if (work)
 		dma_fence_work_commit(&work->base);
+	if (wakeref)
+		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
 err_pages:
 	vma_put_pages(vma);
 	return err;
@@ -1246,11 +1252,16 @@ int __i915_vma_unbind(struct i915_vma *vma)
 int i915_vma_unbind(struct i915_vma *vma)
 {
 	struct i915_address_space *vm = vma->vm;
+	intel_wakeref_t wakeref = 0;
 	int err;
 
 	if (!drm_mm_node_allocated(&vma->node))
 		return 0;
 
+	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+		/* XXX not always required: nop_clear_range */
+		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+
 	err = mutex_lock_interruptible(&vm->mutex);
 	if (err)
 		return err;
@@ -1258,6 +1269,9 @@ int i915_vma_unbind(struct i915_vma *vma)
 
 	err = __i915_vma_unbind(vma);
 	mutex_unlock(&vm->mutex);
 
+	if (wakeref)
+		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
+
 	return err;
 }
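
Note that on the unbind side the wakeref is taken only when the vma actually holds a global binding: a vma bound only into a per-process address space never touches the global PTEs behind the PCI BAR, so no wakeref is needed there. As the XXX comment records, even the global case is sometimes more than strictly necessary, for example on platforms whose clear_range callback is nop_clear_range.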