author | Maarten Lankhorst <maarten.lankhorst@linux.intel.com> | 2021-01-28 19:25:39 +0300 |
---|---|---|
committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2021-03-24 19:27:20 +0300 |
commit | f1ac8a0292605e094533c02106d1d2f6a97028a3 (patch) | |
tree | 3cac20411aecf922af3a0dd68c6fb08f87421c03 /drivers/gpu/drm/i915/i915_gem.c | |
parent | c93987753ae8057e61a3902e71362613f7256600 (diff) | |
download | linux-f1ac8a0292605e094533c02106d1d2f6a97028a3.tar.xz | |
drm/i915: Fix pread/pwrite to work with new locking rules.
We are removing obj->mm.lock, and need to take the reservation lock
before we can pin pages. Move the page pinning into the helper, and
merge the gtt pwrite/pread preparation and cleanup paths.
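The preparation helper introduced below therefore has to use the standard
ww-mutex acquire/backoff dance, since taking the reservation lock can now
return -EDEADLK and force a restart. Condensed from the patch itself (a
sketch of the retry loop only, with the vma/node error unwinding trimmed):

	struct i915_gem_ww_ctx ww;
	int ret;

	i915_gem_ww_ctx_init(&ww, true);	/* interruptible waits */
retry:
	ret = i915_gem_object_lock(obj, &ww);	/* reservation (dma-resv) lock */
	if (!ret)
		ret = i915_gem_object_set_to_gtt_domain(obj, write);
	/* ... pin the vma and the backing pages under the same ww ctx ... */

	if (ret == -EDEADLK) {
		/* Lost the lock ordering: drop all locks and try again. */
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);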
The fence lock is also removed; it would conflict with the fence
annotations, because of the memory allocations done when pagefaulting
inside copy_*_user.
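(Schematically, the pattern the annotations would flag; this is an
illustration of the problem, not code from the patch:

	fence = i915_gem_object_lock_fence(obj);	/* fence critical section */
	unwritten = copy_to_user(user_data, vaddr, length);
	/* A fault here allocates memory (GFP_KERNEL); reclaim may in turn
	 * wait on fence signaling -- a potential deadlock under the
	 * dma-fence annotations. */
	i915_gem_object_unlock_fence(obj, fence);

After this patch the shmem paths drop the object lock before the copy
loop and rely only on the page pin, so no user copy happens inside a
fence critical section.)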
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
[danvet: Pick the older version to avoid the conflicts]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210128162612.927917-31-maarten.lankhorst@linux.intel.com
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-31-maarten.lankhorst@linux.intel.com
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 224 |
1 file changed, 114 insertions, 110 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index caa7bedb564f..8027cb316d4b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -204,7 +204,6 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
 {
 	unsigned int needs_clflush;
 	unsigned int idx, offset;
-	struct dma_fence *fence;
 	char __user *user_data;
 	u64 remain;
 	int ret;
@@ -213,19 +212,17 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	ret = i915_gem_object_pin_pages(obj);
+	if (ret)
+		goto err_unlock;
+
 	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
-	if (ret) {
-		i915_gem_object_unlock(obj);
-		return ret;
-	}
+	if (ret)
+		goto err_unpin;
 
-	fence = i915_gem_object_lock_fence(obj);
 	i915_gem_object_finish_access(obj);
 	i915_gem_object_unlock(obj);
 
-	if (!fence)
-		return -ENOMEM;
-
 	remain = args->size;
 	user_data = u64_to_user_ptr(args->data_ptr);
 	offset = offset_in_page(args->offset);
@@ -243,7 +240,13 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
 		offset = 0;
 	}
 
-	i915_gem_object_unlock_fence(obj, fence);
+	i915_gem_object_unpin_pages(obj);
+	return ret;
+
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+err_unlock:
+	i915_gem_object_unlock(obj);
 	return ret;
 }
 
@@ -271,52 +274,102 @@ gtt_user_read(struct io_mapping *mapping,
 	return unwritten;
 }
 
-static int
-i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
-		   const struct drm_i915_gem_pread *args)
+static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
+					     struct drm_mm_node *node,
+					     bool write)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct i915_ggtt *ggtt = &i915->ggtt;
-	intel_wakeref_t wakeref;
-	struct drm_mm_node node;
-	struct dma_fence *fence;
-	void __user *user_data;
 	struct i915_vma *vma;
-	u64 remain, offset;
+	struct i915_gem_ww_ctx ww;
 	int ret;
 
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+	i915_gem_ww_ctx_init(&ww, true);
+retry:
 	vma = ERR_PTR(-ENODEV);
+	ret = i915_gem_object_lock(obj, &ww);
+	if (ret)
+		goto err_ww;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, write);
+	if (ret)
+		goto err_ww;
+
 	if (!i915_gem_object_is_tiled(obj))
-		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
-					       PIN_MAPPABLE |
-					       PIN_NONBLOCK /* NOWARN */ |
-					       PIN_NOEVICT);
-	if (!IS_ERR(vma)) {
-		node.start = i915_ggtt_offset(vma);
-		node.flags = 0;
+		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
+						  PIN_MAPPABLE |
+						  PIN_NONBLOCK /* NOWARN */ |
+						  PIN_NOEVICT);
+	if (vma == ERR_PTR(-EDEADLK)) {
+		ret = -EDEADLK;
+		goto err_ww;
+	} else if (!IS_ERR(vma)) {
+		node->start = i915_ggtt_offset(vma);
+		node->flags = 0;
 	} else {
-		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
+		ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
 		if (ret)
-			goto out_rpm;
-		GEM_BUG_ON(!drm_mm_node_allocated(&node));
+			goto err_ww;
+		GEM_BUG_ON(!drm_mm_node_allocated(node));
+		vma = NULL;
 	}
 
-	ret = i915_gem_object_lock_interruptible(obj, NULL);
-	if (ret)
-		goto out_unpin;
-
-	ret = i915_gem_object_set_to_gtt_domain(obj, false);
+	ret = i915_gem_object_pin_pages(obj);
 	if (ret) {
-		i915_gem_object_unlock(obj);
-		goto out_unpin;
+		if (drm_mm_node_allocated(node)) {
+			ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
+			remove_mappable_node(ggtt, node);
+		} else {
+			i915_vma_unpin(vma);
+		}
 	}
 
-	fence = i915_gem_object_lock_fence(obj);
-	i915_gem_object_unlock(obj);
-	if (!fence) {
-		ret = -ENOMEM;
-		goto out_unpin;
+err_ww:
+	if (ret == -EDEADLK) {
+		ret = i915_gem_ww_ctx_backoff(&ww);
+		if (!ret)
+			goto retry;
+	}
+	i915_gem_ww_ctx_fini(&ww);
+
+	return ret ? ERR_PTR(ret) : vma;
+}
+
+static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
+				 struct drm_mm_node *node,
+				 struct i915_vma *vma)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_ggtt *ggtt = &i915->ggtt;
+
+	i915_gem_object_unpin_pages(obj);
+	if (drm_mm_node_allocated(node)) {
+		ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
+		remove_mappable_node(ggtt, node);
+	} else {
+		i915_vma_unpin(vma);
+	}
+}
+
+static int
+i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
+		   const struct drm_i915_gem_pread *args)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_ggtt *ggtt = &i915->ggtt;
+	intel_wakeref_t wakeref;
+	struct drm_mm_node node;
+	void __user *user_data;
+	struct i915_vma *vma;
+	u64 remain, offset;
+	int ret = 0;
+
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+	vma = i915_gem_gtt_prepare(obj, &node, false);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto out_rpm;
 	}
 
 	user_data = u64_to_user_ptr(args->data_ptr);
@@ -353,14 +406,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 		offset += page_length;
 	}
 
-	i915_gem_object_unlock_fence(obj, fence);
-out_unpin:
-	if (drm_mm_node_allocated(&node)) {
-		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
-		remove_mappable_node(ggtt, &node);
-	} else {
-		i915_vma_unpin(vma);
-	}
+	i915_gem_gtt_cleanup(obj, &node, vma);
 out_rpm:
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	return ret;
@@ -425,15 +471,10 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto out;
 
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		goto out;
-
 	ret = i915_gem_shmem_pread(obj, args);
 	if (ret == -EFAULT || ret == -ENODEV)
 		ret = i915_gem_gtt_pread(obj, args);
 
-	i915_gem_object_unpin_pages(obj);
 out:
 	i915_gem_object_put(obj);
 	return ret;
@@ -481,11 +522,10 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
 	intel_wakeref_t wakeref;
 	struct drm_mm_node node;
-	struct dma_fence *fence;
 	struct i915_vma *vma;
 	u64 remain, offset;
 	void __user *user_data;
-	int ret;
+	int ret = 0;
 
 	if (i915_gem_object_has_struct_page(obj)) {
 		/*
@@ -503,37 +543,10 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 		wakeref = intel_runtime_pm_get(rpm);
 	}
 
-	vma = ERR_PTR(-ENODEV);
-	if (!i915_gem_object_is_tiled(obj))
-		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
-					       PIN_MAPPABLE |
-					       PIN_NONBLOCK /* NOWARN */ |
-					       PIN_NOEVICT);
-	if (!IS_ERR(vma)) {
-		node.start = i915_ggtt_offset(vma);
-		node.flags = 0;
-	} else {
-		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
-		if (ret)
-			goto out_rpm;
-		GEM_BUG_ON(!drm_mm_node_allocated(&node));
-	}
-
-	ret = i915_gem_object_lock_interruptible(obj, NULL);
-	if (ret)
-		goto out_unpin;
-
-	ret = i915_gem_object_set_to_gtt_domain(obj, true);
-	if (ret) {
-		i915_gem_object_unlock(obj);
-		goto out_unpin;
-	}
-
-	fence = i915_gem_object_lock_fence(obj);
-	i915_gem_object_unlock(obj);
-	if (!fence) {
-		ret = -ENOMEM;
-		goto out_unpin;
+	vma = i915_gem_gtt_prepare(obj, &node, true);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto out_rpm;
 	}
 
 	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
@@ -582,14 +595,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 
-	i915_gem_object_unlock_fence(obj, fence);
-out_unpin:
-	if (drm_mm_node_allocated(&node)) {
-		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
-		remove_mappable_node(ggtt, &node);
-	} else {
-		i915_vma_unpin(vma);
-	}
+	i915_gem_gtt_cleanup(obj, &node, vma);
 out_rpm:
 	intel_runtime_pm_put(rpm, wakeref);
 	return ret;
@@ -629,7 +635,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
 	unsigned int partial_cacheline_write;
 	unsigned int needs_clflush;
 	unsigned int offset, idx;
-	struct dma_fence *fence;
 	void __user *user_data;
 	u64 remain;
 	int ret;
@@ -638,19 +643,17 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	ret = i915_gem_object_pin_pages(obj);
+	if (ret)
+		goto err_unlock;
+
 	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
-	if (ret) {
-		i915_gem_object_unlock(obj);
-		return ret;
-	}
+	if (ret)
+		goto err_unpin;
 
-	fence = i915_gem_object_lock_fence(obj);
 	i915_gem_object_finish_access(obj);
 	i915_gem_object_unlock(obj);
 
-	if (!fence)
-		return -ENOMEM;
-
 	/* If we don't overwrite a cacheline completely we need to be
 	 * careful to have up-to-date data by first clflushing. Don't
 	 * overcomplicate things and flush the entire patch.
@@ -678,8 +681,14 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
 	}
 
 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
-	i915_gem_object_unlock_fence(obj, fence);
 
+	i915_gem_object_unpin_pages(obj);
+	return ret;
+
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+err_unlock:
+	i915_gem_object_unlock(obj);
 	return ret;
 }
 
@@ -743,10 +752,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto err;
 
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		goto err;
-
 	ret = -EFAULT;
 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
 	 * it would end up going through the fenced access, and we'll get
@@ -767,7 +772,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 			ret = i915_gem_shmem_pwrite(obj, args);
 	}
 
-	i915_gem_object_unpin_pages(obj);
 err:
 	i915_gem_object_put(obj);
 	return ret;