Diffstat (limited to 'drivers/gpu/drm/i915/i915_vma.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_vma.c | 569
1 file changed, 414 insertions, 155 deletions
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 68cf1d392250..845cd88f8313 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -38,6 +38,18 @@ #include "i915_sw_fence_work.h" #include "i915_trace.h" #include "i915_vma.h" +#include "i915_vma_resource.h" + +static inline void assert_vma_held_evict(const struct i915_vma *vma) +{ + /* + * We may be forced to unbind when the vm is dead, to clean it up. + * This is the only exception to the requirement of the object lock + * being held. + */ + if (atomic_read(&vma->vm->open)) + assert_object_held_shared(vma->obj); +} static struct kmem_cache *slab_vmas; @@ -285,7 +297,7 @@ struct i915_vma_work { struct dma_fence_work base; struct i915_address_space *vm; struct i915_vm_pt_stash stash; - struct i915_vma *vma; + struct i915_vma_resource *vma_res; struct drm_i915_gem_object *pinned; struct i915_sw_dma_fence_cb cb; enum i915_cache_level cache_level; @@ -295,23 +307,24 @@ struct i915_vma_work { static void __vma_bind(struct dma_fence_work *work) { struct i915_vma_work *vw = container_of(work, typeof(*vw), base); - struct i915_vma *vma = vw->vma; + struct i915_vma_resource *vma_res = vw->vma_res; + + vma_res->ops->bind_vma(vma_res->vm, &vw->stash, + vma_res, vw->cache_level, vw->flags); - vma->ops->bind_vma(vw->vm, &vw->stash, - vma, vw->cache_level, vw->flags); } static void __vma_release(struct dma_fence_work *work) { struct i915_vma_work *vw = container_of(work, typeof(*vw), base); - if (vw->pinned) { - __i915_gem_object_unpin_pages(vw->pinned); + if (vw->pinned) i915_gem_object_put(vw->pinned); - } i915_vm_free_pt_stash(vw->vm, &vw->stash); i915_vm_put(vw->vm); + if (vw->vma_res) + i915_vma_resource_put(vw->vma_res); } static const struct dma_fence_work_ops bind_ops = { @@ -375,12 +388,27 @@ static int i915_vma_verify_bind_complete(struct i915_vma *vma) #define i915_vma_verify_bind_complete(_vma) 0 #endif +I915_SELFTEST_EXPORT void +i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res, + struct i915_vma *vma) +{ + struct drm_i915_gem_object *obj = vma->obj; + + i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes, + obj->mm.rsgt, i915_gem_object_is_readonly(obj), + i915_gem_object_is_lmem(obj), obj->mm.region, + vma->ops, vma->private, vma->node.start, + vma->node.size, vma->size); +} + /** * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space. * @vma: VMA to map * @cache_level: mapping cache level * @flags: flags like global or local mapping * @work: preallocated worker for allocating and binding the PTE + * @vma_res: pointer to a preallocated vma resource. The resource is either + * consumed or freed. * * DMA addresses are taken from the scatter-gather table of this object (or of * this VMA in case of non-default GGTT views) and PTE entries set up. 
@@ -389,10 +417,12 @@ static int i915_vma_verify_bind_complete(struct i915_vma *vma) int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags, - struct i915_vma_work *work) + struct i915_vma_work *work, + struct i915_vma_resource *vma_res) { u32 bind_flags; u32 vma_flags; + int ret; lockdep_assert_held(&vma->vm->mutex); GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); @@ -400,11 +430,15 @@ int i915_vma_bind(struct i915_vma *vma, if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start, vma->node.size, - vma->vm->total))) + vma->vm->total))) { + i915_vma_resource_free(vma_res); return -ENODEV; + } - if (GEM_DEBUG_WARN_ON(!flags)) + if (GEM_DEBUG_WARN_ON(!flags)) { + i915_vma_resource_free(vma_res); return -EINVAL; + } bind_flags = flags; bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; @@ -413,16 +447,44 @@ int i915_vma_bind(struct i915_vma *vma, vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; bind_flags &= ~vma_flags; - if (bind_flags == 0) + if (bind_flags == 0) { + i915_vma_resource_free(vma_res); return 0; + } GEM_BUG_ON(!atomic_read(&vma->pages_count)); + /* Wait for or await async unbinds touching our range */ + if (work && bind_flags & vma->vm->bind_async_flags) + ret = i915_vma_resource_bind_dep_await(vma->vm, + &work->base.chain, + vma->node.start, + vma->node.size, + true, + GFP_NOWAIT | + __GFP_RETRY_MAYFAIL | + __GFP_NOWARN); + else + ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start, + vma->node.size, true); + if (ret) { + i915_vma_resource_free(vma_res); + return ret; + } + + if (vma->resource || !vma_res) { + /* Rebinding with an additional I915_VMA_*_BIND */ + GEM_WARN_ON(!vma_flags); + i915_vma_resource_free(vma_res); + } else { + i915_vma_resource_init_from_vma(vma_res, vma); + vma->resource = vma_res; + } trace_i915_vma_bind(vma, bind_flags); if (work && bind_flags & vma->vm->bind_async_flags) { struct dma_fence *prev; - work->vma = vma; + work->vma_res = i915_vma_resource_get(vma->resource); work->cache_level = cache_level; work->flags = bind_flags; @@ -445,17 +507,25 @@ int i915_vma_bind(struct i915_vma *vma, work->base.dma.error = 0; /* enable the queue_work() */ - __i915_gem_object_pin_pages(vma->obj); - work->pinned = i915_gem_object_get(vma->obj); + /* + * If we don't have the refcounted pages list, keep a reference + * on the object to avoid waiting for the async bind to + * complete in the object destruction path. + */ + if (!work->vma_res->bi.pages_rsgt) + work->pinned = i915_gem_object_get(vma->obj); } else { if (vma->obj) { - int ret; - ret = i915_gem_object_wait_moving_fence(vma->obj, true); - if (ret) + if (ret) { + i915_vma_resource_free(vma->resource); + vma->resource = NULL; + return ret; + } } - vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags); + vma->ops->bind_vma(vma->vm, NULL, vma->resource, cache_level, + bind_flags); } if (vma->obj) @@ -655,7 +725,8 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color) * 0 on success, negative error code otherwise. 
*/ static int -i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) +i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, + u64 size, u64 alignment, u64 flags) { unsigned long color; u64 start, end; @@ -707,7 +778,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) range_overflows(offset, size, end)) return -EINVAL; - ret = i915_gem_gtt_reserve(vma->vm, &vma->node, + ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node, size, offset, color, flags); if (ret) @@ -746,7 +817,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) size = round_up(size, I915_GTT_PAGE_SIZE_2M); } - ret = i915_gem_gtt_insert(vma->vm, &vma->node, + ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node, size, alignment, color, start, end, flags); if (ret) @@ -780,9 +851,17 @@ i915_vma_detach(struct i915_vma *vma) static bool try_qad_pin(struct i915_vma *vma, unsigned int flags) { unsigned int bound; - bool pinned = true; bound = atomic_read(&vma->flags); + + if (flags & PIN_VALIDATE) { + flags &= I915_VMA_BIND_MASK; + + return (flags & bound) == flags; + } + + /* with the lock mandatory for unbind, we don't race here */ + flags &= I915_VMA_BIND_MASK; do { if (unlikely(flags & ~bound)) return false; @@ -790,34 +869,10 @@ static bool try_qad_pin(struct i915_vma *vma, unsigned int flags) if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) return false; - if (!(bound & I915_VMA_PIN_MASK)) - goto unpinned; - GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0); } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1)); return true; - -unpinned: - /* - * If pin_count==0, but we are bound, check under the lock to avoid - * racing with a concurrent i915_vma_unbind(). - */ - mutex_lock(&vma->vm->mutex); - do { - if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) { - pinned = false; - break; - } - - if (unlikely(flags & ~bound)) { - pinned = false; - break; - } - } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1)); - mutex_unlock(&vma->vm->mutex); - - return pinned; } static struct scatterlist * @@ -913,30 +968,39 @@ err_st_alloc: } static struct scatterlist * -remap_pages(struct drm_i915_gem_object *obj, - unsigned int offset, unsigned int alignment_pad, - unsigned int width, unsigned int height, - unsigned int src_stride, unsigned int dst_stride, - struct sg_table *st, struct scatterlist *sg) +add_padding_pages(unsigned int count, + struct sg_table *st, struct scatterlist *sg) +{ + st->nents++; + + /* + * The DE ignores the PTEs for the padding tiles, the sg entry + * here is just a convenience to indicate how many padding PTEs + * to insert at this spot. + */ + sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0); + sg_dma_address(sg) = 0; + sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE; + sg = sg_next(sg); + + return sg; +} + +static struct scatterlist * +remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj, + unsigned int offset, unsigned int alignment_pad, + unsigned int width, unsigned int height, + unsigned int src_stride, unsigned int dst_stride, + struct sg_table *st, struct scatterlist *sg, + unsigned int *gtt_offset) { unsigned int row; if (!width || !height) return sg; - if (alignment_pad) { - st->nents++; - - /* - * The DE ignores the PTEs for the padding tiles, the sg entry - * here is just a convenience to indicate how many padding PTEs - * to insert at this spot. 
- */ - sg_set_page(sg, NULL, alignment_pad * 4096, 0); - sg_dma_address(sg) = 0; - sg_dma_len(sg) = alignment_pad * 4096; - sg = sg_next(sg); - } + if (alignment_pad) + sg = add_padding_pages(alignment_pad, st, sg); for (row = 0; row < height; row++) { unsigned int left = width * I915_GTT_PAGE_SIZE; @@ -973,18 +1037,98 @@ remap_pages(struct drm_i915_gem_object *obj, if (!left) continue; + sg = add_padding_pages(left >> PAGE_SHIFT, st, sg); + } + + *gtt_offset += alignment_pad + dst_stride * height; + + return sg; +} + +static struct scatterlist * +remap_contiguous_pages(struct drm_i915_gem_object *obj, + unsigned int obj_offset, + unsigned int count, + struct sg_table *st, struct scatterlist *sg) +{ + struct scatterlist *iter; + unsigned int offset; + + iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset); + GEM_BUG_ON(!iter); + + do { + unsigned int len; + + len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT), + count << PAGE_SHIFT); + sg_set_page(sg, NULL, len, 0); + sg_dma_address(sg) = + sg_dma_address(iter) + (offset << PAGE_SHIFT); + sg_dma_len(sg) = len; + st->nents++; + count -= len >> PAGE_SHIFT; + if (count == 0) + return sg; - /* - * The DE ignores the PTEs for the padding tiles, the sg entry - * here is just a conenience to indicate how many padding PTEs - * to insert at this spot. - */ - sg_set_page(sg, NULL, left, 0); - sg_dma_address(sg) = 0; - sg_dma_len(sg) = left; - sg = sg_next(sg); - } + sg = __sg_next(sg); + iter = __sg_next(iter); + offset = 0; + } while (1); +} + +static struct scatterlist * +remap_linear_color_plane_pages(struct drm_i915_gem_object *obj, + unsigned int obj_offset, unsigned int alignment_pad, + unsigned int size, + struct sg_table *st, struct scatterlist *sg, + unsigned int *gtt_offset) +{ + if (!size) + return sg; + + if (alignment_pad) + sg = add_padding_pages(alignment_pad, st, sg); + + sg = remap_contiguous_pages(obj, obj_offset, size, st, sg); + sg = sg_next(sg); + + *gtt_offset += alignment_pad + size; + + return sg; +} + +static struct scatterlist * +remap_color_plane_pages(const struct intel_remapped_info *rem_info, + struct drm_i915_gem_object *obj, + int color_plane, + struct sg_table *st, struct scatterlist *sg, + unsigned int *gtt_offset) +{ + unsigned int alignment_pad = 0; + + if (rem_info->plane_alignment) + alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset; + + if (rem_info->plane[color_plane].linear) + sg = remap_linear_color_plane_pages(obj, + rem_info->plane[color_plane].offset, + alignment_pad, + rem_info->plane[color_plane].size, + st, sg, + gtt_offset); + + else + sg = remap_tiled_color_plane_pages(obj, + rem_info->plane[color_plane].offset, + alignment_pad, + rem_info->plane[color_plane].width, + rem_info->plane[color_plane].height, + rem_info->plane[color_plane].src_stride, + rem_info->plane[color_plane].dst_stride, + st, sg, + gtt_offset); return sg; } @@ -1013,21 +1157,8 @@ intel_remap_pages(struct intel_remapped_info *rem_info, st->nents = 0; sg = st->sgl; - for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { - unsigned int alignment_pad = 0; - - if (rem_info->plane_alignment) - alignment_pad = ALIGN(gtt_offset, rem_info->plane_alignment) - gtt_offset; - - sg = remap_pages(obj, - rem_info->plane[i].offset, alignment_pad, - rem_info->plane[i].width, rem_info->plane[i].height, - rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride, - st, sg); - - gtt_offset += alignment_pad + - rem_info->plane[i].dst_stride * rem_info->plane[i].height; - } + for (i = 0 ; i < 
ARRAY_SIZE(rem_info->plane); i++) + sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gtt_offset); i915_sg_trim(st); @@ -1049,9 +1180,8 @@ intel_partial_pages(const struct i915_ggtt_view *view, struct drm_i915_gem_object *obj) { struct sg_table *st; - struct scatterlist *sg, *iter; + struct scatterlist *sg; unsigned int count = view->partial.size; - unsigned int offset; int ret = -ENOMEM; st = kmalloc(sizeof(*st), GFP_KERNEL); @@ -1062,34 +1192,14 @@ intel_partial_pages(const struct i915_ggtt_view *view, if (ret) goto err_sg_alloc; - iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset); - GEM_BUG_ON(!iter); - - sg = st->sgl; st->nents = 0; - do { - unsigned int len; - - len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT), - count << PAGE_SHIFT); - sg_set_page(sg, NULL, len, 0); - sg_dma_address(sg) = - sg_dma_address(iter) + (offset << PAGE_SHIFT); - sg_dma_len(sg) = len; - st->nents++; - count -= len >> PAGE_SHIFT; - if (count == 0) { - sg_mark_end(sg); - i915_sg_trim(st); /* Drop any unused tail entries. */ + sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl); - return st; - } + sg_mark_end(sg); + i915_sg_trim(st); /* Drop any unused tail entries. */ - sg = __sg_next(sg); - iter = __sg_next(iter); - offset = 0; - } while (1); + return st; err_sg_alloc: kfree(st); @@ -1101,7 +1211,6 @@ static int __i915_vma_get_pages(struct i915_vma *vma) { struct sg_table *pages; - int ret; /* * The vma->pages are only valid within the lifespan of the borrowed @@ -1134,18 +1243,16 @@ __i915_vma_get_pages(struct i915_vma *vma) break; } - ret = 0; if (IS_ERR(pages)) { - ret = PTR_ERR(pages); - pages = NULL; drm_err(&vma->vm->i915->drm, - "Failed to get pages for VMA view type %u (%d)!\n", - vma->ggtt_view.type, ret); + "Failed to get pages for VMA view type %u (%ld)!\n", + vma->ggtt_view.type, PTR_ERR(pages)); + return PTR_ERR(pages); } vma->pages = pages; - return ret; + return 0; } I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma) @@ -1177,25 +1284,14 @@ err_unpin: static void __vma_put_pages(struct i915_vma *vma, unsigned int count) { /* We allocate under vma_get_pages, so beware the shrinker */ - struct sg_table *pages = READ_ONCE(vma->pages); - GEM_BUG_ON(atomic_read(&vma->pages_count) < count); if (atomic_sub_return(count, &vma->pages_count) == 0) { - /* - * The atomic_sub_return is a read barrier for the READ_ONCE of - * vma->pages above. - * - * READ_ONCE is safe because this is either called from the same - * function (i915_vma_pin_ww), or guarded by vma->vm->mutex. - * - * TODO: We're leaving vma->pages dangling, until vma->obj->resv - * lock is required. 
- */ - if (pages != vma->obj->mm.pages) { - sg_free_table(pages); - kfree(pages); + if (vma->pages != vma->obj->mm.pages) { + sg_free_table(vma->pages); + kfree(vma->pages); } + vma->pages = NULL; i915_gem_object_unpin_pages(vma->obj); } @@ -1228,6 +1324,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, { struct i915_vma_work *work = NULL; struct dma_fence *moving = NULL; + struct i915_vma_resource *vma_res = NULL; intel_wakeref_t wakeref = 0; unsigned int bound; int err; @@ -1241,7 +1338,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL))); /* First try and grab the pin without rebinding the vma */ - if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK)) + if (try_qad_pin(vma, flags)) return 0; err = i915_vma_get_pages(vma); @@ -1282,6 +1379,12 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, } } + vma_res = i915_vma_resource_alloc(); + if (IS_ERR(vma_res)) { + err = PTR_ERR(vma_res); + goto err_fence; + } + /* * Differentiate between user/kernel vma inside the aliasing-ppgtt. * @@ -1302,7 +1405,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, err = mutex_lock_interruptible_nested(&vma->vm->mutex, !(flags & PIN_GLOBAL)); if (err) - goto err_fence; + goto err_vma_res; /* No more allocations allowed now we hold vm->mutex */ @@ -1323,7 +1426,8 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, } if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) { - __i915_vma_pin(vma); + if (!(flags & PIN_VALIDATE)) + __i915_vma_pin(vma); goto err_unlock; } @@ -1332,7 +1436,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, goto err_unlock; if (!(bound & I915_VMA_BIND_MASK)) { - err = i915_vma_insert(vma, size, alignment, flags); + err = i915_vma_insert(vma, ww, size, alignment, flags); if (err) goto err_active; @@ -1343,7 +1447,8 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, GEM_BUG_ON(!vma->pages); err = i915_vma_bind(vma, vma->obj->cache_level, - flags, work); + flags, work, vma_res); + vma_res = NULL; if (err) goto err_remove; @@ -1352,8 +1457,10 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count); list_move_tail(&vma->vm_link, &vma->vm->bound_list); - __i915_vma_pin(vma); - GEM_BUG_ON(!i915_vma_is_pinned(vma)); + if (!(flags & PIN_VALIDATE)) { + __i915_vma_pin(vma); + GEM_BUG_ON(!i915_vma_is_pinned(vma)); + } GEM_BUG_ON(!i915_vma_is_bound(vma, flags)); GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); @@ -1366,6 +1473,8 @@ err_active: i915_active_release(&vma->active); err_unlock: mutex_unlock(&vma->vm->mutex); +err_vma_res: + i915_vma_resource_free(vma_res); err_fence: if (work) dma_fence_work_commit_imm(&work->base); @@ -1412,7 +1521,12 @@ static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, /* Unlike i915_vma_pin, we don't take no for an answer! */ flush_idle_contexts(vm->gt); if (mutex_lock_interruptible(&vm->mutex) == 0) { - i915_gem_evict_vm(vm); + /* + * We pass NULL ww here, as we don't want to unbind + * locked objects when called from execbuf when pinning + * is removed. This would probably regress badly. 
+ */ + i915_gem_evict_vm(vm, NULL); mutex_unlock(&vm->mutex); } } while (1); @@ -1516,6 +1630,7 @@ void i915_vma_release(struct kref *ref) i915_vm_put(vma->vm); i915_active_fini(&vma->active); + GEM_WARN_ON(vma->resource); i915_vma_free(vma); } @@ -1548,8 +1663,16 @@ void i915_vma_parked(struct intel_gt *gt) struct drm_i915_gem_object *obj = vma->obj; struct i915_address_space *vm = vma->vm; - INIT_LIST_HEAD(&vma->closed_link); - __i915_vma_put(vma); + if (i915_gem_object_trylock(obj, NULL)) { + INIT_LIST_HEAD(&vma->closed_link); + __i915_vma_put(vma); + i915_gem_object_unlock(obj); + } else { + /* back you go.. */ + spin_lock_irq(&gt->closed_lock); + list_add(&vma->closed_link, &gt->closed_vma); + spin_unlock_irq(&gt->closed_lock); + } i915_gem_object_put(obj); i915_vm_close(vm); @@ -1600,8 +1723,6 @@ static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request * { int err; - GEM_BUG_ON(!i915_vma_is_pinned(vma)); - /* Wait for the vma to be bound before we start! */ err = __i915_request_await_bind(rq, vma); if (err) @@ -1620,6 +1741,8 @@ int _i915_vma_move_to_active(struct i915_vma *vma, assert_object_held(obj); + GEM_BUG_ON(!vma->pages); + err = __i915_vma_move_to_active(vma, rq); if (unlikely(err)) return err; @@ -1662,9 +1785,13 @@ int _i915_vma_move_to_active(struct i915_vma *vma, return 0; } -void __i915_vma_evict(struct i915_vma *vma) +struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async) { + struct i915_vma_resource *vma_res = vma->resource; + struct dma_fence *unbind_fence; + GEM_BUG_ON(i915_vma_is_pinned(vma)); + assert_vma_held_evict(vma); if (i915_vma_is_map_and_fenceable(vma)) { /* Force a pagefault for domain tracking on next user access */ @@ -1694,15 +1821,36 @@ void __i915_vma_evict(struct i915_vma *vma) GEM_BUG_ON(vma->fence); GEM_BUG_ON(i915_vma_has_userfault(vma)); - if (likely(atomic_read(&vma->vm->open))) { - trace_i915_vma_unbind(vma); - vma->ops->unbind_vma(vma->vm, vma); - } + /* Object backend must be async capable. */ + GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt); + + /* If vm is not open, unbind is a nop. */ + vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) && + atomic_read(&vma->vm->open); + trace_i915_vma_unbind(vma); + + unbind_fence = i915_vma_resource_unbind(vma_res); + vma->resource = NULL; + atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE), &vma->flags); i915_vma_detach(vma); + + if (!async && unbind_fence) { + dma_fence_wait(unbind_fence, false); + dma_fence_put(unbind_fence); + unbind_fence = NULL; + } + + /* + * Binding itself may not have completed until the unbind fence signals, + * so don't drop the pages until that happens, unless the resource is + * async_capable. 
+ */ + vma_unbind_pages(vma); + return unbind_fence; } int __i915_vma_unbind(struct i915_vma *vma) @@ -1710,6 +1858,7 @@ int __i915_vma_unbind(struct i915_vma *vma) int ret; lockdep_assert_held(&vma->vm->mutex); + assert_vma_held_evict(vma); if (!drm_mm_node_allocated(&vma->node)) return 0; @@ -1729,18 +1878,55 @@ int __i915_vma_unbind(struct i915_vma *vma) return ret; GEM_BUG_ON(i915_vma_is_active(vma)); - __i915_vma_evict(vma); + __i915_vma_evict(vma, false); drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */ return 0; } +static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma) +{ + struct dma_fence *fence; + + lockdep_assert_held(&vma->vm->mutex); + + if (!drm_mm_node_allocated(&vma->node)) + return NULL; + + if (i915_vma_is_pinned(vma) || + &vma->obj->mm.rsgt->table != vma->resource->bi.pages) + return ERR_PTR(-EAGAIN); + + /* + * We probably need to replace this with awaiting the fences of the + * object's dma_resv when the vma active goes away. When doing that + * we need to be careful to not add the vma_resource unbind fence + * immediately to the object's dma_resv, because then unbinding + * the next vma from the object, in case there are many, will + * actually await the unbinding of the previous vmas, which is + * undesirable. + */ + if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active, + I915_ACTIVE_AWAIT_EXCL | + I915_ACTIVE_AWAIT_ACTIVE) < 0) { + return ERR_PTR(-EBUSY); + } + + fence = __i915_vma_evict(vma, true); + + drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */ + + return fence; +} + int i915_vma_unbind(struct i915_vma *vma) { struct i915_address_space *vm = vma->vm; intel_wakeref_t wakeref = 0; int err; + assert_object_held_shared(vma->obj); + /* Optimistic wait before taking the mutex */ err = i915_vma_sync(vma); if (err) @@ -1771,6 +1957,79 @@ out_rpm: return err; } +int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm) +{ + struct drm_i915_gem_object *obj = vma->obj; + struct i915_address_space *vm = vma->vm; + intel_wakeref_t wakeref = 0; + struct dma_fence *fence; + int err; + + /* + * We need the dma-resv lock since we add the + * unbind fence to the dma-resv object. + */ + assert_object_held(obj); + + if (!drm_mm_node_allocated(&vma->node)) + return 0; + + if (i915_vma_is_pinned(vma)) { + vma_print_allocator(vma, "is pinned"); + return -EAGAIN; + } + + if (!obj->mm.rsgt) + return -EBUSY; + + err = dma_resv_reserve_shared(obj->base.resv, 1); + if (err) + return -EBUSY; + + /* + * It would be great if we could grab this wakeref from the + * async unbind work if needed, but we can't because it uses + * kmalloc and it's in the dma-fence signalling critical path. 
+ */ + if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) + wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); + + if (trylock_vm && !mutex_trylock(&vm->mutex)) { + err = -EBUSY; + goto out_rpm; + } else if (!trylock_vm) { + err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref); + if (err) + goto out_rpm; + } + + fence = __i915_vma_unbind_async(vma); + mutex_unlock(&vm->mutex); + if (IS_ERR_OR_NULL(fence)) { + err = PTR_ERR_OR_ZERO(fence); + goto out_rpm; + } + + dma_resv_add_shared_fence(obj->base.resv, fence); + dma_fence_put(fence); + +out_rpm: + if (wakeref) + intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); + return err; +} + +int i915_vma_unbind_unlocked(struct i915_vma *vma) +{ + int err; + + i915_gem_object_lock(vma->obj, NULL); + err = i915_vma_unbind(vma); + i915_gem_object_unlock(vma->obj); + + return err; +} + struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma) { i915_gem_object_make_unshrinkable(vma->obj); |