Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_pages.c')
-rw-r--r-- | drivers/gpu/drm/i915/gem/i915_gem_pages.c | 109
1 files changed, 69 insertions, 40 deletions
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 43028f3539a6..aed8a37ccdc9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -19,7 +19,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 	bool shrinkable;
 	int i;
 
-	lockdep_assert_held(&obj->mm.lock);
+	assert_object_held_shared(obj);
 
 	if (i915_gem_object_is_volatile(obj))
 		obj->mm.madv = I915_MADV_DONTNEED;
@@ -70,6 +70,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 		struct list_head *list;
 		unsigned long flags;
 
+		assert_object_held(obj);
 		spin_lock_irqsave(&i915->mm.obj_lock, flags);
 
 		i915->mm.shrink_count++;
@@ -91,6 +92,8 @@ int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	int err;
 
+	assert_object_held_shared(obj);
+
 	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
 		drm_dbg(&i915->drm,
 			"Attempting to obtain a purgeable object\n");
@@ -114,23 +117,41 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
 	int err;
 
-	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
-	if (err)
-		return err;
+	assert_object_held(obj);
+
+	assert_object_held_shared(obj);
 
 	if (unlikely(!i915_gem_object_has_pages(obj))) {
 		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
 
 		err = ____i915_gem_object_get_pages(obj);
 		if (err)
-			goto unlock;
+			return err;
 
 		smp_mb__before_atomic();
 	}
 	atomic_inc(&obj->mm.pages_pin_count);
 
-unlock:
-	mutex_unlock(&obj->mm.lock);
+	return 0;
+}
+
+int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
+{
+	struct i915_gem_ww_ctx ww;
+	int err;
+
+	i915_gem_ww_ctx_init(&ww, true);
+retry:
+	err = i915_gem_object_lock(obj, &ww);
+	if (!err)
+		err = i915_gem_object_pin_pages(obj);
+
+	if (err == -EDEADLK) {
+		err = i915_gem_ww_ctx_backoff(&ww);
+		if (!err)
+			goto retry;
+	}
+	i915_gem_ww_ctx_fini(&ww);
 	return err;
 }
 
@@ -145,7 +166,7 @@ void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 /* Try to discard unwanted pages */
 void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
 {
-	lockdep_assert_held(&obj->mm.lock);
+	assert_object_held_shared(obj);
 	GEM_BUG_ON(i915_gem_object_has_pages(obj));
 
 	if (obj->ops->writeback)
@@ -176,6 +197,8 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 {
 	struct sg_table *pages;
 
+	assert_object_held_shared(obj);
+
 	pages = fetch_and_zero(&obj->mm.pages);
 	if (IS_ERR_OR_NULL(pages))
 		return pages;
@@ -199,17 +222,12 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 {
 	struct sg_table *pages;
-	int err;
 
 	if (i915_gem_object_has_pinned_pages(obj))
 		return -EBUSY;
 
 	/* May be called by shrinker from within get_pages() (on another bo) */
-	mutex_lock(&obj->mm.lock);
-	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
-		err = -EBUSY;
-		goto unlock;
-	}
+	assert_object_held_shared(obj);
 
 	i915_gem_object_release_mmap_offset(obj);
 
@@ -226,17 +244,10 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	 * get_pages backends we should be better able to handle the
 	 * cancellation of the async task in a more uniform manner.
 	 */
-	if (!pages && !i915_gem_object_needs_async_cancel(obj))
-		pages = ERR_PTR(-EINVAL);
-
-	if (!IS_ERR(pages))
+	if (!IS_ERR_OR_NULL(pages))
 		obj->ops->put_pages(obj, pages);
 
-	err = 0;
-unlock:
-	mutex_unlock(&obj->mm.lock);
-
-	return err;
+	return 0;
 }
 
 /* The 'mapping' part of i915_gem_object_pin_map() below */
@@ -333,18 +344,15 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 			      enum i915_map_type type)
 {
 	enum i915_map_type has_type;
-	unsigned int flags;
 	bool pinned;
 	void *ptr;
 	int err;
 
-	flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
-	if (!i915_gem_object_type_has(obj, flags))
+	if (!i915_gem_object_has_struct_page(obj) &&
+	    !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
 		return ERR_PTR(-ENXIO);
 
-	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
-	if (err)
-		return ERR_PTR(err);
+	assert_object_held(obj);
 
 	pinned = !(type & I915_MAP_OVERRIDE);
 	type &= ~I915_MAP_OVERRIDE;
@@ -354,10 +362,8 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
 
 			err = ____i915_gem_object_get_pages(obj);
-			if (err) {
-				ptr = ERR_PTR(err);
-				goto out_unlock;
-			}
+			if (err)
+				return ERR_PTR(err);
 
 			smp_mb__before_atomic();
 		}
@@ -392,13 +398,23 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 		obj->mm.mapping = page_pack_bits(ptr, type);
 	}
 
-out_unlock:
-	mutex_unlock(&obj->mm.lock);
 	return ptr;
 
 err_unpin:
 	atomic_dec(&obj->mm.pages_pin_count);
-	goto out_unlock;
+	return ptr;
+}
+
+void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
+				       enum i915_map_type type)
+{
+	void *ret;
+
+	i915_gem_object_lock(obj, NULL);
+	ret = i915_gem_object_pin_map(obj, type);
+	i915_gem_object_unlock(obj);
+
+	return ret;
 }
 
 void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
@@ -448,7 +464,8 @@ struct scatterlist *
 __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
 			 struct i915_gem_object_page_iter *iter,
 			 unsigned int n,
-			 unsigned int *offset)
+			 unsigned int *offset,
+			 bool allow_alloc)
 {
 	const bool dma = iter == &obj->mm.get_dma_page;
 	struct scatterlist *sg;
@@ -470,6 +487,9 @@ __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
 	if (n < READ_ONCE(iter->sg_idx))
 		goto lookup;
 
+	if (!allow_alloc)
+		goto manual_lookup;
+
 	mutex_lock(&iter->lock);
 
 	/* We prefer to reuse the last sg so that repeated lookup of this
@@ -519,7 +539,16 @@ scan:
 	if (unlikely(n < idx)) /* insertion completed by another thread */
 		goto lookup;
 
-	/* In case we failed to insert the entry into the radixtree, we need
+	goto manual_walk;
+
+manual_lookup:
+	idx = 0;
+	sg = obj->mm.pages->sgl;
+	count = __sg_page_count(sg);
+
+manual_walk:
+	/*
+	 * In case we failed to insert the entry into the radixtree, we need
 	 * to look beyond the current sg.
 	 */
 	while (idx + count <= n) {
@@ -566,7 +595,7 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
 
 	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
 
-	sg = i915_gem_object_get_sg(obj, n, &offset);
+	sg = i915_gem_object_get_sg(obj, n, &offset, true);
 	return nth_page(sg_page(sg), offset);
 }
 
@@ -592,7 +621,7 @@ i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
 	struct scatterlist *sg;
 	unsigned int offset;
 
-	sg = i915_gem_object_get_sg_dma(obj, n, &offset);
+	sg = i915_gem_object_get_sg_dma(obj, n, &offset, true);
 	if (len)
 		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);
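
Usage note (not part of the patch): the diff drops the per-object obj->mm.lock in favour of the dma-resv object lock, so the page-pinning paths now expect the caller to already hold the object lock (assert_object_held), or to go through the new i915_gem_object_pin_pages_unlocked() / i915_gem_object_pin_map_unlocked() helpers. Below is a minimal caller-side sketch of the ww-mutex acquire/backoff pattern the new helpers use; example_use_object_pages() is a hypothetical function written for illustration, only the i915_gem_ww_ctx_* and i915_gem_object_* calls come from the driver itself.

/* Sketch only: mirrors i915_gem_object_pin_pages_unlocked() from the diff. */
static int example_use_object_pages(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	/* Interruptible ww acquire context, as in the new helper above. */
	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);
	if (!err) {
		/* ... use the pinned backing store here ... */
		i915_gem_object_unpin_pages(obj);
	}

	if (err == -EDEADLK) {
		/* Lost the ww ordering to another context: back off and retry. */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);	/* drops objects locked with this ctx */
	return err;
}

The same retry-on-EDEADLK loop applies to i915_gem_object_pin_map() now that it asserts the object lock instead of taking obj->mm.lock itself.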