Diffstat (limited to 'drivers/gpu/drm/i915/i915_vma.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_vma.c | 80
1 file changed, 57 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 135390d975b6..f51fd9fd4c89 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -26,6 +26,7 @@
 #include <linux/dma-fence-array.h>
 #include <drm/drm_gem.h>
 
+#include "display/intel_display.h"
 #include "display/intel_frontbuffer.h"
 #include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_tiling.h"
@@ -418,8 +419,8 @@ i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
 	i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
 			       obj->mm.rsgt, i915_gem_object_is_readonly(obj),
 			       i915_gem_object_is_lmem(obj), obj->mm.region,
-			       vma->ops, vma->private, vma->node.start,
-			       vma->node.size, vma->size);
+			       vma->ops, vma->private, __i915_vma_offset(vma),
+			       __i915_vma_size(vma), vma->size, vma->guard);
 }
 
 /**
@@ -447,7 +448,7 @@ int i915_vma_bind(struct i915_vma *vma,
 
 	lockdep_assert_held(&vma->vm->mutex);
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-	GEM_BUG_ON(vma->size > vma->node.size);
+	GEM_BUG_ON(vma->size > i915_vma_size(vma));
 
 	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
 					      vma->node.size,
@@ -569,8 +570,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 					  vma->obj->base.size);
 	} else if (i915_vma_is_map_and_fenceable(vma)) {
 		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
-					vma->node.start,
-					vma->node.size);
+					i915_vma_offset(vma),
+					i915_vma_size(vma));
 	} else {
 		ptr = (void __iomem *)
 			i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
@@ -659,22 +660,26 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
 	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
 		return true;
 
-	if (vma->node.size < size)
+	if (i915_vma_size(vma) < size)
 		return true;
 
 	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
-	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
+	if (alignment && !IS_ALIGNED(i915_vma_offset(vma), alignment))
 		return true;
 
 	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
 		return true;
 
 	if (flags & PIN_OFFSET_BIAS &&
-	    vma->node.start < (flags & PIN_OFFSET_MASK))
+	    i915_vma_offset(vma) < (flags & PIN_OFFSET_MASK))
 		return true;
 
 	if (flags & PIN_OFFSET_FIXED &&
-	    vma->node.start != (flags & PIN_OFFSET_MASK))
+	    i915_vma_offset(vma) != (flags & PIN_OFFSET_MASK))
+		return true;
+
+	if (flags & PIN_OFFSET_GUARD &&
+	    vma->guard < (flags & PIN_OFFSET_MASK))
 		return true;
 
 	return false;
@@ -687,10 +692,11 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
 	GEM_BUG_ON(!vma->fence_size);
 
-	fenceable = (vma->node.size >= vma->fence_size &&
-		     IS_ALIGNED(vma->node.start, vma->fence_alignment));
+	fenceable = (i915_vma_size(vma) >= vma->fence_size &&
+		     IS_ALIGNED(i915_vma_offset(vma), vma->fence_alignment));
 
-	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
+	mappable = i915_ggtt_offset(vma) + vma->fence_size <=
+		   i915_vm_to_ggtt(vma->vm)->mappable_end;
 
 	if (mappable && fenceable)
 		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
@@ -748,15 +754,16 @@ static int
 i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 		u64 size, u64 alignment, u64 flags)
 {
-	unsigned long color;
+	unsigned long color, guard;
 	u64 start, end;
 	int ret;
 
 	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
+	GEM_BUG_ON(hweight64(flags & (PIN_OFFSET_GUARD | PIN_OFFSET_FIXED | PIN_OFFSET_BIAS)) > 1);
 
 	size = max(size, vma->size);
-	alignment = max(alignment, vma->display_alignment);
+	alignment = max_t(typeof(alignment), alignment, vma->display_alignment);
 	if (flags & PIN_MAPPABLE) {
 		size = max_t(typeof(size), size, vma->fence_size);
 		alignment = max_t(typeof(alignment),
@@ -767,6 +774,18 @@ i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
 	GEM_BUG_ON(!is_power_of_2(alignment));
 
+	guard = vma->guard; /* retain guard across rebinds */
+	if (flags & PIN_OFFSET_GUARD) {
+		GEM_BUG_ON(overflows_type(flags & PIN_OFFSET_MASK, u32));
+		guard = max_t(u32, guard, flags & PIN_OFFSET_MASK);
+	}
+	/*
+	 * As we align the node upon insertion, but the hardware gets
+	 * node.start + guard, the easiest way to make that work is
+	 * to make the guard a multiple of the alignment size.
+	 */
+	guard = ALIGN(guard, alignment);
+
 	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
 	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
 
@@ -779,11 +798,12 @@ i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 		alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj));
 
-	/* If binding the object/GGTT view requires more space than the entire
+	/*
+	 * If binding the object/GGTT view requires more space than the entire
 	 * aperture has, reject it early before evicting everything in a vain
 	 * attempt to find space.
 	 */
-	if (size > end) {
+	if (size > end - 2 * guard) {
 		drm_dbg(&to_i915(vma->obj->base.dev)->drm,
 			"Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
 			size, flags & PIN_MAPPABLE ? "mappable" : "total",
 			end);
@@ -800,13 +820,23 @@ i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 		if (!IS_ALIGNED(offset, alignment) ||
 		    range_overflows(offset, size, end))
 			return -EINVAL;
+		/*
+		 * The caller knows not of the guard added by others and
+		 * requests for the offset of the start of its buffer
+		 * to be fixed, which may not be the same as the position
+		 * of the vma->node due to the guard pages.
+		 */
+		if (offset < guard || offset + size > end - guard)
+			return -ENOSPC;
 
 		ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
-					   size, offset, color,
-					   flags);
+					   size + 2 * guard,
+					   offset - guard,
+					   color, flags);
 		if (ret)
 			return ret;
 	} else {
+		size += 2 * guard;
 		/*
 		 * We only support huge gtt pages through the 48b PPGTT,
 		 * however we also don't want to force any alignment for
@@ -854,6 +884,7 @@ i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
 
 	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+	vma->guard = guard;
 
 	return 0;
 }
@@ -906,7 +937,7 @@ rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
 	     struct sg_table *st, struct scatterlist *sg)
 {
 	unsigned int column, row;
-	unsigned int src_idx;
+	pgoff_t src_idx;
 
 	for (column = 0; column < width; column++) {
 		unsigned int left;
@@ -1012,7 +1043,7 @@ add_padding_pages(unsigned int count,
 
 static struct scatterlist *
 remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
-			      unsigned int offset, unsigned int alignment_pad,
+			      unsigned long offset, unsigned int alignment_pad,
 			      unsigned int width, unsigned int height,
 			      unsigned int src_stride, unsigned int dst_stride,
 			      struct sg_table *st, struct scatterlist *sg,
@@ -1071,7 +1102,7 @@ remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
 
 static struct scatterlist *
 remap_contiguous_pages(struct drm_i915_gem_object *obj,
-		       unsigned int obj_offset,
+		       pgoff_t obj_offset,
 		       unsigned int count,
 		       struct sg_table *st, struct scatterlist *sg)
 {
@@ -1104,7 +1135,7 @@ remap_contiguous_pages(struct drm_i915_gem_object *obj,
 
 static struct scatterlist *
 remap_linear_color_plane_pages(struct drm_i915_gem_object *obj,
-			       unsigned int obj_offset, unsigned int alignment_pad,
+			       pgoff_t obj_offset, unsigned int alignment_pad,
 			       unsigned int size,
 			       struct sg_table *st, struct scatterlist *sg,
 			       unsigned int *gtt_offset)
@@ -1544,6 +1575,8 @@ static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 		  u32 align, unsigned int flags)
 {
 	struct i915_address_space *vm = vma->vm;
+	struct intel_gt *gt;
+	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 	int err;
 
 	do {
@@ -1559,7 +1592,8 @@ static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 	}
 
 	/* Unlike i915_vma_pin, we don't take no for an answer! */
-	flush_idle_contexts(vm->gt);
+	list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
+		flush_idle_contexts(gt);
 	if (mutex_lock_interruptible(&vm->mutex) == 0) {
 		/*
 		 * We pass NULL ww here, as we don't want to unbind
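A note on the accessors this patch switches to: i915_vma_offset() and i915_vma_size() are defined in i915_vma.h, outside this diff. The arithmetic above (the node is reserved at offset - guard with size + 2 * guard, and vma->guard is stored on success) implies semantics along the lines of the sketch below; the stand-in types are simplified for illustration only, and the real kernel definitions also carry GEM_BUG_ON sanity checks.

#include <stdint.h>

/* Simplified stand-ins for the kernel types (illustration only);
 * the real definitions live in drm_mm.h and i915_vma_types.h. */
struct drm_mm_node { uint64_t start, size; };
struct i915_vma { struct drm_mm_node node; uint64_t guard; };

/* The backing pages start after the leading guard, so the address
 * the hardware uses is offset from the start of the drm_mm node. */
static inline uint64_t __i915_vma_offset(const struct i915_vma *vma)
{
	return vma->node.start + vma->guard;
}

/* The guard pages at both ends are not usable by the caller. */
static inline uint64_t __i915_vma_size(const struct i915_vma *vma)
{
	return vma->node.size - 2 * vma->guard;
}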
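Why guard = ALIGN(guard, alignment): the drm_mm node itself is aligned on insertion, but the hardware address becomes node.start + guard, which only stays aligned if the guard is a multiple of the alignment. Likewise, a PIN_OFFSET_FIXED caller asks for the start of its buffer, not of the node, hence the offset - guard / size + 2 * guard reservation and the offset < guard bounds check in i915_vma_insert(). A small self-contained example of that arithmetic, with made-up numbers:

#include <stdint.h>
#include <stdio.h>

/* Round up to the next multiple of a power-of-two alignment,
 * mirroring the kernel's ALIGN() macro. */
static uint64_t align_up(uint64_t x, uint64_t a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	uint64_t alignment = 64 << 10;	/* 64 KiB object alignment */
	uint64_t guard = align_up(4 << 10, alignment);	/* 4 KiB request rounds up to 64 KiB */
	uint64_t offset = 2 << 20;	/* caller's PIN_OFFSET_FIXED address */
	uint64_t size = 1 << 20;	/* caller's buffer size */

	/* The node is reserved with a guard folded in on both sides... */
	uint64_t node_start = offset - guard;
	uint64_t node_size = size + 2 * guard;

	/* ...so node_start + guard is exactly the offset the caller
	 * asked for, and it remains a multiple of the alignment. */
	printf("node [%#lx..%#lx), hw offset %#lx, usable size %#lx\n",
	       (unsigned long)node_start,
	       (unsigned long)(node_start + node_size),
	       (unsigned long)(node_start + guard),
	       (unsigned long)(node_size - 2 * guard));
	return 0;
}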