author     Chris Wilson <chris@chris-wilson.co.uk>    2016-08-04 18:32:32 +0300
committer  Chris Wilson <chris@chris-wilson.co.uk>    2016-08-04 22:19:59 +0300
commit     3272db53136f6be7555fb294db3a6e3f372b9380 (patch)
tree       6cae627603d8cc51b6cf54b32ad1ed8f87952d77 /drivers/gpu
parent     59bfa1248e22d65e6273eec6f8043c8e4450c2ba (diff)
download   linux-3272db53136f6be7555fb294db3a6e3f372b9380.tar.xz
drm/i915: Combine all i915_vma bitfields into a single set of flags
In preparation to perform some magic to speed up i915_vma_pin(), which
is among the hottest of hot paths in execbuf, refactor all the bitfields
accessed by i915_vma_pin() into a single unified set of flags.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470324762-2545-16-git-send-email-chris@chris-wilson.co.uk
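The refactoring rests on one layout trick: the 4-bit pin count occupies the low bits of the same vma->flags word that carries the bind, GGTT and closed bits (I915_VMA_PIN_MASK is 0xf, the other flags start at BIT(5)), so __i915_vma_pin()/__i915_vma_unpin() reduce to a plain increment/decrement and i915_vma_pin() can inspect everything it needs with a single read of vma->flags. The standalone sketch below (plain userspace C with simplified, hypothetical names such as VMA_PIN_MASK and struct vma; it is not the driver code itself) illustrates that layout, assuming the caller checks for pin-count overflow before incrementing:

/*
 * Minimal sketch of the combined-flags layout used by this patch:
 * pin count in bits 0-3, bind/GGTT/closed state in higher bits.
 * Names here are simplified and not the driver's own identifiers.
 */
#include <assert.h>
#include <stdio.h>

#define VMA_PIN_MASK     0xfu       /* pin count: bits 0-3 */
#define VMA_GLOBAL_BIND  (1u << 5)
#define VMA_LOCAL_BIND   (1u << 6)
#define VMA_GGTT         (1u << 7)
#define VMA_CLOSED       (1u << 8)

struct vma { unsigned int flags; };

static unsigned int vma_pin_count(const struct vma *v)
{
	return v->flags & VMA_PIN_MASK;
}

static void vma_pin(struct vma *v)
{
	/* The count sits in the low bits, so a bare increment cannot
	 * disturb the flag bits as long as overflow is checked first. */
	assert(vma_pin_count(v) < VMA_PIN_MASK);
	v->flags++;
}

static void vma_unpin(struct vma *v)
{
	assert(vma_pin_count(v) > 0);
	v->flags--;
}

int main(void)
{
	struct vma v = { .flags = VMA_GGTT | VMA_GLOBAL_BIND };

	vma_pin(&v);
	vma_pin(&v);
	printf("pinned %u times, ggtt=%d, bound=%d\n",
	       vma_pin_count(&v),
	       !!(v.flags & VMA_GGTT),
	       !!(v.flags & (VMA_GLOBAL_BIND | VMA_LOCAL_BIND)));
	vma_unpin(&v);
	vma_unpin(&v);
	return 0;
}

Keeping the counter in the least-significant bits is what makes the unconditional flags++ safe: while the count stays below the mask, the increment can never carry into the flag bits above it.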
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c        |  8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c            | 40
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c    |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c |  5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c        | 46
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h        | 55
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c   |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c     |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c      |  4
9 files changed, 90 insertions, 74 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4401a2ab9a74..8d47d1bf7b85 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -125,7 +125,7 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
+		if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
 			size += vma->node.size;
 	}
 
@@ -181,9 +181,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 			continue;
 		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
-			   vma->is_ggtt ? "g" : "pp",
+			   i915_vma_is_ggtt(vma) ? "g" : "pp",
 			   vma->node.start, vma->node.size);
-		if (vma->is_ggtt)
+		if (i915_vma_is_ggtt(vma))
 			seq_printf(m, ", type: %u", vma->ggtt_view.type);
 		seq_puts(m, ")");
 	}
@@ -356,7 +356,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
-		if (vma->is_ggtt) {
+		if (i915_vma_is_ggtt(vma)) {
 			stats->global += vma->node.size;
 		} else {
 			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d38275fbbfc0..3ade87b1a575 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2861,7 +2861,8 @@ int i915_vma_unbind(struct i915_vma *vma)
 	GEM_BUG_ON(obj->bind_count == 0);
 	GEM_BUG_ON(!obj->pages);
 
-	if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
+	if (i915_vma_is_ggtt(vma) &&
+	    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
 		i915_gem_object_finish_gtt(obj);
 
 		/* release the fence reg _after_ flushing */
@@ -2876,12 +2877,12 @@ int i915_vma_unbind(struct i915_vma *vma)
 		trace_i915_vma_unbind(vma);
 		vma->vm->unbind_vma(vma);
 	}
-	vma->bound = 0;
+	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 
 	drm_mm_remove_node(&vma->node);
 	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
 
-	if (vma->is_ggtt) {
+	if (i915_vma_is_ggtt(vma)) {
 		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
 			obj->map_and_fenceable = false;
 		} else if (vma->ggtt_view.pages) {
@@ -2904,7 +2905,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 	i915_gem_object_unpin_pages(obj);
 
 destroy:
-	if (unlikely(vma->closed))
+	if (unlikely(i915_vma_is_closed(vma)))
 		i915_vma_destroy(vma);
 
 	return 0;
@@ -2985,7 +2986,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	u64 min_alignment;
 	int ret;
 
-	GEM_BUG_ON(vma->bound);
+	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
 
 	size = max(size, vma->size);
@@ -3707,13 +3708,14 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 int
 i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
-	unsigned int bound = vma->bound;
+	unsigned int bound;
 	int ret;
 
 	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
-	GEM_BUG_ON((flags & PIN_GLOBAL) && !vma->is_ggtt);
+	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
 
-	if (WARN_ON(i915_vma_pin_count(vma) == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
+	bound = vma->flags;
+	if (WARN_ON((bound & I915_VMA_PIN_MASK) == I915_VMA_PIN_MASK))
 		return -EBUSY;
 
 	/* Pin early to prevent the shrinker/eviction logic from destroying
@@ -3721,7 +3723,7 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	 */
 	__i915_vma_pin(vma);
 
-	if (!bound) {
+	if ((bound & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)) == 0) {
 		ret = i915_vma_insert(vma, size, alignment, flags);
 		if (ret)
 			goto err;
@@ -3731,7 +3733,7 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	if (ret)
 		goto err;
 
-	if ((bound ^ vma->bound) & GLOBAL_BIND)
+	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
 		__i915_vma_set_map_and_fenceable(vma);
 
 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
@@ -4032,9 +4034,9 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	 * unbound now. */
 	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
-		GEM_BUG_ON(!vma->is_ggtt);
+		GEM_BUG_ON(!i915_vma_is_ggtt(vma));
 		GEM_BUG_ON(i915_vma_is_active(vma));
-		vma->pin_count = 0;
+		vma->flags &= ~I915_VMA_PIN_MASK;
 		i915_vma_close(vma);
 	}
 	GEM_BUG_ON(obj->bind_count);
@@ -4094,7 +4096,8 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
 	GEM_BUG_ON(!view);
 
 	list_for_each_entry(vma, &obj->vma_list, obj_link)
-		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
+		if (i915_vma_is_ggtt(vma) &&
+		    i915_ggtt_view_equal(&vma->ggtt_view, view))
 			return vma;
 	return NULL;
 }
@@ -4583,7 +4586,7 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
 	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
 	list_for_each_entry(vma, &o->vma_list, obj_link) {
-		if (vma->is_ggtt &&
+		if (i915_vma_is_ggtt(vma) &&
 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
 			continue;
 		if (vma->vm == vm)
@@ -4601,7 +4604,8 @@ u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, &o->vma_list, obj_link)
-		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
+		if (i915_vma_is_ggtt(vma) &&
+		    i915_ggtt_view_equal(&vma->ggtt_view, view))
 			return vma->node.start;
 
 	WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
@@ -4614,7 +4618,7 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, &o->vma_list, obj_link) {
-		if (vma->is_ggtt &&
+		if (i915_vma_is_ggtt(vma) &&
 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
 			continue;
 		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
@@ -4630,7 +4634,7 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, &o->vma_list, obj_link)
-		if (vma->is_ggtt &&
+		if (i915_vma_is_ggtt(vma) &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
 		    drm_mm_node_allocated(&vma->node))
 			return true;
@@ -4645,7 +4649,7 @@ unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
 	GEM_BUG_ON(list_empty(&o->vma_list));
 
 	list_for_each_entry(vma, &o->vma_list, obj_link) {
-		if (vma->is_ggtt &&
+		if (i915_vma_is_ggtt(vma) &&
 		    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
 			return vma->node.size;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index eff6d3953ecd..dc7c0ae73b62 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -219,7 +219,7 @@ static void i915_ppgtt_close(struct i915_address_space *vm)
 		struct i915_vma *vma, *vn;
 
 		list_for_each_entry_safe(vma, vn, *phase, vm_link)
-			if (!vma->closed)
+			if (!i915_vma_is_closed(vma))
 				i915_vma_close(vma);
 	}
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d95a0432678f..a0c95ab15d57 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -717,7 +717,7 @@ need_reloc_mappable(struct i915_vma *vma)
 	if (entry->relocation_count == 0)
 		return false;
 
-	if (!vma->is_ggtt)
+	if (!i915_vma_is_ggtt(vma))
 		return false;
 
 	/* See also use_cpu_reloc() */
@@ -736,7 +736,8 @@ eb_vma_misplaced(struct i915_vma *vma)
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	struct drm_i915_gem_object *obj = vma->obj;
 
-	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);
+	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
+		!i915_vma_is_ggtt(vma));
 
 	if (entry->alignment &&
 	    vma->node.start & (entry->alignment - 1))
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 84b786cf490f..af012b1cd1ec 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2652,7 +2652,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
 	 * upgrade to both bound if we bind either to avoid double-binding.
 	 */
-	vma->bound |= GLOBAL_BIND | LOCAL_BIND;
+	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
 
 	return 0;
 }
@@ -2674,14 +2674,14 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 		pte_flags |= PTE_READ_ONLY;
 
-	if (flags & GLOBAL_BIND) {
+	if (flags & I915_VMA_GLOBAL_BIND) {
 		vma->vm->insert_entries(vma->vm,
 					vma->ggtt_view.pages,
 					vma->node.start,
 					cache_level, pte_flags);
 	}
 
-	if (flags & LOCAL_BIND) {
+	if (flags & I915_VMA_LOCAL_BIND) {
 		struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
 		appgtt->base.insert_entries(&appgtt->base,
@@ -2698,12 +2698,12 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
 	struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
 	const u64 size = min(vma->size, vma->node.size);
 
-	if (vma->bound & GLOBAL_BIND)
+	if (vma->flags & I915_VMA_GLOBAL_BIND)
 		vma->vm->clear_range(vma->vm,
 				     vma->node.start, size,
 				     true);
 
-	if (vma->bound & LOCAL_BIND && appgtt)
+	if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
 		appgtt->base.clear_range(&appgtt->base,
 					 vma->node.start, size,
 					 true);
@@ -3334,7 +3334,7 @@ i915_vma_retire(struct i915_gem_active *active,
 		return;
 
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
-	if (unlikely(vma->closed && !i915_vma_is_pinned(vma)))
+	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
 		WARN_ON(i915_vma_unbind(vma));
 }
@@ -3342,10 +3342,10 @@ void i915_vma_destroy(struct i915_vma *vma)
 {
 	GEM_BUG_ON(vma->node.allocated);
 	GEM_BUG_ON(i915_vma_is_active(vma));
-	GEM_BUG_ON(!vma->closed);
+	GEM_BUG_ON(!i915_vma_is_closed(vma));
 
 	list_del(&vma->vm_link);
-	if (!vma->is_ggtt)
+	if (!i915_vma_is_ggtt(vma))
 		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
 
 	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
@@ -3353,8 +3353,8 @@ void i915_vma_close(struct i915_vma *vma)
 {
-	GEM_BUG_ON(vma->closed);
-	vma->closed = true;
+	GEM_BUG_ON(i915_vma_is_closed(vma));
+	vma->flags |= I915_VMA_CLOSED;
 
 	list_del_init(&vma->obj_link);
 	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
@@ -3386,9 +3386,9 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	vma->vm = vm;
 	vma->obj = obj;
 	vma->size = obj->base.size;
-	vma->is_ggtt = i915_is_ggtt(vm);
 
 	if (i915_is_ggtt(vm)) {
+		vma->flags |= I915_VMA_GGTT;
 		vma->ggtt_view = *view;
 		if (view->type == I915_GGTT_VIEW_PARTIAL) {
 			vma->size = view->params.partial.size;
@@ -3433,7 +3433,7 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
 	if (!vma)
 		vma = __i915_gem_vma_create(obj, &ggtt->base, view);
 
-	GEM_BUG_ON(vma->closed);
+	GEM_BUG_ON(i915_vma_is_closed(vma));
 	return vma;
 }
@@ -3644,27 +3644,28 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 		  u32 flags)
 {
-	int ret;
 	u32 bind_flags;
+	u32 vma_flags;
+	int ret;
 
 	if (WARN_ON(flags == 0))
 		return -EINVAL;
 
 	bind_flags = 0;
 	if (flags & PIN_GLOBAL)
-		bind_flags |= GLOBAL_BIND;
+		bind_flags |= I915_VMA_GLOBAL_BIND;
 	if (flags & PIN_USER)
-		bind_flags |= LOCAL_BIND;
+		bind_flags |= I915_VMA_LOCAL_BIND;
 
+	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 	if (flags & PIN_UPDATE)
-		bind_flags |= vma->bound;
+		bind_flags |= vma_flags;
 	else
-		bind_flags &= ~vma->bound;
-
+		bind_flags &= ~vma_flags;
 	if (bind_flags == 0)
 		return 0;
 
-	if (vma->bound == 0 && vma->vm->allocate_va_range) {
+	if (vma_flags == 0 && vma->vm->allocate_va_range) {
 		trace_i915_va_alloc(vma);
 		ret = vma->vm->allocate_va_range(vma->vm,
 						 vma->node.start,
@@ -3677,8 +3678,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 	if (ret)
 		return ret;
 
-	vma->bound |= bind_flags;
-
+	vma->flags |= bind_flags;
 	return 0;
 }
@@ -3690,8 +3690,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 	if (WARN_ON(!vma->obj->map_and_fenceable))
 		return IO_ERR_PTR(-ENODEV);
 
-	GEM_BUG_ON(!vma->is_ggtt);
-	GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0);
+	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
 
 	ptr = vma->iomap;
 	if (ptr == NULL) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index c63cc1b4c437..5134c0433abd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -182,15 +182,28 @@ struct i915_vma {
 	void __iomem *iomap;
 	u64 size;
 
-	unsigned int active;
-	struct i915_gem_active last_read[I915_NUM_ENGINES];
+	unsigned int flags;
+	/**
+	 * How many users have pinned this object in GTT space. The following
+	 * users can each hold at most one reference: pwrite/pread, execbuffer
+	 * (objects are not allowed multiple times for the same batchbuffer),
+	 * and the framebuffer code. When switching/pageflipping, the
+	 * framebuffer code has at most two buffers pinned per crtc.
+	 *
+	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+	 * bits with absolutely no headroom. So use 4 bits.
+	 */
+#define I915_VMA_PIN_MASK 0xf
 
 	/** Flags and address space this VMA is bound to */
-#define GLOBAL_BIND	(1<<0)
-#define LOCAL_BIND	(1<<1)
-	unsigned int bound : 4;
-	bool is_ggtt : 1;
-	bool closed : 1;
+#define I915_VMA_GLOBAL_BIND	BIT(5)
+#define I915_VMA_LOCAL_BIND	BIT(6)
+
+#define I915_VMA_GGTT	BIT(7)
+#define I915_VMA_CLOSED	BIT(8)
+
+	unsigned int active;
+	struct i915_gem_active last_read[I915_NUM_ENGINES];
 
 	/**
 	 * Support different GGTT views into the same object.
@@ -215,20 +228,18 @@ struct i915_vma {
 	struct hlist_node exec_node;
 	unsigned long exec_handle;
 	struct drm_i915_gem_exec_object2 *exec_entry;
-
-	/**
-	 * How many users have pinned this object in GTT space. The following
-	 * users can each hold at most one reference: pwrite/pread, execbuffer
-	 * (objects are not allowed multiple times for the same batchbuffer),
-	 * and the framebuffer code. When switching/pageflipping, the
-	 * framebuffer code has at most two buffers pinned per crtc.
-	 *
-	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
-	 * bits with absolutely no headroom. So use 4 bits.
-	 */
-	unsigned int pin_count:4;
-#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 };
 
+static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
+{
+	return vma->flags & I915_VMA_GGTT;
+}
+
+static inline bool i915_vma_is_closed(const struct i915_vma *vma)
+{
+	return vma->flags & I915_VMA_CLOSED;
+}
+
 static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
 {
 	return vma->active;
@@ -625,7 +636,7 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
 
 static inline int i915_vma_pin_count(const struct i915_vma *vma)
 {
-	return vma->pin_count;
+	return vma->flags & I915_VMA_PIN_MASK;
 }
 
 static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
@@ -635,14 +646,14 @@ static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
 
 static inline void __i915_vma_pin(struct i915_vma *vma)
 {
-	vma->pin_count++;
+	vma->flags++;
 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
 }
 
 static inline void __i915_vma_unpin(struct i915_vma *vma)
 {
 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
-	vma->pin_count--;
+	vma->flags--;
 }
 
 static inline void i915_vma_unpin(struct i915_vma *vma)
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 64d179d4f684..b5776358c05e 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -53,7 +53,7 @@ static bool any_vma_pinned(struct drm_i915_gem_object *obj)
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, &obj->vma_list, obj_link)
-		if (vma->pin_count)
+		if (i915_vma_is_pinned(vma))
 			return true;
 
 	return false;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index bc91ffe614e2..13279610eeec 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -705,7 +705,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 		goto err;
 	}
 
-	vma->bound |= GLOBAL_BIND;
+	vma->flags |= I915_VMA_GLOBAL_BIND;
 	__i915_vma_set_map_and_fenceable(vma);
 	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
 	obj->bind_count++;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d94eb907a23a..cc28ad429dd8 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -669,14 +669,14 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
 	if (i915_is_ggtt(vm))
 		vma = i915_gem_obj_to_ggtt(src);
 	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
-		    vma && (vma->bound & GLOBAL_BIND) &&
+		    vma && (vma->flags & I915_VMA_GLOBAL_BIND) &&
 		    reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
 
 	/* Cannot access stolen address directly, try to use the aperture */
 	if (src->stolen) {
 		use_ggtt = true;
 
-		if (!(vma && vma->bound & GLOBAL_BIND))
+		if (!(vma && vma->flags & I915_VMA_GLOBAL_BIND))
 			goto unwind;
 
 		reloc_offset = i915_gem_obj_ggtt_offset(src);