Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	548
1 file changed, 198 insertions(+), 350 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 27ea6bdebce7..53394f998a1f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1,5 +1,5 @@
 /*
- * Copyright © 2008 Intel Corporation
+ * Copyright © 2008-2015 Intel Corporation
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -29,9 +29,9 @@
 #include <drm/drm_vma_manager.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include "i915_vgpu.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
-#include <linux/oom.h>
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
@@ -52,15 +52,6 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 					 struct drm_i915_fence_reg *fence,
 					 bool enable);
 
-static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
-					     struct shrink_control *sc);
-static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
-					    struct shrink_control *sc);
-static int i915_gem_shrinker_oom(struct notifier_block *nb,
-				 unsigned long event,
-				 void *ptr);
-static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
-
 static bool cpu_cache_is_coherent(struct drm_device *dev,
 				  enum i915_cache_level level)
 {
@@ -350,7 +341,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 	struct drm_device *dev = obj->base.dev;
 	void *vaddr = obj->phys_handle->vaddr + args->offset;
 	char __user *user_data = to_user_ptr(args->data_ptr);
-	int ret;
+	int ret = 0;
 
 	/* We manually control the domain here and pretend that it
 	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
@@ -359,6 +350,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
 	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
 		unsigned long unwritten;
 
@@ -369,13 +361,18 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 		mutex_unlock(&dev->struct_mutex);
 		unwritten = copy_from_user(vaddr, user_data, args->size);
 		mutex_lock(&dev->struct_mutex);
-		if (unwritten)
-			return -EFAULT;
+		if (unwritten) {
+			ret = -EFAULT;
+			goto out;
+		}
 	}
 
 	drm_clflush_virt_range(vaddr, args->size);
 	i915_gem_chipset_flush(dev);
-	return 0;
+
+out:
+	intel_fb_obj_flush(obj, false);
+	return ret;
 }
 
 void *i915_gem_object_alloc(struct drm_device *dev)
@@ -809,6 +806,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 
 	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
 
+	intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
+
 	while (remain > 0) {
 		/* Operation in this page
 		 *
@@ -829,7 +828,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 		if (fast_user_write(dev_priv->gtt.mappable, page_base,
 				    page_offset, user_data, page_length)) {
 			ret = -EFAULT;
-			goto out_unpin;
+			goto out_flush;
 		}
 
 		remain -= page_length;
@@ -837,6 +836,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 		offset += page_length;
 	}
 
+out_flush:
+	intel_fb_obj_flush(obj, false);
 out_unpin:
 	i915_gem_object_ggtt_unpin(obj);
 out:
@@ -951,6 +952,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 	if (ret)
 		return ret;
 
+	intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
+
 	i915_gem_object_pin_pages(obj);
 
 	offset = args->offset;
@@ -1029,6 +1032,7 @@ out:
 	if (needs_clflush_after)
 		i915_gem_chipset_flush(dev);
 
+	intel_fb_obj_flush(obj, false);
 	return ret;
 }
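The pwrite hunks above all gain the same bracketing: frontbuffer tracking is invalidated before a CPU or GTT write begins and flushed once it ends, including on the error path (hence the new out/out_flush labels). A minimal sketch of the pattern, with do_write() standing in as a hypothetical copy helper:

	static int do_write(struct drm_i915_gem_object *obj,
			    struct drm_i915_gem_pwrite *args); /* hypothetical */

	static int write_bracketed(struct drm_i915_gem_object *obj,
				   struct drm_i915_gem_pwrite *args)
	{
		int ret;

		/* Tell frontbuffer tracking a write is starting, so features
		 * like PSR/FBC can be disabled while content changes. */
		intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);

		ret = do_write(obj, args);

		/* Always flush, even if do_write() failed part-way: some
		 * bytes may already have landed in the frontbuffer. */
		intel_fb_obj_flush(obj, false);
		return ret;
	}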
@@ -1922,12 +1926,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
 }
 
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
-{
-	return obj->madv == I915_MADV_DONTNEED;
-}
-
 /* Immediately discard the backing storage */
 static void
 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
@@ -2033,85 +2031,6 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
-unsigned long
-i915_gem_shrink(struct drm_i915_private *dev_priv,
-		long target, unsigned flags)
-{
-	const struct {
-		struct list_head *list;
-		unsigned int bit;
-	} phases[] = {
-		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
-		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
-		{ NULL, 0 },
-	}, *phase;
-	unsigned long count = 0;
-
-	/*
-	 * As we may completely rewrite the (un)bound list whilst unbinding
-	 * (due to retiring requests) we have to strictly process only
-	 * one element of the list at the time, and recheck the list
-	 * on every iteration.
-	 *
-	 * In particular, we must hold a reference whilst removing the
-	 * object as we may end up waiting for and/or retiring the objects.
-	 * This might release the final reference (held by the active list)
-	 * and result in the object being freed from under us. This is
-	 * similar to the precautions the eviction code must take whilst
-	 * removing objects.
-	 *
-	 * Also note that although these lists do not hold a reference to
-	 * the object we can safely grab one here: The final object
-	 * unreferencing and the bound_list are both protected by the
-	 * dev->struct_mutex and so we won't ever be able to observe an
-	 * object on the bound_list with a reference count equals 0.
-	 */
-	for (phase = phases; phase->list; phase++) {
-		struct list_head still_in_list;
-
-		if ((flags & phase->bit) == 0)
-			continue;
-
-		INIT_LIST_HEAD(&still_in_list);
-		while (count < target && !list_empty(phase->list)) {
-			struct drm_i915_gem_object *obj;
-			struct i915_vma *vma, *v;
-
-			obj = list_first_entry(phase->list,
-					       typeof(*obj), global_list);
-			list_move_tail(&obj->global_list, &still_in_list);
-
-			if (flags & I915_SHRINK_PURGEABLE &&
-			    !i915_gem_object_is_purgeable(obj))
-				continue;
-
-			drm_gem_object_reference(&obj->base);
-
-			/* For the unbound phase, this should be a no-op! */
-			list_for_each_entry_safe(vma, v,
-						 &obj->vma_list, vma_link)
-				if (i915_vma_unbind(vma))
-					break;
-
-			if (i915_gem_object_put_pages(obj) == 0)
-				count += obj->base.size >> PAGE_SHIFT;
-
-			drm_gem_object_unreference(&obj->base);
-		}
-		list_splice(&still_in_list, phase->list);
-	}
-
-	return count;
-}
-
-static unsigned long
-i915_gem_shrink_all(struct drm_i915_private *dev_priv)
-{
-	i915_gem_evict_everything(dev_priv->dev);
-	return i915_gem_shrink(dev_priv, LONG_MAX,
-			       I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
-}
-
 static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
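The shrinker code removed here is not dropped: a later hunk in this diff replaces the open-coded registration in i915_gem_load() with a single i915_gem_shrinker_init(dev_priv) call, moving the implementation out of i915_gem.c. For reference, this is the generic shape of the mm shrinker interface the removed code implemented (counts are in pages; the example_* and *_pages names are illustrative, not driver code):

	#include <linux/shrinker.h>

	static unsigned long count_reclaimable_pages(void);	/* hypothetical */
	static unsigned long release_pages_up_to(unsigned long n);	/* hypothetical */

	static unsigned long example_count(struct shrinker *shrinker,
					   struct shrink_control *sc)
	{
		/* How many pages could be freed if asked? 0 means skip us. */
		return count_reclaimable_pages();
	}

	static unsigned long example_scan(struct shrinker *shrinker,
					  struct shrink_control *sc)
	{
		/* Free up to sc->nr_to_scan pages, report how many went. */
		return release_pages_up_to(sc->nr_to_scan);
	}

	static struct shrinker example_shrinker = {
		.count_objects = example_count,
		.scan_objects = example_scan,
		.seeks = DEFAULT_SEEKS,
	};

	/* register_shrinker(&example_shrinker) attaches it to reclaim;
	 * unregister_shrinker() detaches it on teardown. */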
@@ -2458,10 +2377,11 @@ int __i915_add_request(struct intel_engine_cs *ring,
 		ret = ring->add_request(ring);
 		if (ret)
 			return ret;
+
+		request->tail = intel_ring_get_tail(ringbuf);
 	}
 
 	request->head = request_start;
-	request->tail = intel_ring_get_tail(ringbuf);
 
 	/* Whilst this request exists, batch_obj will be on the
 	 * active_list, and so will hold the active reference. Only when this
@@ -2492,6 +2412,8 @@ int __i915_add_request(struct intel_engine_cs *ring,
 		list_add_tail(&request->client_list,
 			      &file_priv->mm.request_list);
 		spin_unlock(&file_priv->mm.lock);
+
+		request->pid = get_pid(task_pid(current));
 	}
 
 	trace_i915_gem_request_add(request);
@@ -2572,6 +2494,8 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
 	list_del(&request->list);
 	i915_gem_request_remove_from_client(request);
 
+	put_pid(request->pid);
+
 	i915_gem_request_unreference(request);
 }
 
@@ -2744,7 +2668,6 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 	 */
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
-		struct intel_ringbuffer *ringbuf;
 
 		request = list_first_entry(&ring->request_list,
 					   struct drm_i915_gem_request,
@@ -2755,23 +2678,12 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 
 		trace_i915_gem_request_retire(request);
 
-		/* This is one of the few common intersection points
-		 * between legacy ringbuffer submission and execlists:
-		 * we need to tell them apart in order to find the correct
-		 * ringbuffer to which the request belongs to.
-		 */
-		if (i915.enable_execlists) {
-			struct intel_context *ctx = request->ctx;
-			ringbuf = ctx->engine[ring->id].ringbuf;
-		} else
-			ringbuf = ring->buffer;
-
 		/* We know the GPU must have read the request to have
 		 * sent us the seqno + interrupt, so use the position
 		 * of tail of the request to update the last known position
 		 * of the GPU head.
 		 */
-		ringbuf->last_retired_head = request->postfix;
+		request->ringbuf->last_retired_head = request->postfix;
 
 		i915_gem_free_request(request);
 	}
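The get_pid()/put_pid() pair added above records which process submitted each request, so later error-state dumps can name the owner; the reference taken in __i915_add_request() is dropped in i915_gem_free_request(). The refcounted-pid pattern in isolation (struct owned_record is an illustrative stand-in for the request):

	#include <linux/pid.h>
	#include <linux/sched.h>

	struct owned_record {
		struct pid *pid;	/* submitting process, refcounted */
	};

	static void record_owner(struct owned_record *rec)
	{
		/* Take a reference; the struct pid stays valid even if the
		 * task exits before the record is freed. */
		rec->pid = get_pid(task_pid(current));
	}

	static void release_owner(struct owned_record *rec)
	{
		put_pid(rec->pid);	/* drop the reference with the record */
	}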
@@ -3516,9 +3428,9 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
 static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			   struct i915_address_space *vm,
+			   const struct i915_ggtt_view *ggtt_view,
 			   unsigned alignment,
-			   uint64_t flags,
-			   const struct i915_ggtt_view *view)
+			   uint64_t flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3530,6 +3442,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	struct i915_vma *vma;
 	int ret;
 
+	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+		return ERR_PTR(-EINVAL);
+
 	fence_size = i915_gem_get_gtt_size(dev,
 					   obj->base.size,
 					   obj->tiling_mode);
@@ -3568,7 +3483,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 
 	i915_gem_object_pin_pages(obj);
 
-	vma = i915_gem_obj_lookup_or_create_vma_view(obj, vm, view);
+	vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
+			  i915_gem_obj_lookup_or_create_vma(obj, vm);
+
 	if (IS_ERR(vma))
 		goto err_unpin;
@@ -3598,6 +3515,17 @@ search_free:
 	if (ret)
 		goto err_remove_node;
 
+	/* allocate before insert / bind */
+	if (vma->vm->allocate_va_range) {
+		trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size,
+				    VM_TO_TRACE_NAME(vma->vm));
+		ret = vma->vm->allocate_va_range(vma->vm,
+						 vma->node.start,
+						 vma->node.size);
+		if (ret)
+			goto err_remove_node;
+	}
+
 	trace_i915_vma_bind(vma, flags);
 	ret = i915_vma_bind(vma, obj->cache_level,
 			    flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
@@ -3768,7 +3696,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	}
 
 	if (write)
-		intel_fb_obj_invalidate(obj, NULL);
+		intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
 
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
@@ -3950,7 +3878,8 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
 int
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
-				     struct intel_engine_cs *pipelined)
+				     struct intel_engine_cs *pipelined,
+				     const struct i915_ggtt_view *view)
 {
 	u32 old_read_domains, old_write_domain;
 	bool was_pin_display;
@@ -3986,7 +3915,9 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	 * (e.g. libkms for the bootup splash), we have to ensure that we
 	 * always use map_and_fenceable for all scanout buffers.
 	 */
-	ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
+	ret = i915_gem_object_ggtt_pin(obj, view, alignment,
+				       view->type == I915_GGTT_VIEW_NORMAL ?
+				       PIN_MAPPABLE : 0);
 	if (ret)
 		goto err_unpin_display;
@@ -4014,9 +3945,11 @@ err_unpin_display:
 }
 
 void
-i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
+i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
+					 const struct i915_ggtt_view *view)
 {
-	i915_gem_object_ggtt_unpin(obj);
+	i915_gem_object_ggtt_unpin_view(obj, view);
+
 	obj->pin_display = is_pin_display(obj);
 }
@@ -4083,7 +4016,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	}
 
 	if (write)
-		intel_fb_obj_invalidate(obj, NULL);
+		intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
 
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
@@ -4165,12 +4098,12 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
 	return false;
 }
 
-int
-i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
-			 struct i915_address_space *vm,
-			 uint32_t alignment,
-			 uint64_t flags,
-			 const struct i915_ggtt_view *view)
+static int
+i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
+		       struct i915_address_space *vm,
+		       const struct i915_ggtt_view *ggtt_view,
+		       uint32_t alignment,
+		       uint64_t flags)
 {
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	struct i915_vma *vma;
@@ -4186,17 +4119,29 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
 	if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
 		return -EINVAL;
 
-	vma = i915_gem_obj_to_vma_view(obj, vm, view);
+	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+		return -EINVAL;
+
+	vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
+			  i915_gem_obj_to_vma(obj, vm);
+
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
 	if (vma) {
 		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
 			return -EBUSY;
 
 		if (i915_vma_misplaced(vma, alignment, flags)) {
+			unsigned long offset;
+			offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) :
+					     i915_gem_obj_offset(obj, vm);
 			WARN(vma->pin_count,
-			     "bo is already pinned with incorrect alignment:"
+			     "bo is already pinned in %s with incorrect alignment:"
 			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
 			     " obj->map_and_fenceable=%d\n",
-			     i915_gem_obj_offset_view(obj, vm, view->type),
+			     ggtt_view ? "ggtt" : "ppgtt",
+			     offset,
 			     alignment,
 			     !!(flags & PIN_MAPPABLE),
 			     obj->map_and_fenceable);
@@ -4210,8 +4155,12 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
 	bound = vma ? vma->bound : 0;
 	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
-		vma = i915_gem_object_bind_to_vm(obj, vm, alignment,
-						 flags, view);
+		/* In true PPGTT, bind has possibly changed PDEs, which
+		 * means we must do a context switch before the GPU can
+		 * accurately read some of the VMAs.
+		 */
+		vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
+						 flags);
 		if (IS_ERR(vma))
 			return PTR_ERR(vma);
 	}
@@ -4237,7 +4186,7 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
 		fenceable = (vma->node.size == fence_size &&
 			     (vma->node.start & (fence_alignment - 1)) == 0);
 
-		mappable = (vma->node.start + obj->base.size <=
+		mappable = (vma->node.start + fence_size <=
 			    dev_priv->gtt.mappable_end);
 
 		obj->map_and_fenceable = mappable && fenceable;
@@ -4252,16 +4201,41 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
 	return 0;
 }
 
+int
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+		    struct i915_address_space *vm,
+		    uint32_t alignment,
+		    uint64_t flags)
+{
+	return i915_gem_object_do_pin(obj, vm,
+				      i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
+				      alignment, flags);
+}
+
+int
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+			 const struct i915_ggtt_view *view,
+			 uint32_t alignment,
+			 uint64_t flags)
+{
+	if (WARN_ONCE(!view, "no view specified"))
+		return -EINVAL;
+
+	return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
+				      alignment, flags | PIN_GLOBAL);
+}
+
 void
-i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
+i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
+				const struct i915_ggtt_view *view)
 {
-	struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
+	struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
 
 	BUG_ON(!vma);
-	BUG_ON(vma->pin_count == 0);
-	BUG_ON(!i915_gem_obj_ggtt_bound(obj));
+	WARN_ON(vma->pin_count == 0);
+	WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
 
-	if (--vma->pin_count == 0)
+	if (--vma->pin_count == 0 && view->type == I915_GGTT_VIEW_NORMAL)
 		obj->pin_mappable = false;
 }
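Callers now choose between the two wrappers above instead of passing a view into a single entry point: i915_gem_object_pin() handles the normal case (substituting i915_ggtt_view_normal when vm is the GGTT), while i915_gem_object_ggtt_pin() takes an explicit view and forces PIN_GLOBAL. Sketched usage, assuming obj, vm and some_view already exist and ignoring error handling beyond ret:

	int ret;

	/* Pin into a PPGTT address space: no view is involved. */
	ret = i915_gem_object_pin(obj, vm, 4096, 0);

	/* Pin a specific GGTT view; PIN_GLOBAL is implied by the wrapper. */
	ret = i915_gem_object_ggtt_pin(obj, &some_view, 0, PIN_MAPPABLE);

	/* Unpin must name the same view that was pinned. */
	i915_gem_object_ggtt_unpin_view(obj, &some_view);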
@@ -4382,7 +4356,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 		obj->madv = args->madv;
 
 	/* if the object is no longer attached, discard its backing storage */
-	if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
+	if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
 		i915_gem_object_truncate(obj);
 
 	args->retained = obj->madv != __I915_MADV_PURGED;
@@ -4557,15 +4531,33 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	intel_runtime_pm_put(dev_priv);
 }
 
-struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
-					  struct i915_address_space *vm,
-					  const struct i915_ggtt_view *view)
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+				     struct i915_address_space *vm)
 {
 	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
-		if (vma->vm == vm && vma->ggtt_view.type == view->type)
+	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		if (i915_is_ggtt(vma->vm) &&
+		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+			continue;
+		if (vma->vm == vm)
 			return vma;
+	}
+
+	return NULL;
+}
+
+struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
+					   const struct i915_ggtt_view *view)
+{
+	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
+	struct i915_vma *vma;
+
+	if (WARN_ONCE(!view, "no view specified"))
+		return ERR_PTR(-EINVAL);
+
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (vma->vm == ggtt &&
+		    i915_ggtt_view_equal(&vma->ggtt_view, view))
+			return vma;
 
 	return NULL;
 }
@@ -4612,10 +4604,6 @@ i915_gem_suspend(struct drm_device *dev)
 
 	i915_gem_retire_requests(dev);
 
-	/* Under UMS, be paranoid and evict. */
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_gem_evict_everything(dev);
-
 	i915_gem_stop_ringbuffers(dev);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -4986,18 +4974,8 @@ i915_gem_load(struct drm_device *dev)
 			  i915_gem_idle_work_handler);
 	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
-	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
-	if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
-		I915_WRITE(MI_ARB_STATE,
-			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
-	}
-
 	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
 
-	/* Old X drivers will take 0-2 for front, back, depth buffers */
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		dev_priv->fence_reg_start = 3;
-
 	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
 		dev_priv->num_fence_regs = 32;
 	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) ||
 		 IS_I945GM(dev) || IS_G33(dev))
@@ -5005,6 +4983,10 @@ i915_gem_load(struct drm_device *dev)
 	else
 		dev_priv->num_fence_regs = 8;
 
+	if (intel_vgpu_active(dev))
+		dev_priv->num_fence_regs =
+				I915_READ(vgtif_reg(avail_rs.fence_num));
+
 	/* Initialize fence registers to zero */
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	i915_gem_restore_fences(dev);
@@ -5014,13 +4996,7 @@ i915_gem_load(struct drm_device *dev)
 
 	dev_priv->mm.interruptible = true;
 
-	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
-	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
-	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
-	register_shrinker(&dev_priv->mm.shrinker);
-
-	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
-	register_oom_notifier(&dev_priv->mm.oom_notifier);
+	i915_gem_shrinker_init(dev_priv);
 
 	i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool);
 
@@ -5112,106 +5088,70 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 	}
 }
 
-static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
-{
-	if (!mutex_is_locked(mutex))
-		return false;
-
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
-	return mutex->owner == task;
-#else
-	/* Since UP may be pre-empted, we cannot assume that we own the lock */
-	return false;
-#endif
-}
-
-static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+/* All the new VM stuff */
+unsigned long
+i915_gem_obj_offset(struct drm_i915_gem_object *o,
+		    struct i915_address_space *vm)
 {
-	if (!mutex_trylock(&dev->struct_mutex)) {
-		if (!mutex_is_locked_by(&dev->struct_mutex, current))
-			return false;
+	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
 	struct i915_vma *vma;
 
-		if (to_i915(dev)->mm.shrinker_no_lock_stealing)
-			return false;
+	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
-		*unlock = false;
-	} else
-		*unlock = true;
+	list_for_each_entry(vma, &o->vma_list, vma_link) {
+		if (i915_is_ggtt(vma->vm) &&
+		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+			continue;
+		if (vma->vm == vm)
+			return vma->node.start;
+	}
 
-	return true;
+	WARN(1, "%s vma for this object not found.\n",
+	     i915_is_ggtt(vm) ? "global" : "ppgtt");
+	return -1;
 }
"global" : "ppgtt"); + return -1; } -static int num_vma_bound(struct drm_i915_gem_object *obj) +unsigned long +i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, + const struct i915_ggtt_view *view) { + struct i915_address_space *ggtt = i915_obj_to_ggtt(o); struct i915_vma *vma; - int count = 0; - list_for_each_entry(vma, &obj->vma_list, vma_link) - if (drm_mm_node_allocated(&vma->node)) - count++; - - return count; -} - -static unsigned long -i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) -{ - struct drm_i915_private *dev_priv = - container_of(shrinker, struct drm_i915_private, mm.shrinker); - struct drm_device *dev = dev_priv->dev; - struct drm_i915_gem_object *obj; - unsigned long count; - bool unlock; - - if (!i915_gem_shrinker_lock(dev, &unlock)) - return 0; - - count = 0; - list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) - if (obj->pages_pin_count == 0) - count += obj->base.size >> PAGE_SHIFT; - - list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - if (!i915_gem_obj_is_pinned(obj) && - obj->pages_pin_count == num_vma_bound(obj)) - count += obj->base.size >> PAGE_SHIFT; - } - - if (unlock) - mutex_unlock(&dev->struct_mutex); + list_for_each_entry(vma, &o->vma_list, vma_link) + if (vma->vm == ggtt && + i915_ggtt_view_equal(&vma->ggtt_view, view)) + return vma->node.start; - return count; + WARN(1, "global vma for this object not found.\n"); + return -1; } -/* All the new VM stuff */ -unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o, - struct i915_address_space *vm, - enum i915_ggtt_view_type view) +bool i915_gem_obj_bound(struct drm_i915_gem_object *o, + struct i915_address_space *vm) { - struct drm_i915_private *dev_priv = o->base.dev->dev_private; struct i915_vma *vma; - WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); - list_for_each_entry(vma, &o->vma_list, vma_link) { - if (vma->vm == vm && vma->ggtt_view.type == view) - return vma->node.start; - + if (i915_is_ggtt(vma->vm) && + vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) + continue; + if (vma->vm == vm && drm_mm_node_allocated(&vma->node)) + return true; } - WARN(1, "%s vma for this object not found.\n", - i915_is_ggtt(vm) ? 
"global" : "ppgtt"); - return -1; + + return false; } -bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o, - struct i915_address_space *vm, - enum i915_ggtt_view_type view) +bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, + const struct i915_ggtt_view *view) { + struct i915_address_space *ggtt = i915_obj_to_ggtt(o); struct i915_vma *vma; list_for_each_entry(vma, &o->vma_list, vma_link) - if (vma->vm == vm && - vma->ggtt_view.type == view && + if (vma->vm == ggtt && + i915_ggtt_view_equal(&vma->ggtt_view, view) && drm_mm_node_allocated(&vma->node)) return true; @@ -5239,118 +5179,26 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, BUG_ON(list_empty(&o->vma_list)); - list_for_each_entry(vma, &o->vma_list, vma_link) + list_for_each_entry(vma, &o->vma_list, vma_link) { + if (i915_is_ggtt(vma->vm) && + vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) + continue; if (vma->vm == vm) return vma->node.size; - + } return 0; } -static unsigned long -i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) -{ - struct drm_i915_private *dev_priv = - container_of(shrinker, struct drm_i915_private, mm.shrinker); - struct drm_device *dev = dev_priv->dev; - unsigned long freed; - bool unlock; - - if (!i915_gem_shrinker_lock(dev, &unlock)) - return SHRINK_STOP; - - freed = i915_gem_shrink(dev_priv, - sc->nr_to_scan, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_PURGEABLE); - if (freed < sc->nr_to_scan) - freed += i915_gem_shrink(dev_priv, - sc->nr_to_scan - freed, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND); - if (unlock) - mutex_unlock(&dev->struct_mutex); - - return freed; -} - -static int -i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) +bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = - container_of(nb, struct drm_i915_private, mm.oom_notifier); - struct drm_device *dev = dev_priv->dev; - struct drm_i915_gem_object *obj; - unsigned long timeout = msecs_to_jiffies(5000) + 1; - unsigned long pinned, bound, unbound, freed_pages; - bool was_interruptible; - bool unlock; - - while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) { - schedule_timeout_killable(1); - if (fatal_signal_pending(current)) - return NOTIFY_DONE; - } - if (timeout == 0) { - pr_err("Unable to purge GPU memory due lock contention.\n"); - return NOTIFY_DONE; - } - - was_interruptible = dev_priv->mm.interruptible; - dev_priv->mm.interruptible = false; - - freed_pages = i915_gem_shrink_all(dev_priv); - - dev_priv->mm.interruptible = was_interruptible; - - /* Because we may be allocating inside our own driver, we cannot - * assert that there are no objects with pinned pages that are not - * being pointed to by hardware. 
-	 */
-	unbound = bound = pinned = 0;
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
-		if (!obj->base.filp) /* not backed by a freeable object */
-			continue;
-
-		if (obj->pages_pin_count)
-			pinned += obj->base.size;
-		else
-			unbound += obj->base.size;
-	}
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (!obj->base.filp)
+	struct i915_vma *vma;
+
+	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		if (i915_is_ggtt(vma->vm) &&
+		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
 			continue;
-
-		if (obj->pages_pin_count)
-			pinned += obj->base.size;
-		else
-			bound += obj->base.size;
+		if (vma->pin_count > 0)
+			return true;
 	}
-
-	if (unlock)
-		mutex_unlock(&dev->struct_mutex);
-
-	if (freed_pages || unbound || bound)
-		pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
-			freed_pages << PAGE_SHIFT, pinned);
-	if (unbound || bound)
-		pr_err("%lu and %lu bytes still available in the "
-		       "bound and unbound GPU page lists.\n",
-		       bound, unbound);
-
-	*(unsigned long *)ptr += freed_pages;
-	return NOTIFY_DONE;
+	return false;
 }
 
-struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
-{
-	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
-		if (vma->vm == ggtt &&
-		    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
-			return vma;
-
-	return NULL;
-}
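The final hunk removes the old i915_gem_obj_to_ggtt(), which hard-coded the normal view. Under the view-aware helpers this diff introduces, the equivalent lookup would presumably be expressed as:

	struct i915_vma *vma =
		i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);

with alternate GGTT views selected by passing a different view descriptor in place of i915_ggtt_view_normal.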