Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 890 ++++++++++++++++++++++-------
1 file changed, 662 insertions(+), 228 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 80e5ba490dc2..40727d4c2919 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,6 +29,8 @@
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
#include <linux/swap.h>
#include <linux/pci.h>
@@ -47,11 +49,15 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_evict_something(struct drm_device *dev);
+static int i915_gem_evict_something(struct drm_device *dev, int min_size);
+static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv);
+static LIST_HEAD(shrink_list);
+static DEFINE_SPINLOCK(shrink_list_lock);
+
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end)
{
@@ -111,7 +117,8 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_create *args = data;
struct drm_gem_object *obj;
- int handle, ret;
+ int ret;
+ u32 handle;
args->size = roundup(args->size, PAGE_SIZE);
@@ -314,6 +321,45 @@ fail_unlock:
return ret;
}
+static inline gfp_t
+i915_gem_object_get_page_gfp_mask(struct drm_gem_object *obj)
+{
+ return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
+}
+
+static inline void
+i915_gem_object_set_page_gfp_mask(struct drm_gem_object *obj, gfp_t gfp)
+{
+ mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
+}
+
+static int
+i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
+{
+ int ret;
+
+ ret = i915_gem_object_get_pages(obj);
+
+ /* If we have insufficient memory to map in the pages, attempt
+ * to make some space by throwing out some old buffers.
+ */
+ if (ret == -ENOMEM) {
+ struct drm_device *dev = obj->dev;
+ gfp_t gfp;
+
+ ret = i915_gem_evict_something(dev, obj->size);
+ if (ret)
+ return ret;
+
+ gfp = i915_gem_object_get_page_gfp_mask(obj);
+ i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
+ ret = i915_gem_object_get_pages(obj);
+ i915_gem_object_set_page_gfp_mask(obj, gfp);
+ }
+
+ return ret;
+}
+
/**
* This is the fallback shmem pread path, which allocates temporary storage
* in kernel space to copy_to_user into outside of the struct_mutex, so we
@@ -365,8 +411,8 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_get_pages(obj);
- if (ret != 0)
+ ret = i915_gem_object_get_pages_or_evict(obj);
+ if (ret)
goto fail_unlock;
ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
@@ -840,8 +886,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_get_pages(obj);
- if (ret != 0)
+ ret = i915_gem_object_get_pages_or_evict(obj);
+ if (ret)
goto fail_unlock;
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
@@ -981,6 +1027,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_set_domain *args = data;
struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
uint32_t read_domains = args->read_domains;
uint32_t write_domain = args->write_domain;
int ret;
@@ -1004,15 +1051,17 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EBADF;
+ obj_priv = obj->driver_private;
mutex_lock(&dev->struct_mutex);
+
+ intel_mark_busy(dev, obj);
+
#if WATCH_BUF
DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
obj, obj->size, read_domains, write_domain);
#endif
if (read_domains & I915_GEM_DOMAIN_GTT) {
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
/* Update the LRU on the fence for the CPU access that's
@@ -1150,28 +1199,22 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Now bind it into the GTT if needed */
mutex_lock(&dev->struct_mutex);
if (!obj_priv->gtt_space) {
- ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return VM_FAULT_SIGBUS;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return VM_FAULT_SIGBUS;
- }
+ ret = i915_gem_object_bind_to_gtt(obj, 0);
+ if (ret)
+ goto unlock;
list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto unlock;
}
/* Need a new fence register? */
if (obj_priv->tiling_mode != I915_TILING_NONE) {
ret = i915_gem_object_get_fence_reg(obj);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return VM_FAULT_SIGBUS;
- }
+ if (ret)
+ goto unlock;
}
pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
@@ -1179,18 +1222,18 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Finally, remap it using the new GTT offset */
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
-
+unlock:
mutex_unlock(&dev->struct_mutex);
switch (ret) {
+ case 0:
+ case -ERESTARTSYS:
+ return VM_FAULT_NOPAGE;
case -ENOMEM:
case -EAGAIN:
return VM_FAULT_OOM;
- case -EFAULT:
- case -EINVAL:
- return VM_FAULT_SIGBUS;
default:
- return VM_FAULT_NOPAGE;
+ return VM_FAULT_SIGBUS;
}
}
@@ -1383,6 +1426,14 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
obj_priv = obj->driver_private;
+ if (obj_priv->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to mmap a purgeable buffer\n");
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
if (!obj_priv->mmap_offset) {
ret = i915_gem_create_mmap_offset(obj);
if (ret) {
@@ -1394,22 +1445,12 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
args->offset = obj_priv->mmap_offset;
- obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
-
- /* Make sure the alignment is correct for fence regs etc */
- if (obj_priv->agp_mem &&
- (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
/*
* Pull it into the GTT so that we have a page list (makes the
* initial fault faster and any subsequent flushing possible).
*/
if (!obj_priv->agp_mem) {
- ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+ ret = i915_gem_object_bind_to_gtt(obj, 0);
if (ret) {
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@@ -1432,6 +1473,7 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
int i;
BUG_ON(obj_priv->pages_refcount == 0);
+ BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
if (--obj_priv->pages_refcount != 0)
return;
@@ -1439,13 +1481,21 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
if (obj_priv->tiling_mode != I915_TILING_NONE)
i915_gem_object_save_bit_17_swizzle(obj);
- for (i = 0; i < page_count; i++)
- if (obj_priv->pages[i] != NULL) {
- if (obj_priv->dirty)
- set_page_dirty(obj_priv->pages[i]);
+ if (obj_priv->madv == I915_MADV_DONTNEED)
+ obj_priv->dirty = 0;
+
+ for (i = 0; i < page_count; i++) {
+ if (obj_priv->pages[i] == NULL)
+ break;
+
+ if (obj_priv->dirty)
+ set_page_dirty(obj_priv->pages[i]);
+
+ if (obj_priv->madv == I915_MADV_WILLNEED)
mark_page_accessed(obj_priv->pages[i]);
- page_cache_release(obj_priv->pages[i]);
- }
+
+ page_cache_release(obj_priv->pages[i]);
+ }
obj_priv->dirty = 0;
drm_free_large(obj_priv->pages);
@@ -1484,6 +1534,26 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
obj_priv->last_rendering_seqno = 0;
}
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct inode *inode;
+
+ inode = obj->filp->f_path.dentry->d_inode;
+ if (inode->i_op->truncate)
+ inode->i_op->truncate(inode);
+
+ obj_priv->madv = __I915_MADV_PURGED;
+}
+
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+{
+ return obj_priv->madv == I915_MADV_DONTNEED;
+}
+
static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
@@ -1572,15 +1642,24 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
if ((obj->write_domain & flush_domains) ==
obj->write_domain) {
+ uint32_t old_write_domain = obj->write_domain;
+
obj->write_domain = 0;
i915_gem_object_move_to_active(obj, seqno);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
}
}
- if (was_empty && !dev_priv->mm.suspended)
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ if (!dev_priv->mm.suspended) {
+ mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+ if (was_empty)
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ }
return seqno;
}
@@ -1618,6 +1697,8 @@ i915_gem_retire_request(struct drm_device *dev,
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ trace_i915_gem_request_retire(dev, request->seqno);
+
/* Move any buffers on the active list that are no longer referenced
* by the ringbuffer to the flushing/inactive lists as appropriate.
*/
@@ -1666,7 +1747,7 @@ out:
/**
* Returns true if seq1 is later than seq2.
*/
-static int
+bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
return (int32_t)(seq1 - seq2) >= 0;
@@ -1704,7 +1785,7 @@ i915_gem_retire_requests(struct drm_device *dev)
retiring_seqno = request->seqno;
if (i915_seqno_passed(seqno, retiring_seqno) ||
- dev_priv->mm.wedged) {
+ atomic_read(&dev_priv->mm.wedged)) {
i915_gem_retire_request(dev, request);
list_del(&request->list);
@@ -1746,6 +1827,9 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
BUG_ON(seqno == 0);
+ if (atomic_read(&dev_priv->mm.wedged))
+ return -EIO;
+
if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
if (IS_IGDNG(dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
@@ -1758,16 +1842,20 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
i915_driver_irq_postinstall(dev);
}
+ trace_i915_gem_request_wait_begin(dev, seqno);
+
dev_priv->mm.waiting_gem_seqno = seqno;
i915_user_irq_get(dev);
ret = wait_event_interruptible(dev_priv->irq_queue,
i915_seqno_passed(i915_get_gem_seqno(dev),
seqno) ||
- dev_priv->mm.wedged);
+ atomic_read(&dev_priv->mm.wedged));
i915_user_irq_put(dev);
dev_priv->mm.waiting_gem_seqno = 0;
+
+ trace_i915_gem_request_wait_end(dev, seqno);
}
- if (dev_priv->mm.wedged)
+ if (atomic_read(&dev_priv->mm.wedged))
ret = -EIO;
if (ret && ret != -ERESTARTSYS)
@@ -1798,6 +1886,8 @@ i915_gem_flush(struct drm_device *dev,
DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
invalidate_domains, flush_domains);
#endif
+ trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
+ invalidate_domains, flush_domains);
if (flush_domains & I915_GEM_DOMAIN_CPU)
drm_agp_chipset_flush(dev);
@@ -1910,6 +2000,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return -EINVAL;
}
+ /* blow away mappings if mapped through GTT */
+ i915_gem_release_mmap(obj);
+
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+ i915_gem_clear_fence_reg(obj);
+
/* Move the object to the CPU domain to ensure that
* any possible CPU writes while it's not in the GTT
* are flushed when we go to remap it. This will
@@ -1923,21 +2019,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return ret;
}
+ BUG_ON(obj_priv->active);
+
if (obj_priv->agp_mem != NULL) {
drm_unbind_agp(obj_priv->agp_mem);
drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
obj_priv->agp_mem = NULL;
}
- BUG_ON(obj_priv->active);
-
- /* blow away mappings if mapped through GTT */
- i915_gem_release_mmap(obj);
-
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- i915_gem_clear_fence_reg(obj);
-
i915_gem_object_put_pages(obj);
+ BUG_ON(obj_priv->pages_refcount);
if (obj_priv->gtt_space) {
atomic_dec(&dev->gtt_count);
@@ -1951,40 +2042,113 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
if (!list_empty(&obj_priv->list))
list_del_init(&obj_priv->list);
+ if (i915_gem_object_is_purgeable(obj_priv))
+ i915_gem_object_truncate(obj);
+
+ trace_i915_gem_object_unbind(obj);
+
+ return 0;
+}
+
+static struct drm_gem_object *
+i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *best = NULL;
+ struct drm_gem_object *first = NULL;
+
+ /* Try to find the smallest clean object */
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+ struct drm_gem_object *obj = obj_priv->obj;
+ if (obj->size >= min_size) {
+ if ((!obj_priv->dirty ||
+ i915_gem_object_is_purgeable(obj_priv)) &&
+ (!best || obj->size < best->size)) {
+ best = obj;
+ if (best->size == min_size)
+ return best;
+ }
+ if (!first)
+ first = obj;
+ }
+ }
+
+ return best ? best : first;
+}
+
+static int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t seqno;
+ int ret;
+ bool lists_empty;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list));
+ spin_unlock(&dev_priv->mm.active_list_lock);
+
+ if (lists_empty)
+ return -ENOSPC;
+
+ /* Flush everything (on to the inactive lists) and evict */
+ i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
+ if (seqno == 0)
+ return -ENOMEM;
+
+ ret = i915_wait_request(dev, seqno);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_evict_from_inactive_list(dev);
+ if (ret)
+ return ret;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list));
+ spin_unlock(&dev_priv->mm.active_list_lock);
+ BUG_ON(!lists_empty);
+
return 0;
}
static int
-i915_gem_evict_something(struct drm_device *dev)
+i915_gem_evict_something(struct drm_device *dev, int min_size)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret = 0;
+ int ret;
for (;;) {
+ i915_gem_retire_requests(dev);
+
/* If there's an inactive buffer available now, grab it
* and be done.
*/
- if (!list_empty(&dev_priv->mm.inactive_list)) {
- obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
- struct drm_i915_gem_object,
- list);
- obj = obj_priv->obj;
- BUG_ON(obj_priv->pin_count != 0);
+ obj = i915_gem_find_inactive_object(dev, min_size);
+ if (obj) {
+ struct drm_i915_gem_object *obj_priv;
+
#if WATCH_LRU
DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
+ obj_priv = obj->driver_private;
+ BUG_ON(obj_priv->pin_count != 0);
BUG_ON(obj_priv->active);
/* Wait on the rendering and unbind the buffer. */
- ret = i915_gem_object_unbind(obj);
- break;
+ return i915_gem_object_unbind(obj);
}
/* If we didn't get anything, but the ring is still processing
- * things, wait for one of those things to finish and hopefully
- * leave us a buffer to evict.
+ * things, wait for the next to finish and hopefully leave us
+ * a buffer to evict.
*/
if (!list_empty(&dev_priv->mm.request_list)) {
struct drm_i915_gem_request *request;
@@ -1995,16 +2159,9 @@ i915_gem_evict_something(struct drm_device *dev)
ret = i915_wait_request(dev, request->seqno);
if (ret)
- break;
+ return ret;
- /* if waiting caused an object to become inactive,
- * then loop around and wait for it. Otherwise, we
- * assume that waiting freed and unbound something,
- * so there should now be some space in the GTT
- */
- if (!list_empty(&dev_priv->mm.inactive_list))
- continue;
- break;
+ continue;
}
/* If we didn't have anything on the request list but there
@@ -2013,46 +2170,44 @@ i915_gem_evict_something(struct drm_device *dev)
* will get moved to inactive.
*/
if (!list_empty(&dev_priv->mm.flushing_list)) {
- obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- list);
- obj = obj_priv->obj;
+ struct drm_i915_gem_object *obj_priv;
- i915_gem_flush(dev,
- obj->write_domain,
- obj->write_domain);
- i915_add_request(dev, NULL, obj->write_domain);
+ /* Find an object that we can immediately reuse */
+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+ obj = obj_priv->obj;
+ if (obj->size >= min_size)
+ break;
- obj = NULL;
- continue;
- }
+ obj = NULL;
+ }
- DRM_ERROR("inactive empty %d request empty %d "
- "flushing empty %d\n",
- list_empty(&dev_priv->mm.inactive_list),
- list_empty(&dev_priv->mm.request_list),
- list_empty(&dev_priv->mm.flushing_list));
- /* If we didn't do any of the above, there's nothing to be done
- * and we just can't fit it in.
- */
- return -ENOSPC;
- }
- return ret;
-}
+ if (obj != NULL) {
+ uint32_t seqno;
-static int
-i915_gem_evict_everything(struct drm_device *dev)
-{
- int ret;
+ i915_gem_flush(dev,
+ obj->write_domain,
+ obj->write_domain);
+ seqno = i915_add_request(dev, NULL, obj->write_domain);
+ if (seqno == 0)
+ return -ENOMEM;
- for (;;) {
- ret = i915_gem_evict_something(dev);
- if (ret != 0)
- break;
+ ret = i915_wait_request(dev, seqno);
+ if (ret)
+ return ret;
+
+ continue;
+ }
+ }
+
+ /* If we didn't do any of the above, there's no single buffer
+ * large enough to swap out for the new one, so just evict
+ * everything and start again. (This should be rare.)
+ */
+ if (!list_empty(&dev_priv->mm.inactive_list))
+ return i915_gem_evict_from_inactive_list(dev);
+ else
+ return i915_gem_evict_everything(dev);
}
- if (ret == -ENOSPC)
- return 0;
- return ret;
}
int
@@ -2075,7 +2230,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
BUG_ON(obj_priv->pages != NULL);
obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
if (obj_priv->pages == NULL) {
- DRM_ERROR("Faled to allocate page list\n");
obj_priv->pages_refcount--;
return -ENOMEM;
}
@@ -2086,7 +2240,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
page = read_mapping_page(mapping, i, NULL);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
- DRM_ERROR("read_mapping_page failed: %d\n", ret);
i915_gem_object_put_pages(obj);
return ret;
}
@@ -2323,6 +2476,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
else
i830_write_fence_reg(reg);
+ trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
+
return 0;
}
@@ -2405,10 +2560,17 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
struct drm_mm_node *free_space;
- int page_count, ret;
+ bool retry_alloc = false;
+ int ret;
if (dev_priv->mm.suspended)
return -EBUSY;
+
+ if (obj_priv->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to bind a purgeable object\n");
+ return -EINVAL;
+ }
+
if (alignment == 0)
alignment = i915_gem_get_gtt_alignment(obj);
if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
@@ -2428,30 +2590,16 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
}
}
if (obj_priv->gtt_space == NULL) {
- bool lists_empty;
-
/* If the gtt is empty and we're still having trouble
* fitting our object in, we're out of memory.
*/
#if WATCH_LRU
DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
- spin_lock(&dev_priv->mm.active_list_lock);
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list));
- spin_unlock(&dev_priv->mm.active_list_lock);
- if (lists_empty) {
- DRM_ERROR("GTT full, but LRU list empty\n");
- return -ENOSPC;
- }
-
- ret = i915_gem_evict_something(dev);
- if (ret != 0) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Failed to evict a buffer %d\n", ret);
+ ret = i915_gem_evict_something(dev, obj->size);
+ if (ret)
return ret;
- }
+
goto search_free;
}
@@ -2459,27 +2607,56 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
DRM_INFO("Binding object of size %zd at 0x%08x\n",
obj->size, obj_priv->gtt_offset);
#endif
+ if (retry_alloc) {
+ i915_gem_object_set_page_gfp_mask(obj,
+ i915_gem_object_get_page_gfp_mask(obj) & ~__GFP_NORETRY);
+ }
ret = i915_gem_object_get_pages(obj);
+ if (retry_alloc) {
+ i915_gem_object_set_page_gfp_mask(obj,
+ i915_gem_object_get_page_gfp_mask(obj) | __GFP_NORETRY);
+ }
if (ret) {
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
+
+ if (ret == -ENOMEM) {
+ /* first try to clear up some space from the GTT */
+ ret = i915_gem_evict_something(dev, obj->size);
+ if (ret) {
+ /* now try to shrink everyone else */
+ if (!retry_alloc) {
+ retry_alloc = true;
+ goto search_free;
+ }
+
+ return ret;
+ }
+
+ goto search_free;
+ }
+
return ret;
}
- page_count = obj->size / PAGE_SIZE;
/* Create an AGP memory structure pointing at our pages, and bind it
* into the GTT.
*/
obj_priv->agp_mem = drm_agp_bind_pages(dev,
obj_priv->pages,
- page_count,
+ obj->size >> PAGE_SHIFT,
obj_priv->gtt_offset,
obj_priv->agp_type);
if (obj_priv->agp_mem == NULL) {
i915_gem_object_put_pages(obj);
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
- return -ENOMEM;
+
+ ret = i915_gem_evict_something(dev, obj->size);
+ if (ret)
+ return ret;
+
+ goto search_free;
}
atomic_inc(&dev->gtt_count);
atomic_add(obj->size, &dev->gtt_memory);
@@ -2491,6 +2668,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+ trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
+
return 0;
}
@@ -2506,15 +2685,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
if (obj_priv->pages == NULL)
return;
- /* XXX: The 865 in particular appears to be weird in how it handles
- * cache flushing. We haven't figured it out, but the
- * clflush+agp_chipset_flush doesn't appear to successfully get the
- * data visible to the PGU, while wbinvd + agp_chipset_flush does.
- */
- if (IS_I865G(obj->dev)) {
- wbinvd();
- return;
- }
+ trace_i915_gem_object_clflush(obj);
drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
}
@@ -2525,21 +2696,29 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
uint32_t seqno;
+ uint32_t old_write_domain;
if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
return;
/* Queue the GPU write cache flushing we need. */
+ old_write_domain = obj->write_domain;
i915_gem_flush(dev, 0, obj->write_domain);
seqno = i915_add_request(dev, NULL, obj->write_domain);
obj->write_domain = 0;
i915_gem_object_move_to_active(obj, seqno);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
+ uint32_t old_write_domain;
+
if (obj->write_domain != I915_GEM_DOMAIN_GTT)
return;
@@ -2547,7 +2726,12 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
* to it immediately go to main memory as far as we know, so there's
* no chipset flush. It also doesn't land in render cache.
*/
+ old_write_domain = obj->write_domain;
obj->write_domain = 0;
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
/** Flushes the CPU write domain for the object if it's dirty. */
@@ -2555,13 +2739,19 @@ static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ uint32_t old_write_domain;
if (obj->write_domain != I915_GEM_DOMAIN_CPU)
return;
i915_gem_clflush_object(obj);
drm_agp_chipset_flush(dev);
+ old_write_domain = obj->write_domain;
obj->write_domain = 0;
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
/**
@@ -2574,6 +2764,7 @@ int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ uint32_t old_write_domain, old_read_domains;
int ret;
/* Not valid to be called on unbound objects. */
@@ -2586,6 +2777,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
if (ret != 0)
return ret;
+ old_write_domain = obj->write_domain;
+ old_read_domains = obj->read_domains;
+
/* If we're writing through the GTT domain, then CPU and GPU caches
* will need to be invalidated at next use.
*/
@@ -2604,6 +2798,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
obj_priv->dirty = 1;
}
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+
return 0;
}
@@ -2616,6 +2814,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
+ uint32_t old_write_domain, old_read_domains;
int ret;
i915_gem_object_flush_gpu_write_domain(obj);
@@ -2631,6 +2830,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
*/
i915_gem_object_set_to_full_cpu_read_domain(obj);
+ old_write_domain = obj->write_domain;
+ old_read_domains = obj->read_domains;
+
/* Flush the CPU cache if it's still invalid. */
if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj);
@@ -2651,6 +2853,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
obj->write_domain = I915_GEM_DOMAIN_CPU;
}
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+
return 0;
}
@@ -2772,10 +2978,13 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
struct drm_i915_gem_object *obj_priv = obj->driver_private;
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
+ uint32_t old_read_domains;
BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
+ intel_mark_busy(dev, obj);
+
#if WATCH_BUF
DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
__func__, obj,
@@ -2816,6 +3025,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
i915_gem_clflush_object(obj);
}
+ old_read_domains = obj->read_domains;
+
/* The actual obj->write_domain will be updated with
* pending_write_domain after we emit the accumulated flush for all
* of our domain changes in execbuffers (which clears objects'
@@ -2834,6 +3045,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
obj->read_domains, obj->write_domain,
dev->invalidate_domains, dev->flush_domains);
#endif
+
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ obj->write_domain);
}
/**
@@ -2886,6 +3101,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
uint64_t offset, uint64_t size)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ uint32_t old_read_domains;
int i, ret;
if (offset == 0 && size == obj->size)
@@ -2932,8 +3148,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
*/
BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+ old_read_domains = obj->read_domains;
obj->read_domains |= I915_GEM_DOMAIN_CPU;
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ obj->write_domain);
+
return 0;
}
@@ -2977,6 +3198,21 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
}
target_obj_priv = target_obj->driver_private;
+#if WATCH_RELOC
+ DRM_INFO("%s: obj %p offset %08x target %d "
+ "read %08x write %08x gtt %08x "
+ "presumed %08x delta %08x\n",
+ __func__,
+ obj,
+ (int) reloc->offset,
+ (int) reloc->target_handle,
+ (int) reloc->read_domains,
+ (int) reloc->write_domain,
+ (int) target_obj_priv->gtt_offset,
+ (int) reloc->presumed_offset,
+ reloc->delta);
+#endif
+
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
*/
@@ -2988,25 +3224,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
return -EINVAL;
}
- if (reloc->offset > obj->size - 4) {
- DRM_ERROR("Relocation beyond object bounds: "
- "obj %p target %d offset %d size %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset, (int) obj->size);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
- if (reloc->offset & 3) {
- DRM_ERROR("Relocation not 4-byte aligned: "
- "obj %p target %d offset %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
-
+ /* Validate that the target is in a valid r/w GPU domain */
if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
reloc->read_domains & I915_GEM_DOMAIN_CPU) {
DRM_ERROR("reloc with read/write CPU domains: "
@@ -3020,7 +3238,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
i915_gem_object_unpin(obj);
return -EINVAL;
}
-
if (reloc->write_domain && target_obj->pending_write_domain &&
reloc->write_domain != target_obj->pending_write_domain) {
DRM_ERROR("Write domain conflict: "
@@ -3035,21 +3252,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
return -EINVAL;
}
-#if WATCH_RELOC
- DRM_INFO("%s: obj %p offset %08x target %d "
- "read %08x write %08x gtt %08x "
- "presumed %08x delta %08x\n",
- __func__,
- obj,
- (int) reloc->offset,
- (int) reloc->target_handle,
- (int) reloc->read_domains,
- (int) reloc->write_domain,
- (int) target_obj_priv->gtt_offset,
- (int) reloc->presumed_offset,
- reloc->delta);
-#endif
-
target_obj->pending_read_domains |= reloc->read_domains;
target_obj->pending_write_domain |= reloc->write_domain;
@@ -3061,6 +3263,37 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
continue;
}
+ /* Check that the relocation address is valid... */
+ if (reloc->offset > obj->size - 4) {
+ DRM_ERROR("Relocation beyond object bounds: "
+ "obj %p target %d offset %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset, (int) obj->size);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -EINVAL;
+ }
+ if (reloc->offset & 3) {
+ DRM_ERROR("Relocation not 4-byte aligned: "
+ "obj %p target %d offset %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -EINVAL;
+ }
+
+ /* and points to somewhere within the target object. */
+ if (reloc->delta >= target_obj->size) {
+ DRM_ERROR("Relocation beyond target object bounds: "
+ "obj %p target %d delta %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->delta, (int) target_obj->size);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -EINVAL;
+ }
+
ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret != 0) {
drm_gem_object_unreference(target_obj);
@@ -3119,6 +3352,8 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
exec_len = (uint32_t) exec->batch_len;
+ trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno);
+
count = nbox ? nbox : 1;
for (i = 0; i < count; i++) {
@@ -3356,7 +3591,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
i915_verify_inactive(dev, __FILE__, __LINE__);
- if (dev_priv->mm.wedged) {
+ if (atomic_read(&dev_priv->mm.wedged)) {
DRM_ERROR("Execbuf while wedged\n");
mutex_unlock(&dev->struct_mutex);
ret = -EIO;
@@ -3414,8 +3649,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
/* error other than GTT full, or we've already tried again */
if (ret != -ENOSPC || pin_tries >= 1) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Failed to pin buffers %d\n", ret);
+ if (ret != -ERESTARTSYS) {
+ unsigned long long total_size = 0;
+ for (i = 0; i < args->buffer_count; i++)
+ total_size += object_list[i]->size;
+ DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
+ pinned + 1, args->buffer_count,
+ total_size, ret);
+ DRM_ERROR("%d objects [%d pinned], "
+ "%d object bytes [%d pinned], "
+ "%d/%d gtt bytes\n",
+ atomic_read(&dev->object_count),
+ atomic_read(&dev->pin_count),
+ atomic_read(&dev->object_memory),
+ atomic_read(&dev->pin_memory),
+ atomic_read(&dev->gtt_memory),
+ dev->gtt_total);
+ }
goto err;
}
@@ -3426,7 +3676,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
/* evict everyone we can from the aperture */
ret = i915_gem_evict_everything(dev);
- if (ret)
+ if (ret && ret != -ENOSPC)
goto err;
}
@@ -3482,8 +3732,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
+ uint32_t old_write_domain = obj->write_domain;
obj->write_domain = obj->pending_write_domain;
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -3600,11 +3854,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
i915_verify_inactive(dev, __FILE__, __LINE__);
if (obj_priv->gtt_space == NULL) {
ret = i915_gem_object_bind_to_gtt(obj, alignment);
- if (ret != 0) {
- if (ret != -EBUSY && ret != -ERESTARTSYS)
- DRM_ERROR("Failure to bind: %d\n", ret);
+ if (ret)
return ret;
- }
}
/*
* Pre-965 chips need a fence register set up in order to
@@ -3684,6 +3935,13 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
}
obj_priv = obj->driver_private;
+ if (obj_priv->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to pin a purgeable buffer\n");
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
args->handle);
@@ -3796,6 +4054,56 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
return i915_gem_ring_throttle(dev, file_priv);
}
+int
+i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_madvise *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ switch (args->madv) {
+ case I915_MADV_DONTNEED:
+ case I915_MADV_WILLNEED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+ DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
+ args->handle);
+ return -EBADF;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ obj_priv = obj->driver_private;
+
+ if (obj_priv->pin_count) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
+ return -EINVAL;
+ }
+
+ if (obj_priv->madv != __I915_MADV_PURGED)
+ obj_priv->madv = args->madv;
+
+ /* if the object is no longer bound, discard its backing storage */
+ if (i915_gem_object_is_purgeable(obj_priv) &&
+ obj_priv->gtt_space == NULL)
+ i915_gem_object_truncate(obj);
+
+ args->retained = obj_priv->madv != __I915_MADV_PURGED;
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
int i915_gem_init_object(struct drm_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv;
@@ -3820,6 +4128,9 @@ int i915_gem_init_object(struct drm_gem_object *obj)
obj_priv->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj_priv->list);
INIT_LIST_HEAD(&obj_priv->fence_list);
+ obj_priv->madv = I915_MADV_WILLNEED;
+
+ trace_i915_gem_object_create(obj);
return 0;
}
@@ -3829,6 +4140,8 @@ void i915_gem_free_object(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ trace_i915_gem_object_destroy(obj);
+
while (obj_priv->pin_count > 0)
i915_gem_object_unpin(obj);
@@ -3837,43 +4150,35 @@ void i915_gem_free_object(struct drm_gem_object *obj)
i915_gem_object_unbind(obj);
- i915_gem_free_mmap_offset(obj);
+ if (obj_priv->mmap_offset)
+ i915_gem_free_mmap_offset(obj);
kfree(obj_priv->page_cpu_valid);
kfree(obj_priv->bit_17);
kfree(obj->driver_private);
}
-/** Unbinds all objects that are on the given buffer list. */
+/** Unbinds all inactive objects. */
static int
-i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
+i915_gem_evict_from_inactive_list(struct drm_device *dev)
{
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
+ drm_i915_private_t *dev_priv = dev->dev_private;
- while (!list_empty(head)) {
- obj_priv = list_first_entry(head,
- struct drm_i915_gem_object,
- list);
- obj = obj_priv->obj;
+ while (!list_empty(&dev_priv->mm.inactive_list)) {
+ struct drm_gem_object *obj;
+ int ret;
- if (obj_priv->pin_count != 0) {
- DRM_ERROR("Pinned object in unbind list\n");
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
+ obj = list_first_entry(&dev_priv->mm.inactive_list,
+ struct drm_i915_gem_object,
+ list)->obj;
ret = i915_gem_object_unbind(obj);
if (ret != 0) {
- DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
- ret);
- mutex_unlock(&dev->struct_mutex);
+ DRM_ERROR("Error unbinding object: %d\n", ret);
return ret;
}
}
-
return 0;
}
@@ -3895,6 +4200,7 @@ i915_gem_idle(struct drm_device *dev)
* We need to replace this with a semaphore, or something.
*/
dev_priv->mm.suspended = 1;
+ del_timer(&dev_priv->hangcheck_timer);
/* Cancel the retire work handler, wait for it to finish if running
*/
@@ -3924,7 +4230,7 @@ i915_gem_idle(struct drm_device *dev)
if (last_seqno == cur_seqno) {
if (stuck++ > 100) {
DRM_ERROR("hardware wedged\n");
- dev_priv->mm.wedged = 1;
+ atomic_set(&dev_priv->mm.wedged, 1);
DRM_WAKEUP(&dev_priv->irq_queue);
break;
}
@@ -3937,7 +4243,7 @@ i915_gem_idle(struct drm_device *dev)
i915_gem_retire_requests(dev);
spin_lock(&dev_priv->mm.active_list_lock);
- if (!dev_priv->mm.wedged) {
+ if (!atomic_read(&dev_priv->mm.wedged)) {
/* Active and flushing should now be empty as we've
* waited for a sequence higher than any pending execbuffer
*/
@@ -3955,29 +4261,41 @@ i915_gem_idle(struct drm_device *dev)
* the GPU domains and just stuff them onto inactive.
*/
while (!list_empty(&dev_priv->mm.active_list)) {
- struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *obj;
+ uint32_t old_write_domain;
- obj_priv = list_first_entry(&dev_priv->mm.active_list,
- struct drm_i915_gem_object,
- list);
- obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
- i915_gem_object_move_to_inactive(obj_priv->obj);
+ obj = list_first_entry(&dev_priv->mm.active_list,
+ struct drm_i915_gem_object,
+ list)->obj;
+ old_write_domain = obj->write_domain;
+ obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+ i915_gem_object_move_to_inactive(obj);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
spin_unlock(&dev_priv->mm.active_list_lock);
while (!list_empty(&dev_priv->mm.flushing_list)) {
- struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *obj;
+ uint32_t old_write_domain;
- obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- list);
- obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
- i915_gem_object_move_to_inactive(obj_priv->obj);
+ obj = list_first_entry(&dev_priv->mm.flushing_list,
+ struct drm_i915_gem_object,
+ list)->obj;
+ old_write_domain = obj->write_domain;
+ obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+ i915_gem_object_move_to_inactive(obj);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
/* Move all inactive buffers out of the GTT. */
- ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+ ret = i915_gem_evict_from_inactive_list(dev);
WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
if (ret) {
mutex_unlock(&dev->struct_mutex);
@@ -4093,7 +4411,6 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
/* Set up the kernel mapping for the ring. */
ring->Size = obj->size;
- ring->tail_mask = obj->size - 1;
ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
ring->map.size = obj->size;
@@ -4200,9 +4517,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
- if (dev_priv->mm.wedged) {
+ if (atomic_read(&dev_priv->mm.wedged)) {
DRM_ERROR("Reenabling wedged hardware, good luck\n");
- dev_priv->mm.wedged = 0;
+ atomic_set(&dev_priv->mm.wedged, 0);
}
mutex_lock(&dev->struct_mutex);
@@ -4268,6 +4585,10 @@ i915_gem_load(struct drm_device *dev)
i915_gem_retire_work_handler);
dev_priv->mm.next_gem_seqno = 1;
+ spin_lock(&shrink_list_lock);
+ list_add(&dev_priv->mm.shrink_list, &shrink_list);
+ spin_unlock(&shrink_list_lock);
+
/* Old X drivers will take 0-2 for front, back, depth buffers */
dev_priv->fence_reg_start = 3;
@@ -4485,3 +4806,116 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
list_del_init(i915_file_priv->mm.request_list.next);
mutex_unlock(&dev->struct_mutex);
}
+
+static int
+i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
+{
+ drm_i915_private_t *dev_priv, *next_dev;
+ struct drm_i915_gem_object *obj_priv, *next_obj;
+ int cnt = 0;
+ int would_deadlock = 1;
+
+ /* "fast-path" to count number of available objects */
+ if (nr_to_scan == 0) {
+ spin_lock(&shrink_list_lock);
+ list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+ struct drm_device *dev = dev_priv->dev;
+
+ if (mutex_trylock(&dev->struct_mutex)) {
+ list_for_each_entry(obj_priv,
+ &dev_priv->mm.inactive_list,
+ list)
+ cnt++;
+ mutex_unlock(&dev->struct_mutex);
+ }
+ }
+ spin_unlock(&shrink_list_lock);
+
+ return (cnt / 100) * sysctl_vfs_cache_pressure;
+ }
+
+ spin_lock(&shrink_list_lock);
+
+ /* first scan for clean buffers */
+ list_for_each_entry_safe(dev_priv, next_dev,
+ &shrink_list, mm.shrink_list) {
+ struct drm_device *dev = dev_priv->dev;
+
+ if (!mutex_trylock(&dev->struct_mutex))
+ continue;
+
+ spin_unlock(&shrink_list_lock);
+
+ i915_gem_retire_requests(dev);
+
+ list_for_each_entry_safe(obj_priv, next_obj,
+ &dev_priv->mm.inactive_list,
+ list) {
+ if (i915_gem_object_is_purgeable(obj_priv)) {
+ i915_gem_object_unbind(obj_priv->obj);
+ if (--nr_to_scan <= 0)
+ break;
+ }
+ }
+
+ spin_lock(&shrink_list_lock);
+ mutex_unlock(&dev->struct_mutex);
+
+ would_deadlock = 0;
+
+ if (nr_to_scan <= 0)
+ break;
+ }
+
+ /* second pass, evict/count anything still on the inactive list */
+ list_for_each_entry_safe(dev_priv, next_dev,
+ &shrink_list, mm.shrink_list) {
+ struct drm_device *dev = dev_priv->dev;
+
+ if (!mutex_trylock(&dev->struct_mutex))
+ continue;
+
+ spin_unlock(&shrink_list_lock);
+
+ list_for_each_entry_safe(obj_priv, next_obj,
+ &dev_priv->mm.inactive_list,
+ list) {
+ if (nr_to_scan > 0) {
+ i915_gem_object_unbind(obj_priv->obj);
+ nr_to_scan--;
+ } else
+ cnt++;
+ }
+
+ spin_lock(&shrink_list_lock);
+ mutex_unlock(&dev->struct_mutex);
+
+ would_deadlock = 0;
+ }
+
+ spin_unlock(&shrink_list_lock);
+
+ if (would_deadlock)
+ return -1;
+ else if (cnt > 0)
+ return (cnt / 100) * sysctl_vfs_cache_pressure;
+ else
+ return 0;
+}
+
+static struct shrinker shrinker = {
+ .shrink = i915_gem_shrink,
+ .seeks = DEFAULT_SEEKS,
+};
+
+__init void
+i915_gem_shrinker_init(void)
+{
+ register_shrinker(&shrinker);
+}
+
+__exit void
+i915_gem_shrinker_exit(void)
+{
+ unregister_shrinker(&shrinker);
+}
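
One subtlety worth noting: the newly exported i915_seqno_passed() compares sequence numbers via a signed cast of their unsigned difference, which stays correct across 32-bit wraparound as long as the two values are less than 2^31 apart. A minimal userspace sketch of the same idiom (illustrative only, not part of the patch):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Same trick as i915_seqno_passed(): true if seq1 is at or after seq2,
 * modulo 2^32, assuming the ring never runs ~2^31 requests ahead.
 */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(5, 3));             /* ordinary ordering */
	assert(seqno_passed(2, 0xfffffffeu));   /* still correct across wrap */
	assert(!seqno_passed(0xfffffffeu, 2));  /* and in the reverse order */
	return 0;
}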
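For completeness, a hedged sketch of how a userspace client might drive the new madvise ioctl once the I915_MADV_* interface is exposed through i915_drm.h; the header include path, the helper name, and the error handling are assumptions for illustration, not part of this patch:

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* assumed install path for the UAPI header */

/* Illustrative only: mark an idle GEM buffer purgeable so the shrinker
 * added above may reclaim its backing pages under memory pressure.
 * Returns 1 if the pages are still retained, 0 if they were already
 * purged, or -errno on failure.
 */
static int gem_set_purgeable(int fd, uint32_t handle)
{
	struct drm_i915_gem_madvise madv = {
		.handle = handle,
		.madv = I915_MADV_DONTNEED,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv))
		return -errno;

	return madv.retained;
}

Before reusing such a buffer the client would switch it back with I915_MADV_WILLNEED and check retained: a zero result means the kernel truncated the backing store, so the contents must be regenerated.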