Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 559
1 file changed, 388 insertions(+), 171 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3326770c9ed2..f36126383d26 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,6 +31,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
@@ -43,6 +44,8 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly);
+static void
+i915_gem_object_retire(struct drm_i915_gem_object *obj);
static void i915_gem_write_fence(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj);
@@ -50,14 +53,15 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable);
-static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
+static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
struct shrink_control *sc);
-static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
+static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
struct shrink_control *sc);
+static int i915_gem_shrinker_oom(struct notifier_block *nb,
+ unsigned long event,
+ void *ptr);
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
-static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
-static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
@@ -470,6 +474,8 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
return ret;
+
+ i915_gem_object_retire(obj);
}
ret = i915_gem_object_get_pages(obj);
@@ -885,6 +891,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
+
+ i915_gem_object_retire(obj);
}
/* Same trick applies to invalidate partially written cachelines read
* before writing. */
@@ -1088,7 +1096,7 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
* equal.
*/
static int
-i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
+i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
{
int ret;
@@ -1107,7 +1115,7 @@ static void fake_irq(unsigned long data)
}
static bool missed_irq(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}
@@ -1138,7 +1146,7 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
* Returns 0 if the seqno was found within the allotted time. Else returns the
* errno with remaining time filled in timeout argument.
*/
-static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
unsigned reset_counter,
bool interruptible,
struct timespec *timeout,
@@ -1245,7 +1253,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
* request and object lists appropriately for that event.
*/
int
-i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
+i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1270,9 +1278,10 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
- i915_gem_retire_requests_ring(ring);
+ if (!obj->active)
+ return 0;
/* Manually manage the write flush as we may have not yet
* retired the buffer.
@@ -1282,7 +1291,6 @@ i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
* we know we have passed the last write.
*/
obj->last_write_seqno = 0;
- obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
return 0;
}
@@ -1295,7 +1303,7 @@ static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly)
{
- struct intel_ring_buffer *ring = obj->ring;
+ struct intel_engine_cs *ring = obj->ring;
u32 seqno;
int ret;
@@ -1320,7 +1328,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = obj->ring;
+ struct intel_engine_cs *ring = obj->ring;
unsigned reset_counter;
u32 seqno;
int ret;
@@ -1536,7 +1544,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Access to snoopable pages through the GTT is incoherent. */
if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
- ret = -EINVAL;
+ ret = -EFAULT;
goto unlock;
}
@@ -1803,12 +1811,16 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
+{
+ return obj->madv == I915_MADV_DONTNEED;
+}
+
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
- struct inode *inode;
-
i915_gem_object_free_mmap_offset(obj);
if (obj->base.filp == NULL)
@@ -1819,16 +1831,28 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
* To do this we must instruct the shmfs to drop all of its
* backing pages, *now*.
*/
- inode = file_inode(obj->base.filp);
- shmem_truncate_range(inode, 0, (loff_t)-1);
-
+ shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
obj->madv = __I915_MADV_PURGED;
}
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
+/* Try to discard unwanted pages */
+static void
+i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
- return obj->madv == I915_MADV_DONTNEED;
+ struct address_space *mapping;
+
+ switch (obj->madv) {
+ case I915_MADV_DONTNEED:
+ i915_gem_object_truncate(obj);
+ case __I915_MADV_PURGED:
+ return;
+ }
+
+ if (obj->base.filp == NULL)
+ return;
+
+ mapping = file_inode(obj->base.filp)->i_mapping;
+ invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}
static void
@@ -1893,8 +1917,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
ops->put_pages(obj);
obj->pages = NULL;
- if (i915_gem_object_is_purgeable(obj))
- i915_gem_object_truncate(obj);
+ i915_gem_object_invalidate(obj);
return 0;
}
@@ -1903,58 +1926,58 @@ static unsigned long
__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
bool purgeable_only)
{
- struct list_head still_bound_list;
- struct drm_i915_gem_object *obj, *next;
+ struct list_head still_in_list;
+ struct drm_i915_gem_object *obj;
unsigned long count = 0;
- list_for_each_entry_safe(obj, next,
- &dev_priv->mm.unbound_list,
- global_list) {
- if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
- i915_gem_object_put_pages(obj) == 0) {
- count += obj->base.size >> PAGE_SHIFT;
- if (count >= target)
- return count;
- }
- }
-
/*
- * As we may completely rewrite the bound list whilst unbinding
+ * As we may completely rewrite the (un)bound list whilst unbinding
* (due to retiring requests) we have to strictly process only
* one element of the list at the time, and recheck the list
* on every iteration.
+ *
+ * In particular, we must hold a reference whilst removing the
+ * object as we may end up waiting for and/or retiring the objects.
+ * This might release the final reference (held by the active list)
+ * and result in the object being freed from under us. This is
+ * similar to the precautions the eviction code must take whilst
+ * removing objects.
+ *
+ * Also note that although these lists do not hold a reference to
+ * the object we can safely grab one here: The final object
+ * unreferencing and the bound_list are both protected by the
+ * dev->struct_mutex and so we won't ever be able to observe an
+ * object on the bound_list with a reference count of 0.
*/
- INIT_LIST_HEAD(&still_bound_list);
+ INIT_LIST_HEAD(&still_in_list);
+ while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
+ obj = list_first_entry(&dev_priv->mm.unbound_list,
+ typeof(*obj), global_list);
+ list_move_tail(&obj->global_list, &still_in_list);
+
+ if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+ continue;
+
+ drm_gem_object_reference(&obj->base);
+
+ if (i915_gem_object_put_pages(obj) == 0)
+ count += obj->base.size >> PAGE_SHIFT;
+
+ drm_gem_object_unreference(&obj->base);
+ }
+ list_splice(&still_in_list, &dev_priv->mm.unbound_list);
+
+ INIT_LIST_HEAD(&still_in_list);
while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
struct i915_vma *vma, *v;
obj = list_first_entry(&dev_priv->mm.bound_list,
typeof(*obj), global_list);
- list_move_tail(&obj->global_list, &still_bound_list);
+ list_move_tail(&obj->global_list, &still_in_list);
if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
continue;
- /*
- * Hold a reference whilst we unbind this object, as we may
- * end up waiting for and retiring requests. This might
- * release the final reference (held by the active list)
- * and result in the object being freed from under us.
- * in this object being freed.
- *
- * Note 1: Shrinking the bound list is special since only active
- * (and hence bound objects) can contain such limbo objects, so
- * we don't need special tricks for shrinking the unbound list.
- * The only other place where we have to be careful with active
- * objects suddenly disappearing due to retiring requests is the
- * eviction code.
- *
- * Note 2: Even though the bound list doesn't hold a reference
- * to the object we can safely grab one here: The final object
- * unreferencing and the bound_list are both protected by the
- * dev->struct_mutex and so we won't ever be able to observe an
- * object on the bound_list with a reference count equals 0.
- */
drm_gem_object_reference(&obj->base);
list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
@@ -1966,7 +1989,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
drm_gem_object_unreference(&obj->base);
}
- list_splice(&still_bound_list, &dev_priv->mm.bound_list);
+ list_splice(&still_in_list, &dev_priv->mm.bound_list);
return count;
}
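
The loop structure above is easier to see in isolation. Below is a minimal, standalone userspace sketch of the same pattern (all names are invented; none of this is i915 code): detach one element at a time onto a private still_in_list, hold an extra reference while its pages are released, then splice the survivors back afterwards.

#include <stdio.h>
#include <stdlib.h>

struct obj {
	struct obj *next;
	int refcount;		/* the caller's reference keeps this >= 1 here */
	int purgeable;
	unsigned long pages;
};

static struct obj *pop(struct obj **list)
{
	struct obj *o = *list;

	if (o) {
		*list = o->next;
		o->next = NULL;
	}
	return o;
}

static void push(struct obj **list, struct obj *o)
{
	o->next = *list;
	*list = o;
}

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		free(o);
}

static unsigned long put_pages(struct obj *o)
{
	/* in the driver this step may wait for the GPU, retire requests
	 * and rewrite the source list behind our back */
	unsigned long freed = o->pages;

	o->pages = 0;
	return freed;
}

static unsigned long shrink(struct obj **list, unsigned long target,
			    int purgeable_only)
{
	struct obj *still_in_list = NULL;
	unsigned long count = 0;
	struct obj *o;

	while (count < target && (o = pop(list)) != NULL) {
		push(&still_in_list, o);

		if (purgeable_only && !o->purgeable)
			continue;

		o->refcount++;	/* pin: releasing pages must not free it */
		count += put_pages(o);
		obj_put(o);
	}

	while ((o = pop(&still_in_list)) != NULL)
		push(list, o);	/* hand the untouched objects back */

	return count;
}

int main(void)
{
	struct obj a = { .refcount = 1, .purgeable = 1, .pages = 4 };
	struct obj b = { .refcount = 1, .purgeable = 0, .pages = 8 };
	struct obj *list = NULL;

	push(&list, &a);
	push(&list, &b);
	printf("freed %lu pages\n", shrink(&list, ~0UL, 1));
	return 0;
}
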
@@ -1980,17 +2003,8 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
static unsigned long
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
- struct drm_i915_gem_object *obj, *next;
- long freed = 0;
-
i915_gem_evict_everything(dev_priv->dev);
-
- list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
- global_list) {
- if (i915_gem_object_put_pages(obj) == 0)
- freed += obj->base.size >> PAGE_SHIFT;
- }
- return freed;
+ return __i915_gem_shrink(dev_priv, LONG_MAX, false);
}
static int
@@ -2094,7 +2108,19 @@ err_pages:
page_cache_release(sg_page_iter_page(&sg_iter));
sg_free_table(st);
kfree(st);
- return PTR_ERR(page);
+
+ /* shmemfs first checks if there is enough memory to allocate the page
+ * and reports ENOSPC should there be insufficient memory, along with the usual
+ * ENOMEM for a genuine allocation failure.
+ *
+ * We use ENOSPC in our driver to mean that we have run out of aperture
+ * space and so want to translate the error from shmemfs back to our
+ * usual understanding of ENOMEM.
+ */
+ if (PTR_ERR(page) == -ENOSPC)
+ return -ENOMEM;
+ else
+ return PTR_ERR(page);
}
/* Ensure that the associated pages are gathered from the backing storage
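
The ENOSPC handling added in the hunk above reflects a driver-wide convention. A hypothetical one-liner (illustrative only, not a function in the driver) shows the same translation on its own:

#include <errno.h>

/* shmemfs may report -ENOSPC for a failed page allocation, but the driver
 * reserves ENOSPC to mean "out of aperture space", so fold it into -ENOMEM. */
static inline int translate_shmemfs_error(int err)
{
	return err == -ENOSPC ? -ENOMEM : err;
}
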
@@ -2131,7 +2157,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
static void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2169,7 +2195,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
}
void i915_vma_move_to_active(struct i915_vma *vma,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
list_move_tail(&vma->mm_list, &vma->vm->active_list);
return i915_gem_object_move_to_active(vma->obj, ring);
@@ -2207,11 +2233,24 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
WARN_ON(i915_verify_lists(dev));
}
+static void
+i915_gem_object_retire(struct drm_i915_gem_object *obj)
+{
+ struct intel_engine_cs *ring = obj->ring;
+
+ if (ring == NULL)
+ return;
+
+ if (i915_seqno_passed(ring->get_seqno(ring, true),
+ obj->last_read_seqno))
+ i915_gem_object_move_to_inactive(obj);
+}
+
static int
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int ret, i, j;
/* Carefully retire all requests without writing to the rings */
@@ -2226,8 +2265,8 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
for_each_ring(ring, dev_priv, i) {
intel_ring_init_seqno(ring, seqno);
- for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
- ring->sync_seqno[j] = 0;
+ for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
+ ring->semaphore.sync_seqno[j] = 0;
}
return 0;
@@ -2277,7 +2316,7 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
return 0;
}
-int __i915_add_request(struct intel_ring_buffer *ring,
+int __i915_add_request(struct intel_engine_cs *ring,
struct drm_file *file,
struct drm_i915_gem_object *obj,
u32 *out_seqno)
@@ -2382,7 +2421,7 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
}
static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
- const struct i915_hw_context *ctx)
+ const struct intel_context *ctx)
{
unsigned long elapsed;
@@ -2395,8 +2434,9 @@ static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
if (!i915_gem_context_is_default(ctx)) {
DRM_DEBUG("context hanging too fast, banning!\n");
return true;
- } else if (dev_priv->gpu_error.stop_rings == 0) {
- DRM_ERROR("gpu hanging too fast, banning!\n");
+ } else if (i915_stop_ring_allow_ban(dev_priv)) {
+ if (i915_stop_ring_allow_warn(dev_priv))
+ DRM_ERROR("gpu hanging too fast, banning!\n");
return true;
}
}
@@ -2405,7 +2445,7 @@ static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
}
static void i915_set_reset_status(struct drm_i915_private *dev_priv,
- struct i915_hw_context *ctx,
+ struct intel_context *ctx,
const bool guilty)
{
struct i915_ctx_hang_stats *hs;
@@ -2436,7 +2476,7 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
}
struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_ring_buffer *ring)
+i915_gem_find_active_request(struct intel_engine_cs *ring)
{
struct drm_i915_gem_request *request;
u32 completed_seqno;
@@ -2454,7 +2494,7 @@ i915_gem_find_active_request(struct intel_ring_buffer *ring)
}
static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
struct drm_i915_gem_request *request;
bool ring_hung;
@@ -2473,7 +2513,7 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
}
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
+ struct intel_engine_cs *ring)
{
while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj;
@@ -2501,6 +2541,11 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
i915_gem_free_request(request);
}
+
+ /* These may not have been flushed before the reset, do so now */
+ kfree(ring->preallocated_lazy_request);
+ ring->preallocated_lazy_request = NULL;
+ ring->outstanding_lazy_seqno = 0;
}
void i915_gem_restore_fences(struct drm_device *dev)
@@ -2527,7 +2572,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int i;
/*
@@ -2541,8 +2586,6 @@ void i915_gem_reset(struct drm_device *dev)
for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_cleanup(dev_priv, ring);
- i915_gem_cleanup_ringbuffer(dev);
-
i915_gem_context_reset(dev);
i915_gem_restore_fences(dev);
@@ -2551,8 +2594,8 @@ void i915_gem_reset(struct drm_device *dev)
/**
* This function clears the request list as sequence numbers are passed.
*/
-static void
-i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
+void
+i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
{
uint32_t seqno;
@@ -2597,7 +2640,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
* of tail of the request to update the last known position
* of the GPU head.
*/
- ring->last_retired_head = request->tail;
+ ring->buffer->last_retired_head = request->tail;
i915_gem_free_request(request);
}
@@ -2615,7 +2658,7 @@ bool
i915_gem_retire_requests(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
bool idle = true;
int i;
@@ -2709,7 +2752,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
- struct intel_ring_buffer *ring = NULL;
+ struct intel_engine_cs *ring = NULL;
struct timespec timeout_stack, *timeout = NULL;
unsigned reset_counter;
u32 seqno = 0;
@@ -2780,9 +2823,9 @@ out:
*/
int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *to)
+ struct intel_engine_cs *to)
{
- struct intel_ring_buffer *from = obj->ring;
+ struct intel_engine_cs *from = obj->ring;
u32 seqno;
int ret, idx;
@@ -2795,7 +2838,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
idx = intel_ring_sync_index(from, to);
seqno = obj->last_read_seqno;
- if (seqno <= from->sync_seqno[idx])
+ if (seqno <= from->semaphore.sync_seqno[idx])
return 0;
ret = i915_gem_check_olr(obj->ring, seqno);
@@ -2803,13 +2846,13 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
return ret;
trace_i915_gem_ring_sync_to(from, to, seqno);
- ret = to->sync_to(to, from, seqno);
+ ret = to->semaphore.sync_to(to, from, seqno);
if (!ret)
/* We use last_read_seqno because sync_to()
* might have just caused seqno wrap under
* the radar.
*/
- from->sync_seqno[idx] = obj->last_read_seqno;
+ from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
return ret;
}
@@ -2865,12 +2908,14 @@ int i915_vma_unbind(struct i915_vma *vma)
* cause memory corruption through use-after-free.
*/
- i915_gem_object_finish_gtt(obj);
+ if (i915_is_ggtt(vma->vm)) {
+ i915_gem_object_finish_gtt(obj);
- /* release the fence reg _after_ flushing */
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- return ret;
+ /* release the fence reg _after_ flushing */
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ return ret;
+ }
trace_i915_vma_unbind(vma);
@@ -2903,7 +2948,7 @@ int i915_vma_unbind(struct i915_vma *vma)
int i915_gpu_idle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int ret, i;
/* Flush everything onto the inactive list. */
@@ -3144,6 +3189,9 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
fence = &dev_priv->fence_regs[obj->fence_reg];
+ if (WARN_ON(fence->pin_count))
+ return -EBUSY;
+
i915_gem_object_fence_lost(obj);
i915_gem_object_update_fence(obj, fence, false);
@@ -3548,6 +3596,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
+ i915_gem_object_retire(obj);
i915_gem_object_flush_cpu_write_domain(obj, false);
/* Serialise direct access to this object with the barriers for
@@ -3646,6 +3695,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* in obj->write_domain and have been skipping the clflushes.
* Just set it to the CPU cache for now.
*/
+ i915_gem_object_retire(obj);
WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
old_read_domains = obj->base.read_domains;
@@ -3743,6 +3793,15 @@ unlock:
static bool is_pin_display(struct drm_i915_gem_object *obj)
{
+ struct i915_vma *vma;
+
+ if (list_empty(&obj->vma_list))
+ return false;
+
+ vma = i915_gem_obj_to_ggtt(obj);
+ if (!vma)
+ return false;
+
/* There are 3 sources that pin objects:
* 1. The display engine (scanouts, sprites, cursors);
* 2. Reservations for execbuffer;
@@ -3754,7 +3813,7 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
* subtracting the potential reference by the user, any pin_count
* remains, it must be due to another use by the display engine.
*/
- return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
+ return vma->pin_count - !!obj->user_pin_count;
}
/*
@@ -3765,9 +3824,10 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
- struct intel_ring_buffer *pipelined)
+ struct intel_engine_cs *pipelined)
{
u32 old_read_domains, old_write_domain;
+ bool was_pin_display;
int ret;
if (pipelined != obj->ring) {
@@ -3779,6 +3839,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
/* Mark the pin_display early so that we account for the
* display coherency whilst setting up the cache domains.
*/
+ was_pin_display = obj->pin_display;
obj->pin_display = true;
/* The display engine is not coherent with the LLC cache on gen6. As
@@ -3821,7 +3882,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
return 0;
err_unpin_display:
- obj->pin_display = is_pin_display(obj);
+ WARN_ON(was_pin_display != is_pin_display(obj));
+ obj->pin_display = was_pin_display;
return ret;
}
@@ -3868,6 +3930,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
+ i915_gem_object_retire(obj);
i915_gem_object_flush_gtt_write_domain(obj);
old_write_domain = obj->base.write_domain;
@@ -3917,7 +3980,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
struct drm_i915_gem_request *request;
- struct intel_ring_buffer *ring = NULL;
+ struct intel_engine_cs *ring = NULL;
unsigned reset_counter;
u32 seqno = 0;
int ret;
@@ -3976,9 +4039,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
uint64_t flags)
{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma;
int ret;
+ if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
+ return -ENODEV;
+
if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
return -EINVAL;
@@ -4032,6 +4099,32 @@ i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
obj->pin_mappable = false;
}
+bool
+i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
+
+ WARN_ON(!ggtt_vma ||
+ dev_priv->fence_regs[obj->fence_reg].pin_count >
+ ggtt_vma->pin_count);
+ dev_priv->fence_regs[obj->fence_reg].pin_count++;
+ return true;
+ } else
+ return false;
+}
+
+void
+i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
+ dev_priv->fence_regs[obj->fence_reg].pin_count--;
+ }
+}
+
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -4292,6 +4385,30 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
return obj;
}
+static bool discard_backing_storage(struct drm_i915_gem_object *obj)
+{
+ /* If we are the last user of the backing storage (be it shmemfs
+ * pages or stolen etc), we know that the pages are going to be
+ * immediately released. In this case, we can then skip copying
+ * back the contents from the GPU.
+ */
+
+ if (obj->madv != I915_MADV_WILLNEED)
+ return false;
+
+ if (obj->base.filp == NULL)
+ return true;
+
+ /* At first glance, this looks racy, but then again so would be
+ * userspace racing mmap against close. However, the first external
+ * reference to the filp can only be obtained through the
+ * i915_gem_mmap_ioctl() which safeguards us against the user
+ * acquiring such a reference whilst we are in the middle of
+ * freeing the object.
+ */
+ return atomic_long_read(&obj->base.filp->f_count) == 1;
+}
+
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
@@ -4329,6 +4446,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
if (WARN_ON(obj->pages_pin_count))
obj->pages_pin_count = 0;
+ if (discard_backing_storage(obj))
+ obj->madv = I915_MADV_DONTNEED;
i915_gem_object_put_pages(obj);
i915_gem_object_free_mmap_offset(obj);
i915_gem_object_release_stolen(obj);
@@ -4338,6 +4457,9 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
if (obj->base.import_attach)
drm_prime_gem_destroy(&obj->base, NULL);
+ if (obj->ops->release)
+ obj->ops->release(obj);
+
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev_priv, obj->base.size);
@@ -4371,6 +4493,17 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
kfree(vma);
}
+static void
+i915_gem_stop_ringbuffers(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ int i;
+
+ for_each_ring(ring, dev_priv, i)
+ intel_stop_ring_buffer(ring);
+}
+
int
i915_gem_suspend(struct drm_device *dev)
{
@@ -4392,7 +4525,7 @@ i915_gem_suspend(struct drm_device *dev)
i915_gem_evict_everything(dev);
i915_kernel_lost_context(dev);
- i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_stop_ringbuffers(dev);
/* Hack! Don't let anybody do execbuf while we don't control the chip.
* We need to replace this with a semaphore, or something.
@@ -4413,7 +4546,7 @@ err:
return ret;
}
-int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
+int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4512,13 +4645,20 @@ static int i915_gem_init_rings(struct drm_device *dev)
goto cleanup_blt_ring;
}
+ if (HAS_BSD2(dev)) {
+ ret = intel_init_bsd2_ring_buffer(dev);
+ if (ret)
+ goto cleanup_vebox_ring;
+ }
ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
if (ret)
- goto cleanup_vebox_ring;
+ goto cleanup_bsd2_ring;
return 0;
+cleanup_bsd2_ring:
+ intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
cleanup_blt_ring:
@@ -4576,15 +4716,11 @@ i915_gem_init_hw(struct drm_device *dev)
* the do_switch), but before enabling PPGTT. So don't move this.
*/
ret = i915_gem_context_enable(dev_priv);
- if (ret) {
+ if (ret && ret != -EIO) {
DRM_ERROR("Context enable failed %d\n", ret);
- goto err_out;
+ i915_gem_cleanup_ringbuffer(dev);
}
- return 0;
-
-err_out:
- i915_gem_cleanup_ringbuffer(dev);
return ret;
}
@@ -4597,11 +4733,13 @@ int i915_gem_init(struct drm_device *dev)
if (IS_VALLEYVIEW(dev)) {
/* VLVA0 (potential hack), BIOS isn't actually waking us */
- I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
- if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
+ I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
+ if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
+ VLV_GTLC_ALLOWWAKEACK), 10))
DRM_DEBUG_DRIVER("allow wake ack timed out\n");
}
+ i915_gem_init_userptr(dev);
i915_gem_init_global_gtt(dev);
ret = i915_gem_context_init(dev);
@@ -4611,25 +4749,28 @@ int i915_gem_init(struct drm_device *dev)
}
ret = i915_gem_init_hw(dev);
- mutex_unlock(&dev->struct_mutex);
- if (ret) {
- WARN_ON(dev_priv->mm.aliasing_ppgtt);
- i915_gem_context_fini(dev);
- drm_mm_takedown(&dev_priv->gtt.base.mm);
- return ret;
+ if (ret == -EIO) {
+ /* Allow ring initialisation to fail by marking the GPU as
+ * wedged. But we only want to do this where the GPU is angry,
+ * for all other failure, such as an allocation failure, bail.
+ */
+ DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
+ atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+ ret = 0;
}
+ mutex_unlock(&dev->struct_mutex);
/* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
dev_priv->dri1.allow_batchbuffer = 1;
- return 0;
+ return ret;
}
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine_cs *ring;
int i;
for_each_ring(ring, dev_priv, i)
@@ -4661,16 +4802,15 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
}
BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
- mutex_unlock(&dev->struct_mutex);
- ret = drm_irq_install(dev);
+ ret = drm_irq_install(dev, dev->pdev->irq);
if (ret)
goto cleanup_ringbuffer;
+ mutex_unlock(&dev->struct_mutex);
return 0;
cleanup_ringbuffer:
- mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
dev_priv->ums.mm_suspended = 1;
mutex_unlock(&dev->struct_mutex);
@@ -4685,7 +4825,9 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
+ mutex_lock(&dev->struct_mutex);
drm_irq_uninstall(dev);
+ mutex_unlock(&dev->struct_mutex);
return i915_gem_suspend(dev);
}
@@ -4704,7 +4846,7 @@ i915_gem_lastclose(struct drm_device *dev)
}
static void
-init_ring_lists(struct intel_ring_buffer *ring)
+init_ring_lists(struct intel_engine_cs *ring)
{
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
@@ -4752,7 +4894,7 @@ i915_gem_load(struct drm_device *dev)
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
- if (IS_GEN3(dev)) {
+ if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
I915_WRITE(MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
}
@@ -4779,10 +4921,13 @@ i915_gem_load(struct drm_device *dev)
dev_priv->mm.interruptible = true;
- dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
- dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
- dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
- register_shrinker(&dev_priv->mm.inactive_shrinker);
+ dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
+ dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
+ dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
+ register_shrinker(&dev_priv->mm.shrinker);
+
+ dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
+ register_oom_notifier(&dev_priv->mm.oom_notifier);
}
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
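
For readers unfamiliar with the count/scan split used by the shrinker registered above, here is a rough userspace analogue of the contract (hypothetical types and names, not the kernel API): count reports how much could be reclaimed, scan is asked to reclaim up to nr_to_scan of it and returns what it actually freed.

#include <stdio.h>

struct shrink_ctl {
	unsigned long nr_to_scan;
};

struct mem_shrinker {
	unsigned long (*count_objects)(struct shrink_ctl *sc);
	unsigned long (*scan_objects)(struct shrink_ctl *sc);
};

static unsigned long freeable_pages = 1024;	/* stand-in for the GEM lists */

static unsigned long demo_count(struct shrink_ctl *sc)
{
	(void)sc;
	return freeable_pages;		/* estimate: purgeable + unpinned pages */
}

static unsigned long demo_scan(struct shrink_ctl *sc)
{
	unsigned long freed = sc->nr_to_scan < freeable_pages ?
			      sc->nr_to_scan : freeable_pages;

	freeable_pages -= freed;	/* pretend the pages were released */
	return freed;
}

int main(void)
{
	struct mem_shrinker shrinker = { demo_count, demo_scan };
	struct shrink_ctl sc = { .nr_to_scan = 128 };

	printf("reclaimable: %lu pages\n", shrinker.count_objects(&sc));
	printf("freed: %lu pages\n", shrinker.scan_objects(&sc));
	return 0;
}
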
@@ -4857,27 +5002,46 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
#endif
}
+static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+{
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_is_locked_by(&dev->struct_mutex, current))
+ return false;
+
+ if (to_i915(dev)->mm.shrinker_no_lock_stealing)
+ return false;
+
+ *unlock = false;
+ } else
+ *unlock = true;
+
+ return true;
+}
+
+static int num_vma_bound(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+ int count = 0;
+
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (drm_mm_node_allocated(&vma->node))
+ count++;
+
+ return count;
+}
+
static unsigned long
-i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
+i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
- container_of(shrinker,
- struct drm_i915_private,
- mm.inactive_shrinker);
+ container_of(shrinker, struct drm_i915_private, mm.shrinker);
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
- bool unlock = true;
unsigned long count;
+ bool unlock;
- if (!mutex_trylock(&dev->struct_mutex)) {
- if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return 0;
-
- if (dev_priv->mm.shrinker_no_lock_stealing)
- return 0;
-
- unlock = false;
- }
+ if (!i915_gem_shrinker_lock(dev, &unlock))
+ return 0;
count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
@@ -4885,10 +5049,8 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
count += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (obj->active)
- continue;
-
- if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
+ if (!i915_gem_obj_is_pinned(obj) &&
+ obj->pages_pin_count == num_vma_bound(obj))
count += obj->base.size >> PAGE_SHIFT;
}
@@ -4961,44 +5123,99 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
}
static unsigned long
-i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
+i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
- container_of(shrinker,
- struct drm_i915_private,
- mm.inactive_shrinker);
+ container_of(shrinker, struct drm_i915_private, mm.shrinker);
struct drm_device *dev = dev_priv->dev;
unsigned long freed;
- bool unlock = true;
+ bool unlock;
- if (!mutex_trylock(&dev->struct_mutex)) {
- if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return SHRINK_STOP;
-
- if (dev_priv->mm.shrinker_no_lock_stealing)
- return SHRINK_STOP;
-
- unlock = false;
- }
+ if (!i915_gem_shrinker_lock(dev, &unlock))
+ return SHRINK_STOP;
freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
if (freed < sc->nr_to_scan)
freed += __i915_gem_shrink(dev_priv,
sc->nr_to_scan - freed,
false);
- if (freed < sc->nr_to_scan)
- freed += i915_gem_shrink_all(dev_priv);
-
if (unlock)
mutex_unlock(&dev->struct_mutex);
return freed;
}
+static int
+i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(nb, struct drm_i915_private, mm.oom_notifier);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_gem_object *obj;
+ unsigned long timeout = msecs_to_jiffies(5000) + 1;
+ unsigned long pinned, bound, unbound, freed;
+ bool was_interruptible;
+ bool unlock;
+
+ while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout)
+ schedule_timeout_killable(1);
+ if (timeout == 0) {
+ pr_err("Unable to purge GPU memory due lock contention.\n");
+ return NOTIFY_DONE;
+ }
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ freed = i915_gem_shrink_all(dev_priv);
+
+ dev_priv->mm.interruptible = was_interruptible;
+
+ /* Because we may be allocating inside our own driver, we cannot
+ * assert that there are no objects with pinned pages that are not
+ * being pointed to by hardware.
+ */
+ unbound = bound = pinned = 0;
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ if (!obj->base.filp) /* not backed by a freeable object */
+ continue;
+
+ if (obj->pages_pin_count)
+ pinned += obj->base.size;
+ else
+ unbound += obj->base.size;
+ }
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (!obj->base.filp)
+ continue;
+
+ if (obj->pages_pin_count)
+ pinned += obj->base.size;
+ else
+ bound += obj->base.size;
+ }
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
+ freed, pinned);
+ if (unbound || bound)
+ pr_err("%lu and %lu bytes still available in the "
+ "bound and unbound GPU page lists.\n",
+ bound, unbound);
+
+ *(unsigned long *)ptr += freed;
+ return NOTIFY_DONE;
+}
+
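
The notifier above is careful not to block the OOM path indefinitely: it retries a trylock for a bounded time and gives up with a message rather than stall. A small pthread-based sketch of that pattern (illustrative only, not driver code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool lock_with_timeout(pthread_mutex_t *lock, unsigned int timeout_ms)
{
	struct timespec tick = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };	/* 1 ms */

	while (timeout_ms--) {
		if (pthread_mutex_trylock(lock) == 0)
			return true;
		nanosleep(&tick, NULL);		/* back off briefly, then retry */
	}
	return false;
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	if (!lock_with_timeout(&lock, 5000)) {
		fprintf(stderr, "Unable to purge: lock contention.\n");
		return 1;
	}
	/* ... reclaim memory while holding the lock ... */
	pthread_mutex_unlock(&lock);
	return 0;
}
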
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
+ /* This WARN has probably outlived its usefulness (callers already
+ * WARN if they don't find the GGTT vma they expect). When removing,
+ * remember to remove the pre-check in is_pin_display() as well */
if (WARN_ON(list_empty(&obj->vma_list)))
return NULL;