Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 729
1 file changed, 423 insertions(+), 306 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ec8a0d7ffa39..ef3d91dda71a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,6 +31,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
@@ -128,9 +129,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
ret = drm_gem_handle_create(file_priv, obj, &handle);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference_unlocked(obj);
if (ret)
return ret;
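
This patch repeatedly collapses the open-coded lock/unreference/unlock triple into the DRM core's _unlocked helpers, added to the core shortly before this change. A minimal sketch of the contract (not the DRM core source, which routes the drop through kref_put() and only takes the lock on the final-free path):

/* Hedged sketch: the caller does not hold struct_mutex, so the
 * helper is responsible for the locking itself. */
static inline void
unreference_unlocked_sketch(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
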
@@ -164,7 +163,7 @@ fast_shmem_read(struct page **pages,
static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
obj_priv->tiling_mode != I915_TILING_NONE;
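
The other change that recurs through the whole file is mechanical: every obj->driver_private dereference becomes to_intel_bo(obj). As far as I can tell, the helper at this point in history was a typed wrapper over the very same field (later kernels turned it into a container_of()), roughly:

/* Assumed contemporary definition from i915_drv.h. */
#define to_intel_bo(x) ((struct drm_i915_gem_object *)(x)->driver_private)
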
@@ -265,7 +264,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pread *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
ssize_t remain;
loff_t offset, page_base;
char __user *user_data;
@@ -286,7 +285,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
if (ret != 0)
goto fail_put_pages;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = args->offset;
while (remain > 0) {
@@ -355,7 +354,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pread *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct mm_struct *mm = current->mm;
struct page **user_pages;
ssize_t remain;
@@ -404,7 +403,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
if (ret != 0)
goto fail_put_pages;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = args->offset;
while (remain > 0) {
@@ -480,7 +479,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EBADF;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
/* Bounds check source.
*
@@ -488,7 +487,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
*/
if (args->offset > obj->size || args->size > obj->size ||
args->offset + args->size > obj->size) {
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference_unlocked(obj);
return -EINVAL;
}
@@ -501,7 +500,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
file_priv);
}
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference_unlocked(obj);
return ret;
}
@@ -582,7 +581,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain;
loff_t offset, page_base;
@@ -606,7 +605,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
if (ret)
goto fail;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = obj_priv->gtt_offset + args->offset;
while (remain > 0) {
@@ -656,7 +655,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain;
loff_t gtt_page_base, offset;
@@ -700,7 +699,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
if (ret)
goto out_unpin_object;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = obj_priv->gtt_offset + args->offset;
while (remain > 0) {
@@ -762,7 +761,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
ssize_t remain;
loff_t offset, page_base;
char __user *user_data;
@@ -782,7 +781,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
if (ret != 0)
goto fail_put_pages;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = args->offset;
obj_priv->dirty = 1;
@@ -830,7 +829,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct mm_struct *mm = current->mm;
struct page **user_pages;
ssize_t remain;
@@ -878,7 +877,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
if (ret != 0)
goto fail_put_pages;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = args->offset;
obj_priv->dirty = 1;
@@ -953,7 +952,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EBADF;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
/* Bounds check destination.
*
@@ -961,7 +960,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
*/
if (args->offset > obj->size || args->size > obj->size ||
args->offset + args->size > obj->size) {
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference_unlocked(obj);
return -EINVAL;
}
@@ -995,7 +994,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
DRM_INFO("pwrite failed %d\n", ret);
#endif
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference_unlocked(obj);
return ret;
}
@@ -1035,7 +1034,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EBADF;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
mutex_lock(&dev->struct_mutex);
@@ -1097,7 +1096,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
DRM_INFO("%s: sw_finish %d (%p %zd)\n",
__func__, args->handle, obj, obj->size);
#endif
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
/* Pinned buffers may be scanout, so flush the cache */
if (obj_priv->pin_count)
@@ -1138,9 +1137,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
PROT_READ | PROT_WRITE, MAP_SHARED,
args->offset);
up_write(&current->mm->mmap_sem);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
if (IS_ERR((void *)addr))
return addr;
@@ -1170,7 +1167,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_device *dev = obj->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
pgoff_t page_offset;
unsigned long pfn;
int ret = 0;
@@ -1237,7 +1234,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_map_list *list;
struct drm_local_map *map;
int ret = 0;
@@ -1308,7 +1305,7 @@ void
i915_gem_release_mmap(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (dev->dev_mapping)
unmap_mapping_range(dev->dev_mapping,
@@ -1319,7 +1316,7 @@ static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_gem_mm *mm = dev->mm_private;
struct drm_map_list *list;
@@ -1350,7 +1347,7 @@ static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int start, i;
/*
@@ -1409,7 +1406,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to mmap a purgeable buffer\n");
@@ -1453,7 +1450,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
void
i915_gem_object_put_pages(struct drm_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page_count = obj->size / PAGE_SIZE;
int i;
@@ -1470,9 +1467,6 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
obj_priv->dirty = 0;
for (i = 0; i < page_count; i++) {
- if (obj_priv->pages[i] == NULL)
- break;
-
if (obj_priv->dirty)
set_page_dirty(obj_priv->pages[i]);
@@ -1492,7 +1486,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
/* Add a reference if we're newly entering the active list. */
if (!obj_priv->active) {
@@ -1512,7 +1506,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
BUG_ON(!obj_priv->active);
list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
@@ -1523,7 +1517,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
static void
i915_gem_object_truncate(struct drm_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct inode *inode;
inode = obj->filp->f_path.dentry->d_inode;
@@ -1544,7 +1538,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
i915_verify_inactive(dev, __FILE__, __LINE__);
if (obj_priv->pin_count != 0)
@@ -1562,6 +1556,45 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
i915_verify_inactive(dev, __FILE__, __LINE__);
}
+static void
+i915_gem_process_flushing_list(struct drm_device *dev,
+ uint32_t flush_domains, uint32_t seqno)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv, *next;
+
+ list_for_each_entry_safe(obj_priv, next,
+ &dev_priv->mm.gpu_write_list,
+ gpu_write_list) {
+ struct drm_gem_object *obj = obj_priv->obj;
+
+ if ((obj->write_domain & flush_domains) ==
+ obj->write_domain) {
+ uint32_t old_write_domain = obj->write_domain;
+
+ obj->write_domain = 0;
+ list_del_init(&obj_priv->gpu_write_list);
+ i915_gem_object_move_to_active(obj, seqno);
+
+ /* update the fence lru list */
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+ list_move_tail(&obj_priv->fence_list,
+ &dev_priv->mm.fence_list);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
+ }
+ }
+}
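
The guard in the new helper is a subset test: an object leaves the gpu_write_list only when every domain it has pending writes in is covered by this flush. Restated in isolation (hypothetical helper name):

static inline bool flush_covers_object(uint32_t write_domain,
				       uint32_t flush_domains)
{
	/* true iff every set bit of write_domain is also set in
	 * flush_domains */
	return (write_domain & flush_domains) == write_domain;
}
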
+
+#define PIPE_CONTROL_FLUSH(addr) \
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
+ PIPE_CONTROL_DEPTH_STALL); \
+ OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
+ OUT_RING(0); \
+ OUT_RING(0); \
+
/**
* Creates a new sequence number, emitting a write of it to the status page
* plus an interrupt, which will trigger i915_user_interrupt_handler.
@@ -1596,13 +1629,47 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
if (dev_priv->mm.next_gem_seqno == 0)
dev_priv->mm.next_gem_seqno++;
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(seqno);
+ if (HAS_PIPE_CONTROL(dev)) {
+ u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
+ /*
+ * Workaround qword write incoherence by flushing the
+ * PIPE_NOTIFY buffers out to memory before requesting
+ * an interrupt.
+ */
+ BEGIN_LP_RING(32);
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+ OUT_RING(seqno);
+ OUT_RING(0);
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128; /* write to separate cachelines */
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+ PIPE_CONTROL_NOTIFY);
+ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+ OUT_RING(seqno);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ } else {
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(seqno);
+
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+ }
DRM_DEBUG_DRIVER("%d\n", seqno);
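
The BEGIN_LP_RING(32) budget checks out against the emission: the leading PIPE_CONTROL block is 4 dwords, each of the six PIPE_CONTROL_FLUSH invocations expands to 4 more, and the closing PIPE_CONTROL_NOTIFY block is another 4, so 4 + 6*4 + 4 = 32. The scratch writes step through separate 128-byte cachelines purely to force the earlier qword writes out to memory before the notify interrupt fires. HAS_PIPE_CONTROL() gates this path; to the best of my knowledge its definition at the time was along these lines:

/* Assumed i915_drv.h definition at the time of this patch. */
#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
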
@@ -1620,29 +1687,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
/* Associate any objects on the flushing list matching the write
* domain we're flushing with our flush.
*/
- if (flush_domains != 0) {
- struct drm_i915_gem_object *obj_priv, *next;
-
- list_for_each_entry_safe(obj_priv, next,
- &dev_priv->mm.gpu_write_list,
- gpu_write_list) {
- struct drm_gem_object *obj = obj_priv->obj;
-
- if ((obj->write_domain & flush_domains) ==
- obj->write_domain) {
- uint32_t old_write_domain = obj->write_domain;
-
- obj->write_domain = 0;
- list_del_init(&obj_priv->gpu_write_list);
- i915_gem_object_move_to_active(obj, seqno);
-
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
- }
- }
-
- }
+ if (flush_domains != 0)
+ i915_gem_process_flushing_list(dev, flush_domains, seqno);
if (!dev_priv->mm.suspended) {
mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
@@ -1747,7 +1793,10 @@ i915_get_gem_seqno(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+ if (HAS_PIPE_CONTROL(dev))
+ return ((volatile u32 *)(dev_priv->seqno_page))[0];
+ else
+ return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}
/**
@@ -1822,7 +1871,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
return -EIO;
if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
ier = I915_READ(IER);
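
The IS_IRONLAKE() to HAS_PCH_SPLIT() switch widens the interrupt-enable check from one part to every PCH-based platform, all of which route interrupt enables through DEIER/GTIER instead of the legacy IER. Assuming I read the era correctly, the macro was defined as:

/* Assumed i915_drv.h definition at the time of this patch. */
#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
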
@@ -1960,7 +2009,7 @@ static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
/* This function only exists to support waiting for existing rendering,
@@ -1991,7 +2040,8 @@ int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret = 0;
#if WATCH_BUF
@@ -2046,8 +2096,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
}
/* Remove ourselves from the LRU list if present. */
+ spin_lock(&dev_priv->mm.active_list_lock);
if (!list_empty(&obj_priv->list))
list_del_init(&obj_priv->list);
+ spin_unlock(&dev_priv->mm.active_list_lock);
if (i915_gem_object_is_purgeable(obj_priv))
i915_gem_object_truncate(obj);
@@ -2085,11 +2137,34 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
}
static int
+i915_gpu_idle(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ bool lists_empty;
+ uint32_t seqno;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list);
+ spin_unlock(&dev_priv->mm.active_list_lock);
+
+ if (lists_empty)
+ return 0;
+
+ /* Flush everything onto the inactive list. */
+ i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
+ if (seqno == 0)
+ return -ENOMEM;
+
+ return i915_wait_request(dev, seqno);
+}
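
i915_gpu_idle() factors out the flush/request/wait sequence so that i915_gem_evict_everything() below and the rewritten i915_gem_idle() later in this patch share one implementation. Both callers reduce to the same shape:

/* Usage pattern taken from the hunks below. */
ret = i915_gpu_idle(dev);
if (ret)
	return ret;
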
+
+static int
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- uint32_t seqno;
bool lists_empty;
spin_lock(&dev_priv->mm.active_list_lock);
@@ -2102,12 +2177,7 @@ i915_gem_evict_everything(struct drm_device *dev)
return -ENOSPC;
/* Flush everything (on to the inactive lists) and evict */
- i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
- seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
- if (seqno == 0)
- return -ENOMEM;
-
- ret = i915_wait_request(dev, seqno);
+ ret = i915_gpu_idle(dev);
if (ret)
return ret;
@@ -2147,7 +2217,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
#if WATCH_LRU
DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
BUG_ON(obj_priv->pin_count != 0);
BUG_ON(obj_priv->active);
@@ -2199,11 +2269,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
seqno = i915_add_request(dev, NULL, obj->write_domain);
if (seqno == 0)
return -ENOMEM;
-
- ret = i915_wait_request(dev, seqno);
- if (ret)
- return ret;
-
continue;
}
}
@@ -2223,12 +2288,11 @@ int
i915_gem_object_get_pages(struct drm_gem_object *obj,
gfp_t gfpmask)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page_count, i;
struct address_space *mapping;
struct inode *inode;
struct page *page;
- int ret;
if (obj_priv->pages_refcount++ != 0)
return 0;
@@ -2251,11 +2315,9 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
mapping_gfp_mask (mapping) |
__GFP_COLD |
gfpmask);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- i915_gem_object_put_pages(obj);
- return ret;
- }
+ if (IS_ERR(page))
+ goto err_pages;
+
obj_priv->pages[i] = page;
}
@@ -2263,6 +2325,37 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
i915_gem_object_do_bit_17_swizzle(obj);
return 0;
+
+err_pages:
+ while (i--)
+ page_cache_release(obj_priv->pages[i]);
+
+ drm_free_large(obj_priv->pages);
+ obj_priv->pages = NULL;
+ obj_priv->pages_refcount--;
+ return PTR_ERR(page);
+}
+
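
The new err_pages unwind is also what allows the NULL-page check to be dropped from i915_gem_object_put_pages() earlier in this patch: the failure path no longer hands put_pages a partially filled array, it releases exactly the i pages already acquired. The idiom in a self-contained toy form (acquire()/release() are hypothetical stand-ins for read_cache_page_gfp()/page_cache_release()):

static int acquire_all(void *res[], int n)
{
	int i;

	for (i = 0; i < n; i++) {
		res[i] = acquire(i);             /* hypothetical */
		if (IS_ERR(res[i])) {
			int err = PTR_ERR(res[i]);

			/* release only [0, i), newest first */
			while (i--)
				release(res[i]); /* hypothetical */
			return err;
		}
	}
	return 0;
}
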
+static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+ struct drm_gem_object *obj = reg->obj;
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ int regnum = obj_priv->fence_reg;
+ uint64_t val;
+
+ val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
+ 0xfffff000) << 32;
+ val |= obj_priv->gtt_offset & 0xfffff000;
+ val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
+ SANDYBRIDGE_FENCE_PITCH_SHIFT;
+
+ if (obj_priv->tiling_mode == I915_TILING_Y)
+ val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+ val |= I965_FENCE_REG_VALID;
+
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
}
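
Reading the packing above: the 64-bit Sandy Bridge fence value carries the last fenced page in the high dword, the start page in the low dword, the pitch in 128-byte units minus one, plus tiling and valid bits. A worked example with hypothetical numbers:

/* gtt_offset = 0x100000, size = 0x10000, stride = 512:
 *   high dword: (0x100000 + 0x10000 - 4096) & 0xfffff000 = 0x0010f000
 *   low dword : 0x00100000 | pitch field | valid bit
 *   pitch     : 512 / 128 - 1 = 3, at SANDYBRIDGE_FENCE_PITCH_SHIFT
 */
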
static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
@@ -2270,7 +2363,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
struct drm_gem_object *obj = reg->obj;
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int regnum = obj_priv->fence_reg;
uint64_t val;
@@ -2290,7 +2383,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
struct drm_gem_object *obj = reg->obj;
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int regnum = obj_priv->fence_reg;
int tile_width;
uint32_t fence_reg, val;
@@ -2313,6 +2406,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
pitch_val = obj_priv->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
+ if (obj_priv->tiling_mode == I915_TILING_Y &&
+ HAS_128_BYTE_Y_TILING(dev))
+ WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
+ else
+ WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
+
val = obj_priv->gtt_offset;
if (obj_priv->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
@@ -2332,7 +2431,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
struct drm_gem_object *obj = reg->obj;
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int regnum = obj_priv->fence_reg;
uint32_t val;
uint32_t pitch_val;
@@ -2361,6 +2460,58 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}
+static int i915_find_fence_reg(struct drm_device *dev)
+{
+ struct drm_i915_fence_reg *reg = NULL;
+ struct drm_i915_gem_object *obj_priv = NULL;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj = NULL;
+ int i, avail, ret;
+
+ /* First try to find a free reg */
+ avail = 0;
+ for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+ reg = &dev_priv->fence_regs[i];
+ if (!reg->obj)
+ return i;
+
+ obj_priv = to_intel_bo(reg->obj);
+ if (!obj_priv->pin_count)
+ avail++;
+ }
+
+ if (avail == 0)
+ return -ENOSPC;
+
+ /* None available, try to steal one or wait for a user to finish */
+ i = I915_FENCE_REG_NONE;
+ list_for_each_entry(obj_priv, &dev_priv->mm.fence_list,
+ fence_list) {
+ obj = obj_priv->obj;
+
+ if (obj_priv->pin_count)
+ continue;
+
+ /* found one! */
+ i = obj_priv->fence_reg;
+ break;
+ }
+
+ BUG_ON(i == I915_FENCE_REG_NONE);
+
+ /* We only have a reference on obj from the active list. put_fence_reg
+ * might drop that one, causing a use-after-free in it. So hold a
+ * private reference to obj like the other callers of put_fence_reg
+ * (set_tiling ioctl) do. */
+ drm_gem_object_reference(obj);
+ ret = i915_gem_object_put_fence_reg(obj);
+ drm_gem_object_unreference(obj);
+ if (ret != 0)
+ return ret;
+
+ return i;
+}
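
The helper's contract: a free or stolen register index on success, a negative errno (-ENOSPC, or whatever put_fence_reg returns) on failure; the BUG_ON is safe because avail > 0 already proved an unpinned entry exists on the fence LRU. The caller in the next hunk consumes it as:

/* Usage pattern from i915_gem_object_get_fence_reg() below. */
ret = i915_find_fence_reg(dev);
if (ret < 0)
	return ret;
obj_priv->fence_reg = ret;
reg = &dev_priv->fence_regs[obj_priv->fence_reg];
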
+
/**
* i915_gem_object_get_fence_reg - set up a fence reg for an object
* @obj: object to map through a fence reg
@@ -2379,10 +2530,9 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_i915_fence_reg *reg = NULL;
- struct drm_i915_gem_object *old_obj_priv = NULL;
- int i, ret, avail;
+ int ret;
/* Just update our place in the LRU if our fence is getting used. */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -2410,86 +2560,27 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
break;
}
- /* First try to find a free reg */
- avail = 0;
- for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
- reg = &dev_priv->fence_regs[i];
- if (!reg->obj)
- break;
-
- old_obj_priv = reg->obj->driver_private;
- if (!old_obj_priv->pin_count)
- avail++;
- }
-
- /* None available, try to steal one or wait for a user to finish */
- if (i == dev_priv->num_fence_regs) {
- struct drm_gem_object *old_obj = NULL;
-
- if (avail == 0)
- return -ENOSPC;
-
- list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
- fence_list) {
- old_obj = old_obj_priv->obj;
-
- if (old_obj_priv->pin_count)
- continue;
-
- /* Take a reference, as otherwise the wait_rendering
- * below may cause the object to get freed out from
- * under us.
- */
- drm_gem_object_reference(old_obj);
-
- /* i915 uses fences for GPU access to tiled buffers */
- if (IS_I965G(dev) || !old_obj_priv->active)
- break;
-
- /* This brings the object to the head of the LRU if it
- * had been written to. The only way this should
- * result in us waiting longer than the expected
- * optimal amount of time is if there was a
- * fence-using buffer later that was read-only.
- */
- i915_gem_object_flush_gpu_write_domain(old_obj);
- ret = i915_gem_object_wait_rendering(old_obj);
- if (ret != 0) {
- drm_gem_object_unreference(old_obj);
- return ret;
- }
-
- break;
- }
-
- /*
- * Zap this virtual mapping so we can set up a fence again
- * for this object next time we need it.
- */
- i915_gem_release_mmap(old_obj);
-
- i = old_obj_priv->fence_reg;
- reg = &dev_priv->fence_regs[i];
-
- old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
- list_del_init(&old_obj_priv->fence_list);
-
- drm_gem_object_unreference(old_obj);
- }
+ ret = i915_find_fence_reg(dev);
+ if (ret < 0)
+ return ret;
- obj_priv->fence_reg = i;
+ obj_priv->fence_reg = ret;
+ reg = &dev_priv->fence_regs[obj_priv->fence_reg];
list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
reg->obj = obj;
- if (IS_I965G(dev))
+ if (IS_GEN6(dev))
+ sandybridge_write_fence_reg(reg);
+ else if (IS_I965G(dev))
i965_write_fence_reg(reg);
else if (IS_I9XX(dev))
i915_write_fence_reg(reg);
else
i830_write_fence_reg(reg);
- trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
+ trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
+ obj_priv->tiling_mode);
return 0;
}
@@ -2506,11 +2597,14 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- if (IS_I965G(dev))
+ if (IS_GEN6(dev)) {
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
+ (obj_priv->fence_reg * 8), 0);
+ } else if (IS_I965G(dev)) {
I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
- else {
+ } else {
uint32_t fence_reg;
if (obj_priv->fence_reg < 8)
@@ -2539,11 +2633,17 @@ int
i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
return 0;
+ /* If we've changed tiling, GTT-mappings of the object
+ * need to re-fault to ensure that the correct fence register
+ * setup is in place.
+ */
+ i915_gem_release_mmap(obj);
+
/* On the i915, GPU access to tiled buffers is via a fence,
* therefore we must wait for any outstanding access to complete
* before clearing the fence.
@@ -2552,12 +2652,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
int ret;
i915_gem_object_flush_gpu_write_domain(obj);
- i915_gem_object_flush_gtt_write_domain(obj);
ret = i915_gem_object_wait_rendering(obj);
if (ret != 0)
return ret;
}
+ i915_gem_object_flush_gtt_write_domain(obj);
i915_gem_clear_fence_reg (obj);
return 0;
@@ -2571,7 +2671,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_mm_node *free_space;
gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
int ret;
@@ -2678,7 +2778,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
/* If we don't have a page list set up, then we're not pinned
* to GPU, and we can ignore the cache flush because it'll happen
@@ -2697,7 +2797,6 @@ static void
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- uint32_t seqno;
uint32_t old_write_domain;
if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
@@ -2706,9 +2805,8 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
/* Queue the GPU write cache flushing we need. */
old_write_domain = obj->write_domain;
i915_gem_flush(dev, 0, obj->write_domain);
- seqno = i915_add_request(dev, NULL, obj->write_domain);
+ (void) i915_add_request(dev, NULL, obj->write_domain);
BUG_ON(obj->write_domain);
- i915_gem_object_move_to_active(obj, seqno);
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
@@ -2781,7 +2879,7 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_write_domain, old_read_domains;
int ret;
@@ -2831,7 +2929,7 @@ int
i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_write_domain, old_read_domains;
int ret;
@@ -3044,7 +3142,7 @@ static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
uint32_t old_read_domains;
@@ -3129,7 +3227,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (!obj_priv->page_cpu_valid)
return;
@@ -3169,7 +3267,7 @@ static int
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
uint64_t offset, uint64_t size)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_read_domains;
int i, ret;
@@ -3238,7 +3336,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int i, ret;
void __iomem *reloc_page;
bool need_fence;
@@ -3247,7 +3345,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
obj_priv->tiling_mode != I915_TILING_NONE;
/* Check fence reg constraints and rebind if necessary */
- if (need_fence && !i915_obj_fenceable(dev, obj))
+ if (need_fence && !i915_gem_object_fence_offset_ok(obj,
+ obj_priv->tiling_mode))
i915_gem_object_unbind(obj);
/* Choose the GTT offset for our buffer and put it there. */
@@ -3288,7 +3387,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
i915_gem_object_unpin(obj);
return -EBADF;
}
- target_obj_priv = target_obj->driver_private;
+ target_obj_priv = to_intel_bo(target_obj);
#if WATCH_RELOC
DRM_INFO("%s: obj %p offset %08x target %d "
@@ -3317,6 +3416,16 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
}
/* Validate that the target is in a valid r/w GPU domain */
+ if (reloc->write_domain & (reloc->write_domain - 1)) {
+ DRM_ERROR("reloc with multiple write domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ return -EINVAL;
+ }
if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
reloc->read_domains & I915_GEM_DOMAIN_CPU) {
DRM_ERROR("reloc with read/write CPU domains: "
@@ -3630,7 +3739,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
prepare_to_wait(&dev_priv->pending_flip_queue,
&wait, TASK_INTERRUPTIBLE);
for (i = 0; i < count; i++) {
- obj_priv = object_list[i]->driver_private;
+ obj_priv = to_intel_bo(object_list[i]);
if (atomic_read(&obj_priv->pending_flip) > 0)
break;
}
@@ -3739,7 +3848,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- obj_priv = object_list[i]->driver_private;
+ obj_priv = to_intel_bo(object_list[i]);
if (obj_priv->in_execbuffer) {
DRM_ERROR("Object %p appears more than once in object list\n",
object_list[i]);
@@ -3865,7 +3974,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = obj->pending_write_domain;
@@ -3940,7 +4049,7 @@ err:
for (i = 0; i < args->buffer_count; i++) {
if (object_list[i]) {
- obj_priv = object_list[i]->driver_private;
+ obj_priv = to_intel_bo(object_list[i]);
obj_priv->in_execbuffer = false;
}
drm_gem_object_unreference(object_list[i]);
@@ -4118,7 +4227,7 @@ int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -4151,7 +4260,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
i915_verify_inactive(dev, __FILE__, __LINE__);
obj_priv->pin_count--;
@@ -4191,7 +4300,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
mutex_unlock(&dev->struct_mutex);
return -EBADF;
}
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to pin a purgeable buffer\n");
@@ -4248,7 +4357,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
return -EBADF;
}
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->pin_filp != file_priv) {
DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
args->handle);
@@ -4290,7 +4399,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
*/
i915_gem_retire_requests(dev);
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
/* Don't count being on the flushing list against the object being
* done. Otherwise, a buffer left on the flushing list but not getting
* flushed (because nobody's flushing that domain) won't ever return
@@ -4336,7 +4445,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
}
mutex_lock(&dev->struct_mutex);
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->pin_count) {
drm_gem_object_unreference(obj);
@@ -4397,7 +4506,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
void i915_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
trace_i915_gem_object_destroy(obj);
@@ -4445,8 +4554,7 @@ int
i915_gem_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t seqno, cur_seqno, last_seqno;
- int stuck, ret;
+ int ret;
mutex_lock(&dev->struct_mutex);
@@ -4455,116 +4563,80 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
- /* Hack! Don't let anybody do execbuf while we don't control the chip.
- * We need to replace this with a semaphore, or something.
- */
- dev_priv->mm.suspended = 1;
- del_timer(&dev_priv->hangcheck_timer);
-
- /* Cancel the retire work handler, wait for it to finish if running
- */
- mutex_unlock(&dev->struct_mutex);
- cancel_delayed_work_sync(&dev_priv->mm.retire_work);
- mutex_lock(&dev->struct_mutex);
-
- i915_kernel_lost_context(dev);
-
- /* Flush the GPU along with all non-CPU write domains
- */
- i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
- seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
-
- if (seqno == 0) {
+ ret = i915_gpu_idle(dev);
+ if (ret) {
mutex_unlock(&dev->struct_mutex);
- return -ENOMEM;
+ return ret;
}
- dev_priv->mm.waiting_gem_seqno = seqno;
- last_seqno = 0;
- stuck = 0;
- for (;;) {
- cur_seqno = i915_get_gem_seqno(dev);
- if (i915_seqno_passed(cur_seqno, seqno))
- break;
- if (last_seqno == cur_seqno) {
- if (stuck++ > 100) {
- DRM_ERROR("hardware wedged\n");
- atomic_set(&dev_priv->mm.wedged, 1);
- DRM_WAKEUP(&dev_priv->irq_queue);
- break;
- }
+ /* Under UMS, be paranoid and evict. */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = i915_gem_evict_from_inactive_list(dev);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
}
- msleep(10);
- last_seqno = cur_seqno;
- }
- dev_priv->mm.waiting_gem_seqno = 0;
-
- i915_gem_retire_requests(dev);
-
- spin_lock(&dev_priv->mm.active_list_lock);
- if (!atomic_read(&dev_priv->mm.wedged)) {
- /* Active and flushing should now be empty as we've
- * waited for a sequence higher than any pending execbuffer
- */
- WARN_ON(!list_empty(&dev_priv->mm.active_list));
- WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
- /* Request should now be empty as we've also waited
- * for the last request in the list
- */
- WARN_ON(!list_empty(&dev_priv->mm.request_list));
}
- /* Empty the active and flushing lists to inactive. If there's
- * anything left at this point, it means that we're wedged and
- * nothing good's going to happen by leaving them there. So strip
- * the GPU domains and just stuff them onto inactive.
+ /* Hack! Don't let anybody do execbuf while we don't control the chip.
+ * We need to replace this with a semaphore, or something.
+ * And not confound mm.suspended!
*/
- while (!list_empty(&dev_priv->mm.active_list)) {
- struct drm_gem_object *obj;
- uint32_t old_write_domain;
+ dev_priv->mm.suspended = 1;
+ del_timer(&dev_priv->hangcheck_timer);
- obj = list_first_entry(&dev_priv->mm.active_list,
- struct drm_i915_gem_object,
- list)->obj;
- old_write_domain = obj->write_domain;
- obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
- i915_gem_object_move_to_inactive(obj);
+ i915_kernel_lost_context(dev);
+ i915_gem_cleanup_ringbuffer(dev);
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
- }
- spin_unlock(&dev_priv->mm.active_list_lock);
+ mutex_unlock(&dev->struct_mutex);
- while (!list_empty(&dev_priv->mm.flushing_list)) {
- struct drm_gem_object *obj;
- uint32_t old_write_domain;
+ /* Cancel the retire work handler, which should be idle now. */
+ cancel_delayed_work_sync(&dev_priv->mm.retire_work);
- obj = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- list)->obj;
- old_write_domain = obj->write_domain;
- obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
- i915_gem_object_move_to_inactive(obj);
+ return 0;
+}
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+static int
+i915_gem_init_pipe_control(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ obj = drm_gem_object_alloc(dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate seqno page\n");
+ ret = -ENOMEM;
+ goto err;
}
+ obj_priv = to_intel_bo(obj);
+ obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+ ret = i915_gem_object_pin(obj, 4096);
+ if (ret)
+ goto err_unref;
- /* Move all inactive buffers out of the GTT. */
- ret = i915_gem_evict_from_inactive_list(dev);
- WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
+ dev_priv->seqno_page = kmap(obj_priv->pages[0]);
+	if (dev_priv->seqno_page == NULL) {
+		ret = -ENOMEM;
+		goto err_unpin;
+	}
- i915_gem_cleanup_ringbuffer(dev);
- mutex_unlock(&dev->struct_mutex);
+ dev_priv->seqno_obj = obj;
+ memset(dev_priv->seqno_page, 0, PAGE_SIZE);
return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(obj);
+err:
+ return ret;
}
static int
@@ -4584,15 +4656,16 @@ i915_gem_init_hws(struct drm_device *dev)
obj = drm_gem_object_alloc(dev, 4096);
if (obj == NULL) {
DRM_ERROR("Failed to allocate status page\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err;
}
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
ret = i915_gem_object_pin(obj, 4096);
if (ret != 0) {
if (ret != 0) {
-		drm_gem_object_unreference(obj);
- return ret;
+ goto err_unref;
}
dev_priv->status_gfx_addr = obj_priv->gtt_offset;
@@ -4601,17 +4674,52 @@ i915_gem_init_hws(struct drm_device *dev)
if (dev_priv->hw_status_page == NULL) {
DRM_ERROR("Failed to map status page.\n");
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_unpin;
}
+
+ if (HAS_PIPE_CONTROL(dev)) {
+ ret = i915_gem_init_pipe_control(dev);
+ if (ret)
+ goto err_unpin;
+ }
+
dev_priv->hws_obj = obj;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
- I915_READ(HWS_PGA); /* posting read */
+ if (IS_GEN6(dev)) {
+ I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
+ I915_READ(HWS_PGA_GEN6); /* posting read */
+ } else {
+ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+ I915_READ(HWS_PGA); /* posting read */
+ }
DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(obj);
+err:
+	return ret;
+}
+
+static void
+i915_gem_cleanup_pipe_control(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj = dev_priv->seqno_obj;
+ obj_priv = to_intel_bo(obj);
+ kunmap(obj_priv->pages[0]);
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ dev_priv->seqno_obj = NULL;
+
+ dev_priv->seqno_page = NULL;
}
static void
@@ -4625,7 +4733,7 @@ i915_gem_cleanup_hws(struct drm_device *dev)
return;
obj = dev_priv->hws_obj;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
kunmap(obj_priv->pages[0]);
i915_gem_object_unpin(obj);
@@ -4635,6 +4743,9 @@ i915_gem_cleanup_hws(struct drm_device *dev)
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
dev_priv->hw_status_page = NULL;
+ if (HAS_PIPE_CONTROL(dev))
+ i915_gem_cleanup_pipe_control(dev);
+
/* Write high address into HWS_PGA when disabling. */
I915_WRITE(HWS_PGA, 0x1ffff000);
}
@@ -4659,7 +4770,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
i915_gem_cleanup_hws(dev);
return -ENOMEM;
}
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
ret = i915_gem_object_pin(obj, 4096);
if (ret != 0) {
@@ -4745,6 +4856,11 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
ring->space += ring->Size;
}
+ if (IS_I9XX(dev) && !IS_GEN3(dev)) {
+ I915_WRITE(MI_MODE,
+ (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+ }
+
return 0;
}
@@ -4850,7 +4966,8 @@ i915_gem_load(struct drm_device *dev)
spin_unlock(&shrink_list_lock);
/* Old X drivers will take 0-2 for front, back, depth buffers */
- dev_priv->fence_reg_start = 3;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ dev_priv->fence_reg_start = 3;
if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
dev_priv->num_fence_regs = 16;
@@ -4946,7 +5063,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
int ret;
int page_count;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (!obj_priv->phys_obj)
return;
@@ -4985,7 +5102,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
if (id > I915_MAX_PHYS_OBJECT)
return -EINVAL;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->phys_obj) {
if (obj_priv->phys_obj->id == id)
@@ -5036,7 +5153,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
void *obj_addr;
int ret;
char __user *user_data;