author    Eric Anholt <eric@anholt.net>    2008-09-02 03:45:29 +0400
committer Dave Airlie <airlied@linux.ie>   2008-10-18 01:10:51 +0400
commit    546b0974c39657017407c86fe79811100b60700d (patch)
tree      42ae164d23ecaa1cb78ad87ad9603e0bdd29740d /drivers/gpu/drm/i915
parent    ed4c9c4acf948b42b138747fcb8843ecb1a24ce4 (diff)
download  linux-546b0974c39657017407c86fe79811100b60700d.tar.xz
i915: Use struct_mutex to protect ring in GEM mode.
In the conversion for GEM, we had stopped using the hardware lock to protect ring usage, since it was all internal to the DRM now. However, some paths weren't converted to using struct_mutex to prevent multiple threads from concurrently working on the ring, in particular between the vblank swap handler and ioctls.

Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>
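The ioctl side of the fix follows one pattern throughout: take dev->struct_mutex around any code that emits to the ring, and only perform the legacy hardware-lock check when GEM does not own the ring. Below is a minimal sketch of that pattern, condensed from the i915_flip_bufs() change in this diff; the surrounding DRM/i915 declarations (struct drm_device, i915_dispatch_flip(), the RING_LOCK_TEST_WITH_RETURN macro added in i915_drv.h) are assumed from the rest of the patch.

static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	/* Legacy hardware-lock check; becomes a no-op once GEM owns the ring. */
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* struct_mutex now serializes ring access between threads. */
	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_flip(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

The same wrapping is applied to i915_flush_ioctl(), i915_batchbuffer(), i915_cmdbuffer() and i915_irq_emit(); the vblank swap side is sketched after the diffstat below.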
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c   28
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h   15
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c    2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c   28
4 files changed, 63 insertions(+), 10 deletions(-)
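For the vblank swap handler, the ring can no longer be protected by drm_locked_tasklet() once GEM owns it, so the interrupt handler defers pending swaps to a work item that takes struct_mutex before running the tasklet. A condensed sketch of the two pieces added in i915_irq.c follows; the dev_priv layout (mm.vblank_work, ring.ring_obj) is assumed from the i915_drv.h and i915_gem.c hunks below.

/* Work handler: runs i915_vblank_tasklet() under struct_mutex. */
void i915_gem_vblank_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv =
		container_of(work, drm_i915_private_t, mm.vblank_work);
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);
	i915_vblank_tasklet(dev);
	mutex_unlock(&dev->struct_mutex);
}

/* In i915_driver_irq_handler(): keep the locked tasklet on the legacy
 * path, schedule the work item when GEM owns the ring. */
if (vblank && dev_priv->swaps_pending > 0) {
	if (dev_priv->ring.ring_obj == NULL)
		drm_locked_tasklet(dev, i915_vblank_tasklet);
	else
		schedule_work(&dev_priv->mm.vblank_work);
}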
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ea85d71cab04..d71c89f8802e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -588,9 +588,15 @@ static int i915_quiescent(struct drm_device * dev)
static int i915_flush_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- LOCK_TEST_WITH_RETURN(dev, file_priv);
+ int ret;
+
+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
- return i915_quiescent(dev);
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_quiescent(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
}
static int i915_batchbuffer(struct drm_device *dev, void *data,
@@ -611,14 +617,16 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
batch->start, batch->used, batch->num_cliprects);
- LOCK_TEST_WITH_RETURN(dev, file_priv);
+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
batch->num_cliprects *
sizeof(struct drm_clip_rect)))
return -EFAULT;
+ mutex_lock(&dev->struct_mutex);
ret = i915_dispatch_batchbuffer(dev, batch);
+ mutex_unlock(&dev->struct_mutex);
sarea_priv->last_dispatch = (int)hw_status[5];
return ret;
@@ -637,7 +645,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
- LOCK_TEST_WITH_RETURN(dev, file_priv);
+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
if (cmdbuf->num_cliprects &&
DRM_VERIFYAREA_READ(cmdbuf->cliprects,
@@ -647,7 +655,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
return -EFAULT;
}
+ mutex_lock(&dev->struct_mutex);
ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
+ mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
return ret;
@@ -660,11 +670,17 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
static int i915_flip_bufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ int ret;
+
DRM_DEBUG("%s\n", __func__);
- LOCK_TEST_WITH_RETURN(dev, file_priv);
+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
- return i915_dispatch_flip(dev);
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_dispatch_flip(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
}
static int i915_getparam(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 87b071ab8647..8547f0aeafc6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -285,6 +285,9 @@ typedef struct drm_i915_private {
*/
struct delayed_work retire_work;
+ /** Work task for vblank-related ring access */
+ struct work_struct vblank_work;
+
uint32_t next_gem_seqno;
/**
@@ -435,6 +438,7 @@ extern int i915_irq_wait(struct drm_device *dev, void *data,
void i915_user_irq_get(struct drm_device *dev);
void i915_user_irq_put(struct drm_device *dev);
+extern void i915_gem_vblank_work_handler(struct work_struct *work);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
extern void i915_driver_irq_preinstall(struct drm_device * dev);
extern int i915_driver_irq_postinstall(struct drm_device *dev);
@@ -538,6 +542,17 @@ extern void intel_opregion_free(struct drm_device *dev);
extern void opregion_asle_intr(struct drm_device *dev);
extern void opregion_enable_asle(struct drm_device *dev);
+/**
+ * Lock test for when it's just for synchronization of ring access.
+ *
+ * In that case, we don't need to do it when GEM is initialized as nobody else
+ * has access to the ring.
+ */
+#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \
+ if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \
+ LOCK_TEST_WITH_RETURN(dev, file_priv); \
+} while (0)
+
#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 90ae8a0369f7..bb6e5a37efa2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2491,6 +2491,8 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.request_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
+ INIT_WORK(&dev_priv->mm.vblank_work,
+ i915_gem_vblank_work_handler);
dev_priv->mm.next_gem_seqno = 1;
i915_gem_detect_bit_6_swizzle(dev);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f295bdf16e2d..d04c526410a9 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -349,6 +349,21 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
return count;
}
+void
+i915_gem_vblank_work_handler(struct work_struct *work)
+{
+ drm_i915_private_t *dev_priv;
+ struct drm_device *dev;
+
+ dev_priv = container_of(work, drm_i915_private_t,
+ mm.vblank_work);
+ dev = dev_priv->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ i915_vblank_tasklet(dev);
+ mutex_unlock(&dev->struct_mutex);
+}
+
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -422,8 +437,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
if (iir & I915_ASLE_INTERRUPT)
opregion_asle_intr(dev);
- if (vblank && dev_priv->swaps_pending > 0)
- drm_locked_tasklet(dev, i915_vblank_tasklet);
+ if (vblank && dev_priv->swaps_pending > 0) {
+ if (dev_priv->ring.ring_obj == NULL)
+ drm_locked_tasklet(dev, i915_vblank_tasklet);
+ else
+ schedule_work(&dev_priv->mm.vblank_work);
+ }
return IRQ_HANDLED;
}
@@ -514,14 +533,15 @@ int i915_irq_emit(struct drm_device *dev, void *data,
drm_i915_irq_emit_t *emit = data;
int result;
- LOCK_TEST_WITH_RETURN(dev, file_priv);
+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
-
+ mutex_lock(&dev->struct_mutex);
result = i915_emit_irq(dev);
+ mutex_unlock(&dev->struct_mutex);
if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
DRM_ERROR("copy_to_user\n");