Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c | 40
-rw-r--r--  drivers/gpu/drm/drm_context.c | 8
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 1
-rw-r--r--  drivers/gpu/drm/drm_drawable.c | 198
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 10
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 1
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 14
-rw-r--r--  drivers/gpu/drm/drm_info.c | 14
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 30
-rw-r--r--  drivers/gpu/drm/drm_memory.c | 14
-rw-r--r--  drivers/gpu/drm/drm_proc.c | 14
-rw-r--r--  drivers/gpu/drm/drm_scatter.c | 2
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 4
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 13
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i830/i830_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7017.c | 66
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c | 10
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c | 10
-rw-r--r--  drivers/gpu/drm/i915/dvo_sil164.c | 10
-rw-r--r--  drivers/gpu/drm/i915/dvo_tfp410.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 340
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 327
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 212
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 226
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 1156
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c | 148
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 50
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 52
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 183
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 298
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 171
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 124
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 2303
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 272
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 126
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 69
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 61
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 483
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 435
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c (renamed from drivers/gpu/drm/i915/i915_opregion.c) | 181
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 1006
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 109
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 393
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 66
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 894
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 165
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 2
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 4
-rw-r--r--  drivers/gpu/drm/savage/savage_drv.c | 2
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c | 3
-rw-r--r--  drivers/gpu/drm/tdfx/tdfx_drv.c | 2
-rw-r--r--  drivers/gpu/drm/via/via_drv.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 2
62 files changed, 5535 insertions, 4891 deletions
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index f3a23a329f4e..997c43d04909 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -5,7 +5,7 @@
ccflags-y := -Iinclude/drm
drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
- drm_context.o drm_dma.o drm_drawable.o \
+ drm_context.o drm_dma.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index ba38e0147220..252fdb98b73a 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -193,7 +193,7 @@ int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired, allocates the
- * memory via alloc_agp() and creates a drm_agp_mem entry for it.
+ * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it.
*/
int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
{
@@ -211,7 +211,7 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
type = (u32) request->type;
- if (!(memory = drm_alloc_agp(dev, pages, type))) {
+ if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) {
kfree(entry);
return -ENOMEM;
}
@@ -423,38 +423,6 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
return head;
}
-/** Calls agp_allocate_memory() */
-DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data * bridge,
- size_t pages, u32 type)
-{
- return agp_allocate_memory(bridge, pages, type);
-}
-
-/** Calls agp_free_memory() */
-int drm_agp_free_memory(DRM_AGP_MEM * handle)
-{
- if (!handle)
- return 0;
- agp_free_memory(handle);
- return 1;
-}
-
-/** Calls agp_bind_memory() */
-int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start)
-{
- if (!handle)
- return -EINVAL;
- return agp_bind_memory(handle, start);
-}
-
-/** Calls agp_unbind_memory() */
-int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
-{
- if (!handle)
- return -EINVAL;
- return agp_unbind_memory(handle);
-}
-
/**
* Binds a collection of pages into AGP memory at the given offset, returning
* the AGP memory structure containing them.
@@ -474,7 +442,7 @@ drm_agp_bind_pages(struct drm_device *dev,
DRM_DEBUG("\n");
- mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
+ mem = agp_allocate_memory(dev->agp->bridge, num_pages,
type);
if (mem == NULL) {
DRM_ERROR("Failed to allocate memory for %ld pages\n",
@@ -487,7 +455,7 @@ drm_agp_bind_pages(struct drm_device *dev,
mem->page_count = num_pages;
mem->is_flushed = true;
- ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
+ ret = agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
if (ret != 0) {
DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
agp_free_memory(mem);
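The hunks above drop the drm_agp_allocate_memory()/drm_agp_bind_memory()/drm_agp_free_memory() wrappers and call the kernel AGP API on dev->agp->bridge directly. A minimal sketch of the resulting bind path, modelled on drm_agp_bind_pages() above (function name is illustrative; page population and debug output are omitted):

        /* Illustrative only: mirrors the post-patch drm_agp_bind_pages() flow. */
        static DRM_AGP_MEM *bind_pages_sketch(struct drm_device *dev,
                                              unsigned long num_pages,
                                              uint32_t gtt_offset, u32 type)
        {
                DRM_AGP_MEM *mem;
                int ret;

                /* Allocate AGP memory straight from the bridge, no DRM wrapper. */
                mem = agp_allocate_memory(dev->agp->bridge, num_pages, type);
                if (mem == NULL)
                        return NULL;

                /* Bind at the requested GTT offset (expressed in pages). */
                ret = agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
                if (ret != 0) {
                        agp_free_memory(mem);
                        return NULL;
                }

                return mem;
        }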
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 2607753a320b..6d440fb894cf 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -333,14 +333,6 @@ int drm_addctx(struct drm_device *dev, void *data,
return -ENOMEM;
}
- if (ctx->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_ctor)
- if (!dev->driver->context_ctor(dev, ctx->handle)) {
- DRM_DEBUG("Running out of ctxs or memory.\n");
- return -ENOMEM;
- }
- }
-
ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
if (!ctx_entry) {
DRM_DEBUG("out of memory\n");
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 677b275fa721..9d8c892d07c9 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -48,7 +48,6 @@ static struct drm_info_list drm_debugfs_list[] = {
{"queues", drm_queues_info, 0},
{"bufs", drm_bufs_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
- {"gem_objects", drm_gem_object_info, DRIVER_GEM},
#if DRM_DEBUG_CODE
{"vma", drm_vma_info, 0},
#endif
diff --git a/drivers/gpu/drm/drm_drawable.c b/drivers/gpu/drm/drm_drawable.c
deleted file mode 100644
index c53c9768cc11..000000000000
--- a/drivers/gpu/drm/drm_drawable.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/**
- * \file drm_drawable.c
- * IOCTLs for drawables
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- * \author Michel Dänzer <michel@tungstengraphics.com>
- */
-
-/*
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-/**
- * Allocate drawable ID and memory to store information about it.
- */
-int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- unsigned long irqflags;
- struct drm_draw *draw = data;
- int new_id = 0;
- int ret;
-
-again:
- if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Out of memory expanding drawable idr\n");
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
- ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id);
- if (ret == -EAGAIN) {
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- goto again;
- }
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
- draw->handle = new_id;
-
- DRM_DEBUG("%d\n", draw->handle);
-
- return 0;
-}
-
-/**
- * Free drawable ID and memory to store information about it.
- */
-int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_draw *draw = data;
- unsigned long irqflags;
- struct drm_drawable_info *info;
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
-
- info = drm_get_drawable_info(dev, draw->handle);
- if (info == NULL) {
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- return -EINVAL;
- }
- kfree(info->rects);
- kfree(info);
-
- idr_remove(&dev->drw_idr, draw->handle);
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- DRM_DEBUG("%d\n", draw->handle);
- return 0;
-}
-
-int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_update_draw *update = data;
- unsigned long irqflags;
- struct drm_clip_rect *rects;
- struct drm_drawable_info *info;
- int err;
-
- info = idr_find(&dev->drw_idr, update->handle);
- if (!info) {
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
- if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
- DRM_ERROR("No such drawable %d\n", update->handle);
- kfree(info);
- return -EINVAL;
- }
- }
-
- switch (update->type) {
- case DRM_DRAWABLE_CLIPRECTS:
- if (update->num == 0)
- rects = NULL;
- else if (update->num != info->num_rects) {
- rects = kmalloc(update->num *
- sizeof(struct drm_clip_rect),
- GFP_KERNEL);
- } else
- rects = info->rects;
-
- if (update->num && !rects) {
- DRM_ERROR("Failed to allocate cliprect memory\n");
- err = -ENOMEM;
- goto error;
- }
-
- if (update->num && DRM_COPY_FROM_USER(rects,
- (struct drm_clip_rect __user *)
- (unsigned long)update->data,
- update->num *
- sizeof(*rects))) {
- DRM_ERROR("Failed to copy cliprects from userspace\n");
- err = -EFAULT;
- goto error;
- }
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
-
- if (rects != info->rects) {
- kfree(info->rects);
- }
-
- info->rects = rects;
- info->num_rects = update->num;
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
- DRM_DEBUG("Updated %d cliprects for drawable %d\n",
- info->num_rects, update->handle);
- break;
- default:
- DRM_ERROR("Invalid update type %d\n", update->type);
- return -EINVAL;
- }
-
- return 0;
-
-error:
- if (rects != info->rects)
- kfree(rects);
-
- return err;
-}
-
-/**
- * Caller must hold the drawable spinlock!
- */
-struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id)
-{
- return idr_find(&dev->drw_idr, id);
-}
-EXPORT_SYMBOL(drm_get_drawable_info);
-
-static int drm_drawable_free(int idr, void *p, void *data)
-{
- struct drm_drawable_info *info = p;
-
- if (info) {
- kfree(info->rects);
- kfree(info);
- }
-
- return 0;
-}
-
-void drm_drawable_free_all(struct drm_device *dev)
-{
- idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
- idr_remove_all(&dev->drw_idr);
-}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 84da748555bc..5ff75a3a6b9d 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -91,8 +91,8 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
@@ -127,7 +127,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
@@ -180,10 +180,6 @@ int drm_lastclose(struct drm_device * dev)
mutex_lock(&dev->struct_mutex);
- /* Free drawable information memory */
- drm_drawable_free_all(dev);
- del_timer(&dev->timer);
-
/* Clear AGP information */
if (drm_core_has_AGP(dev) && dev->agp &&
!drm_core_check_feature(dev, DRIVER_MODESET)) {
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 96e963108225..fd033ebbdf84 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -30,7 +30,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
#include "drmP.h"
#include "drm_edid.h"
#include "drm_edid_modes.h"
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 5663d2719063..ea1c4b019ebf 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -92,12 +92,6 @@ drm_gem_init(struct drm_device *dev)
spin_lock_init(&dev->object_name_lock);
idr_init(&dev->object_name_idr);
- atomic_set(&dev->object_count, 0);
- atomic_set(&dev->object_memory, 0);
- atomic_set(&dev->pin_count, 0);
- atomic_set(&dev->pin_memory, 0);
- atomic_set(&dev->gtt_count, 0);
- atomic_set(&dev->gtt_memory, 0);
mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
if (!mm) {
@@ -151,9 +145,6 @@ int drm_gem_object_init(struct drm_device *dev,
atomic_set(&obj->handle_count, 0);
obj->size = size;
- atomic_inc(&dev->object_count);
- atomic_add(obj->size, &dev->object_memory);
-
return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
@@ -180,8 +171,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
return obj;
fput:
/* Object_init mangles the global counters - readjust them. */
- atomic_dec(&dev->object_count);
- atomic_sub(obj->size, &dev->object_memory);
fput(obj->filp);
free:
kfree(obj);
@@ -436,10 +425,7 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
void
drm_gem_object_release(struct drm_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
fput(obj->filp);
- atomic_dec(&dev->object_count);
- atomic_sub(obj->size, &dev->object_memory);
}
EXPORT_SYMBOL(drm_gem_object_release);
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 974e970ce3f8..3cdbaf379bb5 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -270,20 +270,6 @@ int drm_gem_name_info(struct seq_file *m, void *data)
return 0;
}
-int drm_gem_object_info(struct seq_file *m, void* data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
-
- seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
- seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
- seq_printf(m, "%d gtt total\n", dev->gtt_total);
- return 0;
-}
-
#if DRM_DEBUG_CODE
int drm_vma_info(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 9bf93bc9a32c..632ae243ede0 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -37,6 +37,8 @@
static int drm_notifier(void *priv);
+static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
+
/**
* Lock ioctl.
*
@@ -124,9 +126,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
}
- if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
- dev->driver->dma_ready(dev);
-
if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
{
if (dev->driver->dma_quiescent(dev)) {
@@ -136,12 +135,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
}
}
- if (dev->driver->kernel_context_switch &&
- dev->last_context != lock->context) {
- dev->driver->kernel_context_switch(dev, dev->last_context,
- lock->context);
- }
-
return 0;
}
@@ -169,15 +162,8 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
- /* kernel_context_switch isn't used by any of the x86 drm
- * modules but is required by the Sparc driver.
- */
- if (dev->driver->kernel_context_switch_unlock)
- dev->driver->kernel_context_switch_unlock(dev);
- else {
- if (drm_lock_free(&master->lock, lock->context)) {
- /* FIXME: Should really bail out here. */
- }
+ if (drm_lock_free(&master->lock, lock->context)) {
+ /* FIXME: Should really bail out here. */
}
unblock_all_signals();
@@ -193,6 +179,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
*
* Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
*/
+static
int drm_lock_take(struct drm_lock_data *lock_data,
unsigned int context)
{
@@ -229,7 +216,6 @@ int drm_lock_take(struct drm_lock_data *lock_data,
}
return 0;
}
-EXPORT_SYMBOL(drm_lock_take);
/**
* This takes a lock forcibly and hands it to context. Should ONLY be used
@@ -297,7 +283,6 @@ int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
wake_up_interruptible(&lock_data->lock_queue);
return 0;
}
-EXPORT_SYMBOL(drm_lock_free);
/**
* If we get here, it means that the process has called DRM_IOCTL_LOCK
@@ -360,7 +345,6 @@ void drm_idlelock_take(struct drm_lock_data *lock_data)
}
spin_unlock_bh(&lock_data->spinlock);
}
-EXPORT_SYMBOL(drm_idlelock_take);
void drm_idlelock_release(struct drm_lock_data *lock_data)
{
@@ -380,8 +364,6 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
}
spin_unlock_bh(&lock_data->spinlock);
}
-EXPORT_SYMBOL(drm_idlelock_release);
-
int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{
@@ -390,5 +372,3 @@ int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
master->lock.file_priv == file_priv);
}
-
-EXPORT_SYMBOL(drm_i_have_hw_lock);
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 7732268eced2..c9b805000a11 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -99,29 +99,23 @@ static void *agp_remap(unsigned long offset, unsigned long size,
return addr;
}
-/** Wrapper around agp_allocate_memory() */
-DRM_AGP_MEM *drm_alloc_agp(struct drm_device * dev, int pages, u32 type)
-{
- return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
-}
-
/** Wrapper around agp_free_memory() */
-int drm_free_agp(DRM_AGP_MEM * handle, int pages)
+void drm_free_agp(DRM_AGP_MEM * handle, int pages)
{
- return drm_agp_free_memory(handle) ? 0 : -EINVAL;
+ agp_free_memory(handle);
}
EXPORT_SYMBOL(drm_free_agp);
/** Wrapper around agp_bind_memory() */
int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
{
- return drm_agp_bind_memory(handle, start);
+ return agp_bind_memory(handle, start);
}
/** Wrapper around agp_unbind_memory() */
int drm_unbind_agp(DRM_AGP_MEM * handle)
{
- return drm_agp_unbind_memory(handle);
+ return agp_unbind_memory(handle);
}
EXPORT_SYMBOL(drm_unbind_agp);
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index a9ba6b69ad35..9e5b07efebb7 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -55,7 +55,6 @@ static struct drm_info_list drm_proc_list[] = {
{"queues", drm_queues_info, 0},
{"bufs", drm_bufs_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
- {"gem_objects", drm_gem_object_info, DRIVER_GEM},
#if DRM_DEBUG_CODE
{"vma", drm_vma_info, 0},
#endif
@@ -151,7 +150,6 @@ fail:
int drm_proc_init(struct drm_minor *minor, int minor_id,
struct proc_dir_entry *root)
{
- struct drm_device *dev = minor->dev;
char name[64];
int ret;
@@ -172,14 +170,6 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
return ret;
}
- if (dev->driver->proc_init) {
- ret = dev->driver->proc_init(minor);
- if (ret) {
- DRM_ERROR("DRM: Driver failed to initialize "
- "/proc/dri.\n");
- return ret;
- }
- }
return 0;
}
@@ -216,15 +206,11 @@ int drm_proc_remove_files(struct drm_info_list *files, int count,
*/
int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
{
- struct drm_device *dev = minor->dev;
char name[64];
if (!root || !minor->proc_root)
return 0;
- if (dev->driver->proc_cleanup)
- dev->driver->proc_cleanup(minor);
-
drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
sprintf(name, "%d", minor->index);
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index 9034c4c6100d..d15e09b0ae0b 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -184,8 +184,6 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
drm_sg_cleanup(entry);
return -ENOMEM;
}
-EXPORT_SYMBOL(drm_sg_alloc);
-
int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index d1ad57450df1..cdc89ee042cc 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -240,14 +240,10 @@ int drm_fill_in_dev(struct drm_device *dev,
INIT_LIST_HEAD(&dev->vblank_event_list);
spin_lock_init(&dev->count_lock);
- spin_lock_init(&dev->drw_lock);
spin_lock_init(&dev->event_lock);
- init_timer(&dev->timer);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
- idr_init(&dev->drw_idr);
-
if (drm_ht_create(&dev->map_hash, 12)) {
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 5df450683aab..2c3fcbdfd8ff 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -523,14 +523,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
return 0;
}
-resource_size_t drm_core_get_map_ofs(struct drm_local_map * map)
-{
- return map->offset;
-}
-
-EXPORT_SYMBOL(drm_core_get_map_ofs);
-
-resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
+static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
return dev->hose->dense_mem_base - dev->hose->mem_space->start;
@@ -539,8 +532,6 @@ resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
#endif
}
-EXPORT_SYMBOL(drm_core_get_reg_ofs);
-
/**
* mmap DMA memory.
*
@@ -627,7 +618,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
#endif
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
- offset = dev->driver->get_reg_ofs(dev);
+ offset = drm_core_get_reg_ofs(dev);
vma->vm_flags |= VM_IO; /* not in core dump */
vma->vm_page_prot = drm_io_prot(map->type, vma);
#if !defined(__arm__)
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index b4250b2cac1f..1c73b0c43c1e 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -52,8 +52,6 @@ static struct drm_driver driver = {
.device_is_agp = i810_driver_device_is_agp,
.reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
.dma_quiescent = i810_driver_dma_quiescent,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = i810_ioctls,
.fops = {
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c
index a5c66aa82f0c..7140ffc12eee 100644
--- a/drivers/gpu/drm/i830/i830_drv.c
+++ b/drivers/gpu/drm/i830/i830_drv.c
@@ -57,8 +57,6 @@ static struct drm_driver driver = {
.device_is_agp = i830_driver_device_is_agp,
.reclaim_buffers_locked = i830_driver_reclaim_buffers_locked,
.dma_quiescent = i830_driver_dma_quiescent,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
#if USE_IRQS
.irq_preinstall = i830_driver_irq_preinstall,
.irq_postinstall = i830_driver_irq_postinstall,
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 5c8e53458edb..f6e98dd416c9 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -26,13 +26,13 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_dvo.o \
intel_ringbuffer.o \
intel_overlay.o \
+ intel_opregion.o \
dvo_ch7xxx.o \
dvo_ch7017.o \
dvo_ivch.o \
dvo_tfp410.o \
dvo_sil164.o
-i915-$(CONFIG_ACPI) += i915_opregion.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
obj-$(CONFIG_DRM_I915) += i915.o
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 14d59804acd7..af70337567ce 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -165,67 +165,44 @@ struct ch7017_priv {
static void ch7017_dump_regs(struct intel_dvo_device *dvo);
static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
-static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
+static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
{
- struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
- u8 out_buf[2];
- u8 in_buf[2];
-
struct i2c_msg msgs[] = {
{
.addr = dvo->slave_addr,
.flags = 0,
.len = 1,
- .buf = out_buf,
+ .buf = &addr,
},
{
.addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
- .buf = in_buf,
+ .buf = val,
}
};
-
- out_buf[0] = addr;
- out_buf[1] = 0;
-
- if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
- *val= in_buf[0];
- return true;
- };
-
- return false;
+ return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2;
}
-static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
+static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
{
- struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
- uint8_t out_buf[2];
+ uint8_t buf[2] = { addr, val };
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
.len = 2,
- .buf = out_buf,
+ .buf = buf,
};
-
- out_buf[0] = addr;
- out_buf[1] = val;
-
- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
- return true;
-
- return false;
+ return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1;
}
/** Probes for a CH7017 on the given bus and slave address. */
static bool ch7017_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
struct ch7017_priv *priv;
- uint8_t val;
+ const char *str;
+ u8 val;
priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
if (priv == NULL)
@@ -237,16 +214,27 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
goto fail;
- if (val != CH7017_DEVICE_ID_VALUE &&
- val != CH7018_DEVICE_ID_VALUE &&
- val != CH7019_DEVICE_ID_VALUE) {
+ switch (val) {
+ case CH7017_DEVICE_ID_VALUE:
+ str = "ch7017";
+ break;
+ case CH7018_DEVICE_ID_VALUE:
+ str = "ch7018";
+ break;
+ case CH7019_DEVICE_ID_VALUE:
+ str = "ch7019";
+ break;
+ default:
DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
- "Slave %d.\n",
- val, i2cbus->adapter.name,dvo->slave_addr);
+ "slave %d.\n",
+ val, adapter->name,dvo->slave_addr);
goto fail;
}
+ DRM_DEBUG_KMS("%s detected on %s, addr %d\n",
+ str, adapter->name, dvo->slave_addr);
return true;
+
fail:
kfree(priv);
return false;
@@ -368,7 +356,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
}
/* XXX: Should actually wait for update power status somehow */
- udelay(20000);
+ msleep(20);
}
static void ch7017_dump_regs(struct intel_dvo_device *dvo)
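The same simplification repeats in the dvo_ch7xxx, dvo_ivch, dvo_sil164 and dvo_tfp410 hunks that follow: the container_of() detour through intel_i2c_chan goes away and i2c_transfer() is called on dvo->i2c_bus directly. A minimal sketch of the post-patch register-write helper (the helper name is illustrative):

        /* Illustrative sketch of the simplified DVO register write. */
        static bool dvo_writeb_sketch(struct intel_dvo_device *dvo, u8 addr, u8 val)
        {
                u8 buf[2] = { addr, val };      /* register address, then value */
                struct i2c_msg msg = {
                        .addr  = dvo->slave_addr,
                        .flags = 0,
                        .len   = 2,
                        .buf   = buf,
                };

                /* Single write transfer; i2c_transfer() returns the message count. */
                return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1;
        }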
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 6f1944b24441..7eaa94e4ff06 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -113,7 +113,6 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
@@ -135,14 +134,14 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
out_buf[0] = addr;
out_buf[1] = 0;
- if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
};
if (!ch7xxx->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
}
@@ -152,7 +151,6 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
@@ -164,12 +162,12 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
out_buf[0] = addr;
out_buf[1] = ch;
- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+ if (i2c_transfer(adapter, &msg, 1) == 1)
return true;
if (!ch7xxx->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index a2ec3f487202..a12ed9414cc7 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -167,7 +167,6 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
{
struct ivch_priv *priv = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[1];
u8 in_buf[2];
@@ -193,7 +192,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
out_buf[0] = addr;
- if (i2c_transfer(&i2cbus->adapter, msgs, 3) == 3) {
+ if (i2c_transfer(adapter, msgs, 3) == 3) {
*data = (in_buf[1] << 8) | in_buf[0];
return true;
};
@@ -201,7 +200,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
if (!priv->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from "
"%s:%02x.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
}
@@ -211,7 +210,6 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
{
struct ivch_priv *priv = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[3];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
@@ -224,12 +222,12 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
out_buf[1] = data & 0xff;
out_buf[2] = data >> 8;
- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+ if (i2c_transfer(adapter, &msg, 1) == 1)
return true;
if (!priv->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 9b8e6765cf26..e4b4091df942 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -69,7 +69,6 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct sil164_priv *sil = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
@@ -91,14 +90,14 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
out_buf[0] = addr;
out_buf[1] = 0;
- if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
};
if (!sil->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
}
@@ -107,7 +106,6 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct sil164_priv *sil= dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
@@ -119,12 +117,12 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
out_buf[0] = addr;
out_buf[1] = ch;
- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+ if (i2c_transfer(adapter, &msg, 1) == 1)
return true;
if (!sil->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 56f66426207f..8ab2855bb544 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -94,7 +94,6 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
@@ -116,14 +115,14 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
out_buf[0] = addr;
out_buf[1] = 0;
- if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
};
if (!tfp->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
}
@@ -132,7 +131,6 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
@@ -144,12 +142,12 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
out_buf[0] = addr;
out_buf[1] = ch;
- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+ if (i2c_transfer(adapter, &msg, 1) == 1)
return true;
if (!tfp->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 5e43d7076789..d598070fb279 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -40,9 +40,51 @@
#if defined(CONFIG_DEBUG_FS)
-#define ACTIVE_LIST 1
-#define FLUSHING_LIST 2
-#define INACTIVE_LIST 3
+enum {
+ RENDER_LIST,
+ BSD_LIST,
+ FLUSHING_LIST,
+ INACTIVE_LIST,
+ PINNED_LIST,
+ DEFERRED_FREE_LIST,
+};
+
+static const char *yesno(int v)
+{
+ return v ? "yes" : "no";
+}
+
+static int i915_capabilities(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ const struct intel_device_info *info = INTEL_INFO(dev);
+
+ seq_printf(m, "gen: %d\n", info->gen);
+#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
+ B(is_mobile);
+ B(is_i85x);
+ B(is_i915g);
+ B(is_i945gm);
+ B(is_g33);
+ B(need_gfx_hws);
+ B(is_g4x);
+ B(is_pineview);
+ B(is_broadwater);
+ B(is_crestline);
+ B(is_ironlake);
+ B(has_fbc);
+ B(has_rc6);
+ B(has_pipe_cxsr);
+ B(has_hotplug);
+ B(cursor_needs_physical);
+ B(has_overlay);
+ B(overlay_needs_physical);
+ B(supports_tv);
+#undef B
+
+ return 0;
+}
static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
{
@@ -64,6 +106,27 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
}
}
+static void
+describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
+{
+ seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
+ &obj->base,
+ get_pin_flag(obj),
+ get_tiling_flag(obj),
+ obj->base.size,
+ obj->base.read_domains,
+ obj->base.write_domain,
+ obj->last_rendering_seqno,
+ obj->dirty ? " dirty" : "",
+ obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+ if (obj->base.name)
+ seq_printf(m, " (name: %d)", obj->base.name);
+ if (obj->fence_reg != I915_FENCE_REG_NONE)
+ seq_printf(m, " (fence: %d)", obj->fence_reg);
+ if (obj->gtt_space != NULL)
+ seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
+}
+
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -72,56 +135,84 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
- spinlock_t *lock = NULL;
+ size_t total_obj_size, total_gtt_size;
+ int count, ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
switch (list) {
- case ACTIVE_LIST:
- seq_printf(m, "Active:\n");
- lock = &dev_priv->mm.active_list_lock;
+ case RENDER_LIST:
+ seq_printf(m, "Render:\n");
head = &dev_priv->render_ring.active_list;
break;
+ case BSD_LIST:
+ seq_printf(m, "BSD:\n");
+ head = &dev_priv->bsd_ring.active_list;
+ break;
case INACTIVE_LIST:
seq_printf(m, "Inactive:\n");
head = &dev_priv->mm.inactive_list;
break;
+ case PINNED_LIST:
+ seq_printf(m, "Pinned:\n");
+ head = &dev_priv->mm.pinned_list;
+ break;
case FLUSHING_LIST:
seq_printf(m, "Flushing:\n");
head = &dev_priv->mm.flushing_list;
break;
+ case DEFERRED_FREE_LIST:
+ seq_printf(m, "Deferred free:\n");
+ head = &dev_priv->mm.deferred_free_list;
+ break;
default:
- DRM_INFO("Ooops, unexpected list\n");
- return 0;
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
}
- if (lock)
- spin_lock(lock);
- list_for_each_entry(obj_priv, head, list)
- {
- seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s",
- &obj_priv->base,
- get_pin_flag(obj_priv),
- obj_priv->base.size,
- obj_priv->base.read_domains,
- obj_priv->base.write_domain,
- obj_priv->last_rendering_seqno,
- obj_priv->dirty ? " dirty" : "",
- obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
-
- if (obj_priv->base.name)
- seq_printf(m, " (name: %d)", obj_priv->base.name);
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
- if (obj_priv->gtt_space != NULL)
- seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset);
-
+ total_obj_size = total_gtt_size = count = 0;
+ list_for_each_entry(obj_priv, head, list) {
+ seq_printf(m, " ");
+ describe_obj(m, obj_priv);
seq_printf(m, "\n");
+ total_obj_size += obj_priv->base.size;
+ total_gtt_size += obj_priv->gtt_space->size;
+ count++;
}
+ mutex_unlock(&dev->struct_mutex);
- if (lock)
- spin_unlock(lock);
+ seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+ count, total_obj_size, total_gtt_size);
return 0;
}
+static int i915_gem_object_info(struct seq_file *m, void* data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
+ seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
+ seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
+ seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
+ seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
+ seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
+ seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -176,6 +267,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_request *gem_request;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
seq_printf(m, "Request:\n");
list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
@@ -184,6 +280,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
gem_request->seqno,
(int) (jiffies - gem_request->emitted_jiffies));
}
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -192,16 +290,24 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
if (dev_priv->render_ring.status_page.page_addr != NULL) {
seq_printf(m, "Current sequence: %d\n",
- i915_get_gem_seqno(dev, &dev_priv->render_ring));
+ dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
} else {
seq_printf(m, "Current sequence: hws uninitialized\n");
}
seq_printf(m, "Waiter sequence: %d\n",
dev_priv->mm.waiting_gem_seqno);
seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
+
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -211,6 +317,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
if (!HAS_PCH_SPLIT(dev)) {
seq_printf(m, "Interrupt enable: %08x\n",
@@ -247,7 +358,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
atomic_read(&dev_priv->irq_received));
if (dev_priv->render_ring.status_page.page_addr != NULL) {
seq_printf(m, "Current sequence: %d\n",
- i915_get_gem_seqno(dev, &dev_priv->render_ring));
+ dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
} else {
seq_printf(m, "Current sequence: hws uninitialized\n");
}
@@ -255,6 +366,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
dev_priv->mm.waiting_gem_seqno);
seq_printf(m, "IRQ sequence: %d\n",
dev_priv->mm.irq_gem_seqno);
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -263,7 +376,11 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int i;
+ int i, ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
@@ -289,6 +406,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
seq_printf(m, "\n");
}
}
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -313,16 +431,19 @@ static int i915_hws_info(struct seq_file *m, void *data)
return 0;
}
-static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
+static void i915_dump_object(struct seq_file *m,
+ struct io_mapping *mapping,
+ struct drm_i915_gem_object *obj_priv)
{
- int page, i;
- uint32_t *mem;
+ int page, page_count, i;
+ page_count = obj_priv->base.size / PAGE_SIZE;
for (page = 0; page < page_count; page++) {
- mem = kmap_atomic(pages[page], KM_USER0);
+ u32 *mem = io_mapping_map_wc(mapping,
+ obj_priv->gtt_offset + page * PAGE_SIZE);
for (i = 0; i < PAGE_SIZE; i += 4)
seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
- kunmap_atomic(mem, KM_USER0);
+ io_mapping_unmap(mem);
}
}
@@ -335,27 +456,21 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
struct drm_i915_gem_object *obj_priv;
int ret;
- spin_lock(&dev_priv->mm.active_list_lock);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
list) {
obj = &obj_priv->base;
if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
- ret = i915_gem_object_get_pages(obj, 0);
- if (ret) {
- DRM_ERROR("Failed to get pages: %d\n", ret);
- spin_unlock(&dev_priv->mm.active_list_lock);
- return ret;
- }
-
- seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
- i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
-
- i915_gem_object_put_pages(obj);
+ seq_printf(m, "--- gtt_offset = 0x%08x\n",
+ obj_priv->gtt_offset);
+ i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
}
}
- spin_unlock(&dev_priv->mm.active_list_lock);
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -365,20 +480,24 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u8 *virt;
- uint32_t *ptr, off;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
if (!dev_priv->render_ring.gem_object) {
seq_printf(m, "No ringbuffer setup\n");
- return 0;
- }
-
- virt = dev_priv->render_ring.virtual_start;
+ } else {
+ u8 *virt = dev_priv->render_ring.virtual_start;
+ uint32_t off;
- for (off = 0; off < dev_priv->render_ring.size; off += 4) {
- ptr = (uint32_t *)(virt + off);
- seq_printf(m, "%08x : %08x\n", off, *ptr);
+ for (off = 0; off < dev_priv->render_ring.size; off += 4) {
+ uint32_t *ptr = (uint32_t *)(virt + off);
+ seq_printf(m, "%08x : %08x\n", off, *ptr);
+ }
}
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -396,7 +515,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
seq_printf(m, "RingHead : %08x\n", head);
seq_printf(m, "RingTail : %08x\n", tail);
seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size);
- seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
+ seq_printf(m, "Acthd : %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD));
return 0;
}
@@ -458,7 +577,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
}
@@ -642,6 +761,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
} else {
seq_printf(m, "FBC disabled: ");
switch (dev_priv->no_fbc_reason) {
+ case FBC_NO_OUTPUT:
+ seq_printf(m, "no outputs");
+ break;
case FBC_STOLEN_TOO_SMALL:
seq_printf(m, "not enough stolen memory");
break;
@@ -675,15 +797,17 @@ static int i915_sr_status(struct seq_file *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
bool sr_enabled = false;
- if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev))
+ if (IS_IRONLAKE(dev))
+ sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
+ else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
else if (IS_I915GM(dev))
sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
else if (IS_PINEVIEW(dev))
sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
- seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
- "disabled");
+ seq_printf(m, "self-refresh: %s\n",
+ sr_enabled ? "enabled" : "disabled");
return 0;
}
@@ -694,10 +818,16 @@ static int i915_emon_status(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long temp, chipset, gfx;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
temp = i915_mch_val(dev_priv);
chipset = i915_chipset_val(dev_priv);
gfx = i915_gfx_val(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
seq_printf(m, "GMCH temp: %ld\n", temp);
seq_printf(m, "Chipset power: %ld\n", chipset);
@@ -718,6 +848,68 @@ static int i915_gfxec(struct seq_file *m, void *unused)
return 0;
}
+static int i915_opregion(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ if (opregion->header)
+ seq_write(m, opregion->header, OPREGION_SIZE);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_fbdev *ifbdev;
+ struct intel_framebuffer *fb;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ if (ret)
+ return ret;
+
+ ifbdev = dev_priv->fbdev;
+ fb = to_intel_framebuffer(ifbdev->helper.fb);
+
+ seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
+ fb->base.width,
+ fb->base.height,
+ fb->base.depth,
+ fb->base.bits_per_pixel);
+ describe_obj(m, to_intel_bo(fb->obj));
+ seq_printf(m, "\n");
+
+ list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
+ if (&fb->base == ifbdev->helper.fb)
+ continue;
+
+ seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
+ fb->base.width,
+ fb->base.height,
+ fb->base.depth,
+ fb->base.bits_per_pixel);
+ describe_obj(m, to_intel_bo(fb->obj));
+ seq_printf(m, "\n");
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
+
static int
i915_wedged_open(struct inode *inode,
struct file *filp)
@@ -741,6 +933,9 @@ i915_wedged_read(struct file *filp,
"wedged : %d\n",
atomic_read(&dev_priv->mm.wedged));
+ if (len > sizeof (buf))
+ len = sizeof (buf);
+
return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}
@@ -770,7 +965,7 @@ i915_wedged_write(struct file *filp,
atomic_set(&dev_priv->mm.wedged, val);
if (val) {
- DRM_WAKEUP(&dev_priv->irq_queue);
+ wake_up_all(&dev_priv->irq_queue);
queue_work(dev_priv->wq, &dev_priv->error_work);
}
@@ -823,9 +1018,14 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
}
static struct drm_info_list i915_debugfs_list[] = {
- {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
+ {"i915_capabilities", i915_capabilities, 0, 0},
+ {"i915_gem_objects", i915_gem_object_info, 0},
+ {"i915_gem_render_active", i915_gem_object_list_info, 0, (void *) RENDER_LIST},
+ {"i915_gem_bsd_active", i915_gem_object_list_info, 0, (void *) BSD_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+ {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
+ {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
@@ -845,6 +1045,8 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gfxec", i915_gfxec, 0},
{"i915_fbc_status", i915_fbc_status, 0},
{"i915_sr_status", i915_sr_status, 0},
+ {"i915_opregion", i915_opregion, 0},
+ {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
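Nearly every debugfs reader touched above gains the same locking discipline: take struct_mutex with mutex_lock_interruptible(), propagate the error if the wait was interrupted, and drop the lock before returning. A minimal sketch of that pattern (reader name and body are illustrative):

        /* Illustrative only: the locking shape the patch gives each reader. */
        static int example_info(struct seq_file *m, void *data)
        {
                struct drm_info_node *node = (struct drm_info_node *) m->private;
                struct drm_device *dev = node->minor->dev;
                int ret;

                ret = mutex_lock_interruptible(&dev->struct_mutex);
                if (ret)
                        return ret;     /* interrupted by a signal */

                /* ... walk GEM lists or print dev_priv counters here ... */

                mutex_unlock(&dev->struct_mutex);
                return 0;
        }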
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 9d67b4853030..726c3736082f 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -40,8 +40,7 @@
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
-
-extern int intel_max_stolen; /* from AGP driver */
+#include <acpi/video.h>
/**
* Sets up the hardware status page for devices that need a physical address
@@ -64,7 +63,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
0xf0;
@@ -222,7 +221,7 @@ static int i915_dma_resume(struct drm_device * dev)
DRM_DEBUG_DRIVER("hw status page @ %p\n",
ring->status_page.page_addr);
if (ring->status_page.gfx_addr != 0)
- ring->setup_status_page(dev, ring);
+ intel_ring_setup_status_page(dev, ring);
else
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
@@ -377,7 +376,7 @@ i915_emit_box(struct drm_device *dev,
return -EINVAL;
}
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
BEGIN_LP_RING(4);
OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
@@ -481,7 +480,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
if (!IS_I830(dev) && !IS_845G(dev)) {
BEGIN_LP_RING(2);
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
OUT_RING(batch->start);
} else {
@@ -888,12 +887,12 @@ static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+ int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
int ret;
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
@@ -920,7 +919,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
return ret;
}
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
upper_32_bits(dev_priv->mch_res.start));
@@ -934,7 +933,7 @@ static void
intel_setup_mchbar(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+ int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
bool enabled;
@@ -971,7 +970,7 @@ static void
intel_teardown_mchbar(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+ int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
if (dev_priv->mchbar_need_disable) {
@@ -990,174 +989,6 @@ intel_teardown_mchbar(struct drm_device *dev)
release_resource(&dev_priv->mch_res);
}
-/**
- * i915_probe_agp - get AGP bootup configuration
- * @pdev: PCI device
- * @aperture_size: returns AGP aperture configured size
- * @preallocated_size: returns size of BIOS preallocated AGP space
- *
- * Since Intel integrated graphics are UMA, the BIOS has to set aside
- * some RAM for the framebuffer at early boot. This code figures out
- * how much was set aside so we can use it for our own purposes.
- */
-static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
- uint32_t *preallocated_size,
- uint32_t *start)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u16 tmp = 0;
- unsigned long overhead;
- unsigned long stolen;
-
- /* Get the fb aperture size and "stolen" memory amount. */
- pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp);
-
- *aperture_size = 1024 * 1024;
- *preallocated_size = 1024 * 1024;
-
- switch (dev->pdev->device) {
- case PCI_DEVICE_ID_INTEL_82830_CGC:
- case PCI_DEVICE_ID_INTEL_82845G_IG:
- case PCI_DEVICE_ID_INTEL_82855GM_IG:
- case PCI_DEVICE_ID_INTEL_82865_IG:
- if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
- *aperture_size *= 64;
- else
- *aperture_size *= 128;
- break;
- default:
- /* 9xx supports large sizes, just look at the length */
- *aperture_size = pci_resource_len(dev->pdev, 2);
- break;
- }
-
- /*
- * Some of the preallocated space is taken by the GTT
- * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
- */
- if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
- overhead = 4096;
- else
- overhead = (*aperture_size / 1024) + 4096;
-
- if (IS_GEN6(dev)) {
- /* SNB has memory control reg at 0x50.w */
- pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp);
-
- switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) {
- case INTEL_855_GMCH_GMS_DISABLED:
- DRM_ERROR("video memory is disabled\n");
- return -1;
- case SNB_GMCH_GMS_STOLEN_32M:
- stolen = 32 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_64M:
- stolen = 64 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_96M:
- stolen = 96 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_128M:
- stolen = 128 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_160M:
- stolen = 160 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_192M:
- stolen = 192 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_224M:
- stolen = 224 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_256M:
- stolen = 256 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_288M:
- stolen = 288 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_320M:
- stolen = 320 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_352M:
- stolen = 352 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_384M:
- stolen = 384 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_416M:
- stolen = 416 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_448M:
- stolen = 448 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_480M:
- stolen = 480 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_512M:
- stolen = 512 * 1024 * 1024;
- break;
- default:
- DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
- tmp & SNB_GMCH_GMS_STOLEN_MASK);
- return -1;
- }
- } else {
- switch (tmp & INTEL_GMCH_GMS_MASK) {
- case INTEL_855_GMCH_GMS_DISABLED:
- DRM_ERROR("video memory is disabled\n");
- return -1;
- case INTEL_855_GMCH_GMS_STOLEN_1M:
- stolen = 1 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_4M:
- stolen = 4 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_8M:
- stolen = 8 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_16M:
- stolen = 16 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_32M:
- stolen = 32 * 1024 * 1024;
- break;
- case INTEL_915G_GMCH_GMS_STOLEN_48M:
- stolen = 48 * 1024 * 1024;
- break;
- case INTEL_915G_GMCH_GMS_STOLEN_64M:
- stolen = 64 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_128M:
- stolen = 128 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_256M:
- stolen = 256 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_96M:
- stolen = 96 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_160M:
- stolen = 160 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_224M:
- stolen = 224 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_352M:
- stolen = 352 * 1024 * 1024;
- break;
- default:
- DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
- tmp & INTEL_GMCH_GMS_MASK);
- return -1;
- }
- }
-
- *preallocated_size = stolen - overhead;
- *start = overhead;
-
- return 0;
-}
-
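The removed probe's overhead rule ("GTT is 1K per MB of aperture size, and popup is 4K", applied on pre-G4X parts) is easy to sanity-check by hand. A standalone illustration of the arithmetic, with the 256 MiB aperture and 32 MiB stolen figures chosen purely as an example rather than read from hardware:

/* Worked example of the legacy overhead rule removed above (illustrative
 * only; the sizes are arbitrary, not probed from the GMCH). */
#include <stdio.h>

int main(void)
{
	unsigned long aperture = 256UL * 1024 * 1024;    /* 256 MiB aperture  */
	unsigned long stolen   = 32UL * 1024 * 1024;     /* 32 MiB stolen RAM */
	unsigned long overhead = aperture / 1024 + 4096; /* 1K per MB + popup */

	printf("overhead: %lu bytes, usable stolen: %lu bytes\n",
	       overhead, stolen - overhead);
	return 0;
}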
#define PTE_ADDRESS_MASK 0xfffff000
#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
@@ -1181,11 +1012,11 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
{
unsigned long *gtt;
unsigned long entry, phys;
- int gtt_bar = IS_I9XX(dev) ? 0 : 1;
+ int gtt_bar = IS_GEN2(dev) ? 1 : 0;
int gtt_offset, gtt_size;
- if (IS_I965G(dev)) {
- if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) {
gtt_offset = 2*1024*1024;
gtt_size = 2*1024*1024;
} else {
@@ -1210,10 +1041,8 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
/* Mask out these reserved bits on this hardware. */
- if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
- IS_I945G(dev) || IS_I945GM(dev)) {
+ if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev))
entry &= ~PTE_ADDRESS_MASK_HIGH;
- }
/* If it's not a mapping type we know, then bail. */
if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
@@ -1252,7 +1081,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
unsigned long ll_base = 0;
/* Leave 1M for line length buffer & misc. */
- compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
+ compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0);
if (!compressed_fb) {
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
i915_warn_stolen(dev);
@@ -1273,7 +1102,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
}
if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
- compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
+ compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096,
4096, 0);
if (!compressed_llb) {
i915_warn_stolen(dev);
@@ -1343,10 +1172,8 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(dev->pdev, PCI_D0);
i915_resume(dev);
- drm_kms_helper_poll_enable(dev);
} else {
printk(KERN_ERR "i915: switched off\n");
- drm_kms_helper_poll_disable(dev);
i915_suspend(dev, pmm);
}
}
@@ -1363,20 +1190,14 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
}
static int i915_load_modeset_init(struct drm_device *dev,
- unsigned long prealloc_start,
unsigned long prealloc_size,
unsigned long agp_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int fb_bar = IS_I9XX(dev) ? 2 : 0;
int ret = 0;
- dev->mode_config.fb_base = pci_resource_start(dev->pdev, fb_bar) &
- 0xff000000;
-
- /* Basic memrange allocator for stolen space (aka vram) */
- drm_mm_init(&dev_priv->vram, 0, prealloc_size);
- DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
+ /* Basic memrange allocator for stolen space (aka mm.vram) */
+ drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);
/* We're off and running w/KMS */
dev_priv->mm.suspended = 0;
@@ -1443,12 +1264,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = 1;
- /*
- * Initialize the hardware status page IRQ location.
- */
-
- I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
-
ret = intel_fbdev_init(dev);
if (ret)
goto cleanup_irq;
@@ -1787,9 +1602,9 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
}
}
- div_u64(diff, diff1);
+ diff = div_u64(diff, diff1);
ret = ((m * diff) + c);
- div_u64(ret, 10);
+ ret = div_u64(ret, 10);
dev_priv->last_count1 = total_count;
dev_priv->last_time1 = now;
@@ -1858,7 +1673,7 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
/* More magic constants... */
diff = diff * 1181;
- div_u64(diff, diffms * 10);
+ diff = div_u64(diff, diffms * 10);
dev_priv->gfx_power = diff;
}
@@ -1907,7 +1722,7 @@ static struct drm_i915_private *i915_mch_dev;
* - dev_priv->fmax
* - dev_priv->gpu_busy
*/
-DEFINE_SPINLOCK(mchdev_lock);
+static DEFINE_SPINLOCK(mchdev_lock);
/**
* i915_read_mch_val - return value for IPS use
@@ -2062,7 +1877,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
struct drm_i915_private *dev_priv;
resource_size_t base, size;
int ret = 0, mmio_bar;
- uint32_t agp_size, prealloc_size, prealloc_start;
+ uint32_t agp_size, prealloc_size;
/* i915 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
@@ -2079,7 +1894,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->info = (struct intel_device_info *) flags;
/* Add register map (needed for suspend/resume) */
- mmio_bar = IS_I9XX(dev) ? 0 : 1;
+ mmio_bar = IS_GEN2(dev) ? 1 : 0;
base = pci_resource_start(dev->pdev, mmio_bar);
size = pci_resource_len(dev->pdev, mmio_bar);
@@ -2121,17 +1936,32 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
"performance may suffer.\n");
}
- ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start);
- if (ret)
+ dev_priv->mm.gtt = intel_gtt_get();
+ if (!dev_priv->mm.gtt) {
+ DRM_ERROR("Failed to initialize GTT\n");
+ ret = -ENODEV;
goto out_iomapfree;
-
- if (prealloc_size > intel_max_stolen) {
- DRM_INFO("detected %dM stolen memory, trimming to %dM\n",
- prealloc_size >> 20, intel_max_stolen >> 20);
- prealloc_size = intel_max_stolen;
}
- dev_priv->wq = create_singlethread_workqueue("i915");
+ prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
+ agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+ /* The i915 workqueue is primarily used for batched retirement of
+ * requests (and thus managing bo) once the task has been completed
+ * by the GPU. i915_gem_retire_requests() is called directly when we
+ * need high-priority retirement, such as waiting for an explicit
+ * bo.
+ *
+ * It is also used for periodic low-priority events, such as
+ * idle-timers and hangcheck.
+ *
+ * All tasks on the workqueue are expected to acquire the dev mutex
+ * so there is no point in running more than one instance of the
+ * workqueue at any time: max_active = 1 and NON_REENTRANT.
+ */
+ dev_priv->wq = alloc_workqueue("i915",
+ WQ_UNBOUND | WQ_NON_REENTRANT,
+ 1);
if (dev_priv->wq == NULL) {
DRM_ERROR("Failed to create our workqueue.\n");
ret = -ENOMEM;
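The block comment above is the rationale for switching from create_singlethread_workqueue() to an ordered alloc_workqueue(): every queued task takes the device mutex anyway, so max_active = 1 plus WQ_NON_REENTRANT gives up nothing. A self-contained sketch of the same pattern, with demo_* names that are mine rather than the driver's:

/* Sketch only: a serialized, non-reentrant workqueue in the style used
 * above.  All identifiers prefixed demo_ are hypothetical. */
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_retire(struct work_struct *work)
{
	/* would take the device mutex here, which is why max_active = 1
	 * costs nothing */
}

static DECLARE_DELAYED_WORK(demo_retire_work, demo_retire);

static int demo_init(void)
{
	demo_wq = alloc_workqueue("demo", WQ_UNBOUND | WQ_NON_REENTRANT, 1);
	if (!demo_wq)
		return -ENOMEM;

	/* queue a low-priority periodic task, in the spirit of the
	 * retire/hangcheck work */
	queue_delayed_work(demo_wq, &demo_retire_work, msecs_to_jiffies(1000));
	return 0;
}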
@@ -2166,6 +1996,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
+ intel_setup_gmbus(dev);
+ intel_opregion_setup(dev);
i915_gem_load(dev);
@@ -2212,8 +2044,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_detect_pch(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_load_modeset_init(dev, prealloc_start,
- prealloc_size, agp_size);
+ ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
goto out_workqueue_free;
@@ -2221,7 +2052,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
}
/* Must be done after probing outputs */
- intel_opregion_init(dev, 0);
+ intel_opregion_init(dev);
+ acpi_video_register();
setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
(unsigned long) dev);
@@ -2249,15 +2081,20 @@ free_priv:
int i915_driver_unload(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
-
- i915_destroy_error_state(dev);
+ int ret;
spin_lock(&mchdev_lock);
i915_mch_dev = NULL;
spin_unlock(&mchdev_lock);
- destroy_workqueue(dev_priv->wq);
- del_timer_sync(&dev_priv->hangcheck_timer);
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ DRM_ERROR("failed to idle hardware: %d\n", ret);
+ mutex_unlock(&dev->struct_mutex);
+
+ /* Cancel the retire work handler, which should be idle now. */
+ cancel_delayed_work_sync(&dev_priv->mm.retire_work);
io_mapping_free(dev_priv->mm.gtt_mapping);
if (dev_priv->mm.gtt_mtrr >= 0) {
@@ -2266,7 +2103,10 @@ int i915_driver_unload(struct drm_device *dev)
dev_priv->mm.gtt_mtrr = -1;
}
+ acpi_video_unregister();
+
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ intel_fbdev_fini(dev);
intel_modeset_cleanup(dev);
/*
@@ -2278,20 +2118,28 @@ int i915_driver_unload(struct drm_device *dev)
dev_priv->child_dev = NULL;
dev_priv->child_dev_num = 0;
}
- drm_irq_uninstall(dev);
+
vga_switcheroo_unregister_client(dev->pdev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
+ /* Free error state after interrupts are fully disabled. */
+ del_timer_sync(&dev_priv->hangcheck_timer);
+ cancel_work_sync(&dev_priv->error_work);
+ i915_destroy_error_state(dev);
+
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
if (dev_priv->regs != NULL)
iounmap(dev_priv->regs);
- intel_opregion_free(dev, 0);
+ intel_opregion_fini(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Flush any outstanding unpin_work. */
+ flush_workqueue(dev_priv->wq);
+
i915_gem_free_all_phys_object(dev);
mutex_lock(&dev->struct_mutex);
@@ -2299,34 +2147,35 @@ int i915_driver_unload(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
if (I915_HAS_FBC(dev) && i915_powersave)
i915_cleanup_compression(dev);
- drm_mm_takedown(&dev_priv->vram);
- i915_gem_lastclose(dev);
+ drm_mm_takedown(&dev_priv->mm.vram);
intel_cleanup_overlay(dev);
}
+ intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
+ destroy_workqueue(dev_priv->wq);
+
pci_dev_put(dev_priv->bridge_dev);
kfree(dev->dev_private);
return 0;
}
-int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
- struct drm_i915_file_private *i915_file_priv;
+ struct drm_i915_file_private *file_priv;
DRM_DEBUG_DRIVER("\n");
- i915_file_priv = (struct drm_i915_file_private *)
- kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
-
- if (!i915_file_priv)
+ file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv)
return -ENOMEM;
- file_priv->driver_priv = i915_file_priv;
+ file->driver_priv = file_priv;
- INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
+ spin_lock_init(&file_priv->mm.lock);
+ INIT_LIST_HEAD(&file_priv->mm.request_list);
return 0;
}
@@ -2369,11 +2218,11 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
-void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
+void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
- kfree(i915_file_priv);
+ kfree(file_priv);
}
struct drm_ioctl_desc i915_ioctls[] = {
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6dbe14cc4f74..c3decb2fef4b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -32,6 +32,7 @@
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "intel_drv.h"
#include <linux/console.h>
#include "drm_crtc_helper.h"
@@ -61,86 +62,108 @@ extern int intel_agp_enabled;
.driver_data = (unsigned long) info }
static const struct intel_device_info intel_i830_info = {
- .gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+ .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_845g_info = {
- .gen = 2, .is_i8xx = 1,
+ .gen = 2,
+ .has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i85x_info = {
- .gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+ .gen = 2, .is_i85x = 1, .is_mobile = 1,
.cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i865g_info = {
- .gen = 2, .is_i8xx = 1,
+ .gen = 2,
+ .has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i915g_info = {
- .gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+ .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
- .gen = 3, .is_i9xx = 1, .is_mobile = 1,
+ .gen = 3, .is_mobile = 1,
.cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .supports_tv = 1,
};
static const struct intel_device_info intel_i945g_info = {
- .gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+ .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
- .gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
+ .gen = 3, .is_i945gm = 1, .is_mobile = 1,
.has_hotplug = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .supports_tv = 1,
};
static const struct intel_device_info intel_i965g_info = {
- .gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1,
+ .gen = 4, .is_broadwater = 1,
.has_hotplug = 1,
+ .has_overlay = 1,
};
static const struct intel_device_info intel_i965gm_info = {
- .gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
+ .gen = 4, .is_crestline = 1,
.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+ .has_overlay = 1,
+ .supports_tv = 1,
};
static const struct intel_device_info intel_g33_info = {
- .gen = 3, .is_g33 = 1, .is_i9xx = 1,
+ .gen = 3, .is_g33 = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
+ .has_overlay = 1,
};
static const struct intel_device_info intel_g45_info = {
- .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
.has_pipe_cxsr = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
};
static const struct intel_device_info intel_gm45_info = {
- .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
+ .gen = 4, .is_g4x = 1,
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
.has_pipe_cxsr = 1, .has_hotplug = 1,
+ .supports_tv = 1,
+ .has_bsd_ring = 1,
};
static const struct intel_device_info intel_pineview_info = {
- .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+ .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
+ .has_overlay = 1,
};
static const struct intel_device_info intel_ironlake_d_info = {
- .gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1,
+ .gen = 5, .is_ironlake = 1,
.need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
};
static const struct intel_device_info intel_ironlake_m_info = {
- .gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
+ .gen = 5, .is_ironlake = 1, .is_mobile = 1,
.need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
};
static const struct intel_device_info intel_sandybridge_d_info = {
- .gen = 6, .is_i965g = 1, .is_i9xx = 1,
+ .gen = 6,
.need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
- .gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
+ .gen = 6, .is_mobile = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
};
static const struct pci_device_id pciidlist[] = { /* aka */
@@ -237,7 +260,7 @@ static int i915_drm_freeze(struct drm_device *dev)
i915_save_state(dev);
- intel_opregion_free(dev, 1);
+ intel_opregion_fini(dev);
/* Modeset on resume, not lid events */
dev_priv->modeset_on_lid = 0;
@@ -258,6 +281,8 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
if (state.event == PM_EVENT_PRETHAW)
return 0;
+ drm_kms_helper_poll_disable(dev);
+
error = i915_drm_freeze(dev);
if (error)
return error;
@@ -277,8 +302,7 @@ static int i915_drm_thaw(struct drm_device *dev)
int error = 0;
i915_restore_state(dev);
-
- intel_opregion_init(dev, 1);
+ intel_opregion_setup(dev);
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -294,6 +318,8 @@ static int i915_drm_thaw(struct drm_device *dev)
drm_helper_resume_force_mode(dev);
}
+ intel_opregion_init(dev);
+
dev_priv->modeset_on_lid = 0;
return error;
@@ -301,12 +327,79 @@ static int i915_drm_thaw(struct drm_device *dev)
int i915_resume(struct drm_device *dev)
{
+ int ret;
+
if (pci_enable_device(dev->pdev))
return -EIO;
pci_set_master(dev->pdev);
- return i915_drm_thaw(dev);
+ ret = i915_drm_thaw(dev);
+ if (ret)
+ return ret;
+
+ drm_kms_helper_poll_enable(dev);
+ return 0;
+}
+
+static int i8xx_do_reset(struct drm_device *dev, u8 flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_I85X(dev))
+ return -ENODEV;
+
+ I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
+ POSTING_READ(D_STATE);
+
+ if (IS_I830(dev) || IS_845G(dev)) {
+ I915_WRITE(DEBUG_RESET_I830,
+ DEBUG_RESET_DISPLAY |
+ DEBUG_RESET_RENDER |
+ DEBUG_RESET_FULL);
+ POSTING_READ(DEBUG_RESET_I830);
+ msleep(1);
+
+ I915_WRITE(DEBUG_RESET_I830, 0);
+ POSTING_READ(DEBUG_RESET_I830);
+ }
+
+ msleep(1);
+
+ I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
+ POSTING_READ(D_STATE);
+
+ return 0;
+}
+
+static int i965_reset_complete(struct drm_device *dev)
+{
+ u8 gdrst;
+ pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+ return gdrst & 0x1;
+}
+
+static int i965_do_reset(struct drm_device *dev, u8 flags)
+{
+ u8 gdrst;
+
+ /*
+ * Set the domains we want to reset (GRDOM/bits 2 and 3) as
+ * well as the reset bit (GR/bit 0). Setting the GR bit
+ * triggers the reset; when done, the hardware will clear it.
+ */
+ pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+ pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);
+
+ return wait_for(i965_reset_complete(dev), 500);
+}
+
+static int ironlake_do_reset(struct drm_device *dev, u8 flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
+ return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
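Each reset back-end above bounds its wait at 500 ms through the driver's wait_for(COND, MS) helper instead of spinning indefinitely; the removed i965 path further down in this hunk open-coded the same idea. A generic sketch of that poll-with-timeout shape, reusing i965_reset_complete() from above (this is an illustration of the idiom, not the driver's macro):

/* Bounded polling in the shape of wait_for(i965_reset_complete(dev), 500). */
unsigned long timeout = jiffies + msecs_to_jiffies(500);
int ret = -ETIMEDOUT;

do {
	if (i965_reset_complete(dev)) {	/* the condition being polled */
		ret = 0;
		break;
	}
	msleep(1);
} while (time_before(jiffies, timeout));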
/**
@@ -325,54 +418,39 @@ int i915_resume(struct drm_device *dev)
* - re-init interrupt state
* - re-init display
*/
-int i965_reset(struct drm_device *dev, u8 flags)
+int i915_reset(struct drm_device *dev, u8 flags)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- unsigned long timeout;
- u8 gdrst;
/*
* We really should only reset the display subsystem if we actually
* need to
*/
bool need_display = true;
+ int ret;
mutex_lock(&dev->struct_mutex);
- /*
- * Clear request list
- */
- i915_gem_retire_requests(dev);
-
- if (need_display)
- i915_save_display(dev);
-
- if (IS_I965G(dev) || IS_G4X(dev)) {
- /*
- * Set the domains we want to reset, then the reset bit (bit 0).
- * Clear the reset bit after a while and wait for hardware status
- * bit (bit 1) to be set
- */
- pci_read_config_byte(dev->pdev, GDRST, &gdrst);
- pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0));
- udelay(50);
- pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe);
-
- /* ...we don't want to loop forever though, 500ms should be plenty */
- timeout = jiffies + msecs_to_jiffies(500);
- do {
- udelay(100);
- pci_read_config_byte(dev->pdev, GDRST, &gdrst);
- } while ((gdrst & 0x1) && time_after(timeout, jiffies));
-
- if (gdrst & 0x1) {
- WARN(true, "i915: Failed to reset chip\n");
- mutex_unlock(&dev->struct_mutex);
- return -EIO;
- }
- } else {
- DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
+ i915_gem_reset(dev);
+
+ ret = -ENODEV;
+ if (get_seconds() - dev_priv->last_gpu_reset < 5) {
+ DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
+ } else switch (INTEL_INFO(dev)->gen) {
+ case 5:
+ ret = ironlake_do_reset(dev, flags);
+ break;
+ case 4:
+ ret = i965_do_reset(dev, flags);
+ break;
+ case 2:
+ ret = i8xx_do_reset(dev, flags);
+ break;
+ }
+ dev_priv->last_gpu_reset = get_seconds();
+ if (ret) {
+ DRM_ERROR("Failed to reset chip.\n");
mutex_unlock(&dev->struct_mutex);
- return -ENODEV;
+ return ret;
}
/* Ok, now get things going again... */
@@ -400,13 +478,19 @@ int i965_reset(struct drm_device *dev, u8 flags)
mutex_lock(&dev->struct_mutex);
}
+ mutex_unlock(&dev->struct_mutex);
+
/*
- * Display needs restore too...
+ * Perform a full modeset, since on later generations such as Ironlake
+ * we may need to retrain the display link and cannot simply restore
+ * the register values.
*/
- if (need_display)
- i915_restore_display(dev);
+ if (need_display) {
+ mutex_lock(&dev->mode_config.mutex);
+ drm_helper_resume_force_mode(dev);
+ mutex_unlock(&dev->mode_config.mutex);
+ }
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -524,8 +608,6 @@ static struct drm_driver driver = {
.irq_uninstall = i915_driver_irq_uninstall,
.irq_handler = i915_driver_irq_handler,
.reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.master_create = i915_master_create,
.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index af4a263cf257..73ad8bff2c2a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -34,6 +34,8 @@
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
+#include <linux/i2c.h>
+#include <drm/intel-gtt.h>
/* General customization:
*/
@@ -73,11 +75,9 @@ enum plane {
#define DRIVER_PATCHLEVEL 0
#define WATCH_COHERENCY 0
-#define WATCH_BUF 0
#define WATCH_EXEC 0
-#define WATCH_LRU 0
#define WATCH_RELOC 0
-#define WATCH_INACTIVE 0
+#define WATCH_LISTS 0
#define WATCH_PWRITE 0
#define I915_GEM_PHYS_CURSOR_0 1
@@ -110,8 +110,9 @@ struct intel_opregion {
struct opregion_acpi *acpi;
struct opregion_swsci *swsci;
struct opregion_asle *asle;
- int enabled;
+ void *vbt;
};
+#define OPREGION_SIZE (8*1024)
struct intel_overlay;
struct intel_overlay_error_state;
@@ -125,13 +126,16 @@ struct drm_i915_master_private {
struct drm_i915_fence_reg {
struct drm_gem_object *obj;
struct list_head lru_list;
+ bool gpu;
};
struct sdvo_device_mapping {
+ u8 initialized;
u8 dvo_port;
u8 slave_addr;
u8 dvo_wiring;
- u8 initialized;
+ u8 i2c_pin;
+ u8 i2c_speed;
u8 ddc_pin;
};
@@ -193,13 +197,9 @@ struct drm_i915_display_funcs {
struct intel_device_info {
u8 gen;
u8 is_mobile : 1;
- u8 is_i8xx : 1;
u8 is_i85x : 1;
u8 is_i915g : 1;
- u8 is_i9xx : 1;
u8 is_i945gm : 1;
- u8 is_i965g : 1;
- u8 is_i965gm : 1;
u8 is_g33 : 1;
u8 need_gfx_hws : 1;
u8 is_g4x : 1;
@@ -212,9 +212,14 @@ struct intel_device_info {
u8 has_pipe_cxsr : 1;
u8 has_hotplug : 1;
u8 cursor_needs_physical : 1;
+ u8 has_overlay : 1;
+ u8 overlay_needs_physical : 1;
+ u8 supports_tv : 1;
+ u8 has_bsd_ring : 1;
};
enum no_fbc_reason {
+ FBC_NO_OUTPUT, /* no outputs enabled to compress */
FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
FBC_MODE_TOO_LARGE, /* mode too large for compression */
@@ -241,6 +246,12 @@ typedef struct drm_i915_private {
void __iomem *regs;
+ struct intel_gmbus {
+ struct i2c_adapter adapter;
+ struct i2c_adapter *force_bit;
+ u32 reg0;
+ } *gmbus;
+
struct pci_dev *bridge_dev;
struct intel_ring_buffer render_ring;
struct intel_ring_buffer bsd_ring;
@@ -263,6 +274,9 @@ typedef struct drm_i915_private {
int front_offset;
int current_page;
int page_flipping;
+#define I915_DEBUG_READ (1<<0)
+#define I915_DEBUG_WRITE (1<<1)
+ unsigned long debug_flags;
wait_queue_head_t irq_queue;
atomic_t irq_received;
@@ -289,24 +303,21 @@ typedef struct drm_i915_private {
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
int num_pipe;
- u32 flush_rings;
-#define FLUSH_RENDER_RING 0x1
-#define FLUSH_BSD_RING 0x2
/* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
+#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd;
uint32_t last_instdone;
uint32_t last_instdone1;
- struct drm_mm vram;
-
unsigned long cfb_size;
unsigned long cfb_pitch;
+ unsigned long cfb_offset;
int cfb_fence;
int cfb_plane;
+ int cfb_y;
int irq_enabled;
@@ -316,8 +327,7 @@ typedef struct drm_i915_private {
struct intel_overlay *overlay;
/* LVDS info */
- int backlight_duty_cycle; /* restore backlight to this value */
- bool panel_wants_dither;
+ int backlight_level; /* restore backlight to this value */
struct drm_display_mode *panel_fixed_mode;
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -328,13 +338,22 @@ typedef struct drm_i915_private {
unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
- unsigned int edp_support:1;
int lvds_ssc_freq;
- int edp_bpp;
+
+ struct {
+ u8 rate:4;
+ u8 lanes:4;
+ u8 preemphasis:4;
+ u8 vswing:4;
+
+ u8 initialized:1;
+ u8 support:1;
+ u8 bpp:6;
+ } edp;
struct notifier_block lid_notifier;
- int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
+ int crt_ddc_pin;
struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -344,6 +363,7 @@ typedef struct drm_i915_private {
spinlock_t error_lock;
struct drm_i915_error_state *first_error;
struct work_struct error_work;
+ struct completion error_completion;
struct workqueue_struct *wq;
/* Display functions */
@@ -507,6 +527,11 @@ typedef struct drm_i915_private {
u32 saveMCHBAR_RENDER_STANDBY;
struct {
+ /** Bridge to intel-gtt-ko */
+ struct intel_gtt *gtt;
+ /** Memory allocator for GTT stolen memory */
+ struct drm_mm vram;
+ /** Memory allocator for GTT */
struct drm_mm gtt_space;
struct io_mapping *gtt_mapping;
@@ -521,8 +546,6 @@ typedef struct drm_i915_private {
*/
struct list_head shrink_list;
- spinlock_t active_list_lock;
-
/**
* List of objects which are not in the ringbuffer but which
* still have a write_domain which needs to be flushed before
@@ -555,6 +578,12 @@ typedef struct drm_i915_private {
*/
struct list_head inactive_list;
+ /**
+ * LRU list of objects which are not in the ringbuffer but
+ * are still pinned in the GTT.
+ */
+ struct list_head pinned_list;
+
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
@@ -611,6 +640,17 @@ typedef struct drm_i915_private {
/* storage for physical objects */
struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
+
+ uint32_t flush_rings;
+
+ /* accounting, useful for userland debugging */
+ size_t object_memory;
+ size_t pin_memory;
+ size_t gtt_memory;
+ size_t gtt_total;
+ u32 object_count;
+ u32 pin_count;
+ u32 gtt_count;
} mm;
struct sdvo_device_mapping sdvo_mappings[2];
/* indicate whether the LVDS_BORDER should be enabled or not */
@@ -626,8 +666,6 @@ typedef struct drm_i915_private {
/* Reclocking support */
bool render_reclock_avail;
bool lvds_downclock_avail;
- /* indicate whether the LVDS EDID is OK */
- bool lvds_edid_good;
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
struct work_struct idle_work;
@@ -661,6 +699,8 @@ typedef struct drm_i915_private {
struct drm_mm_node *compressed_fb;
struct drm_mm_node *compressed_llb;
+ unsigned long last_gpu_reset;
+
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
} drm_i915_private_t;
@@ -816,12 +856,14 @@ struct drm_i915_gem_request {
/** global list entry for this request */
struct list_head list;
+ struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_list;
};
struct drm_i915_file_private {
struct {
+ struct spinlock lock;
struct list_head request_list;
} mm;
};
@@ -862,7 +904,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *boxes,
int i, int DR1, int DR4);
-extern int i965_reset(struct drm_device *dev, u8 flags);
+extern int i915_reset(struct drm_device *dev, u8 flags);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -871,7 +913,6 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
-void i915_destroy_error_state(struct drm_device *dev);
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -908,6 +949,12 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void intel_enable_asle (struct drm_device *dev);
+#ifdef CONFIG_DEBUG_FS
+extern void i915_destroy_error_state(struct drm_device *dev);
+#else
+#define i915_destroy_error_state(x)
+#endif
+
/* i915_mem.c */
extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -922,6 +969,7 @@ extern void i915_mem_takedown(struct mem_block **heap);
extern void i915_mem_release(struct drm_device * dev,
struct drm_file *file_priv, struct mem_block *heap);
/* i915_gem.c */
+int i915_gem_check_is_wedged(struct drm_device *dev);
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -972,13 +1020,22 @@ void i915_gem_object_unpin(struct drm_gem_object *obj);
int i915_gem_object_unbind(struct drm_gem_object *obj);
void i915_gem_release_mmap(struct drm_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
-uint32_t i915_get_gem_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring);
-bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
-int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
-int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
+
+/**
+ * Returns true if seq1 is later than seq2.
+ */
+static inline bool
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+ return (int32_t)(seq1 - seq2) >= 0;
+}
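The new inline above relies on unsigned wrap-around: subtracting the two counters and interpreting the difference as a signed 32-bit value keeps the ordering correct across a counter overflow, as long as the values are within 2^31 of each other. A throwaway userspace check of that property (my own illustration, not driver code):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;	/* same test as the inline above */
}

int main(void)
{
	assert(seqno_passed(100, 50));			/* ordinary ordering */
	assert(!seqno_passed(50, 100));
	assert(seqno_passed(5, UINT32_MAX - 5));	/* holds across the wrap */
	assert(!seqno_passed(UINT32_MAX - 5, 5));
	return 0;
}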
+
+int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
+ bool interruptible);
+int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
+ bool interruptible);
void i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_work_handler(struct work_struct *work);
+void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_gem_object *obj);
int i915_gem_object_set_domain(struct drm_gem_object *obj,
uint32_t read_domains,
@@ -990,16 +1047,18 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
int i915_gpu_idle(struct drm_device *dev);
int i915_gem_idle(struct drm_device *dev);
uint32_t i915_add_request(struct drm_device *dev,
- struct drm_file *file_priv,
- uint32_t flush_domains,
- struct intel_ring_buffer *ring);
+ struct drm_file *file_priv,
+ struct drm_i915_gem_request *request,
+ struct intel_ring_buffer *ring);
int i915_do_wait_request(struct drm_device *dev,
- uint32_t seqno, int interruptible,
- struct intel_ring_buffer *ring);
+ uint32_t seqno,
+ bool interruptible,
+ struct intel_ring_buffer *ring);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
int write);
-int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
+int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
+ bool pipelined);
int i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj,
int id,
@@ -1007,10 +1066,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
-int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
-void i915_gem_object_put_pages(struct drm_gem_object *obj);
void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
-int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
void i915_gem_shrinker_init(void);
void i915_gem_shrinker_exit(void);
@@ -1032,15 +1088,14 @@ bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
const char *where, uint32_t mark);
-#if WATCH_INACTIVE
-void i915_verify_inactive(struct drm_device *dev, char *file, int line);
+#if WATCH_LISTS
+int i915_verify_lists(struct drm_device *dev);
#else
-#define i915_verify_inactive(dev, file, line)
+#define i915_verify_lists(dev) 0
#endif
void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
const char *where, uint32_t mark);
-void i915_dump_lru(struct drm_device *dev, const char *where);
/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
@@ -1054,19 +1109,31 @@ extern int i915_restore_state(struct drm_device *dev);
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
+/* intel_i2c.c */
+extern int intel_setup_gmbus(struct drm_device *dev);
+extern void intel_teardown_gmbus(struct drm_device *dev);
+extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
+{
+ return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
+}
+extern void intel_i2c_reset(struct drm_device *dev);
+
+/* intel_opregion.c */
+extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
-/* i915_opregion.c */
-extern int intel_opregion_init(struct drm_device *dev, int resume);
-extern void intel_opregion_free(struct drm_device *dev, int suspend);
-extern void opregion_asle_intr(struct drm_device *dev);
-extern void ironlake_opregion_gse_intr(struct drm_device *dev);
-extern void opregion_enable_asle(struct drm_device *dev);
+extern void intel_opregion_init(struct drm_device *dev);
+extern void intel_opregion_fini(struct drm_device *dev);
+extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern void intel_opregion_gse_intr(struct drm_device *dev);
+extern void intel_opregion_enable_asle(struct drm_device *dev);
#else
-static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; }
-static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; }
-static inline void opregion_asle_intr(struct drm_device *dev) { return; }
-static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; }
-static inline void opregion_enable_asle(struct drm_device *dev) { return; }
+static inline void intel_opregion_init(struct drm_device *dev) { return; }
+static inline void intel_opregion_fini(struct drm_device *dev) { return; }
+static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
+static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
#endif
/* modesetting */
@@ -1084,8 +1151,10 @@ extern void intel_detect_pch (struct drm_device *dev);
extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
/* overlay */
+#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
+#endif
/**
* Lock test for when it's just for synchronization of ring access.
@@ -1099,8 +1168,26 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
LOCK_TEST_WITH_RETURN(dev, file_priv); \
} while (0)
-#define I915_READ(reg) readl(dev_priv->regs + (reg))
-#define I915_WRITE(reg, val) writel(val, dev_priv->regs + (reg))
+static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u32 val;
+
+ val = readl(dev_priv->regs + reg);
+ if (dev_priv->debug_flags & I915_DEBUG_READ)
+ printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg);
+ return val;
+}
+
+static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
+ u32 val)
+{
+ writel(val, dev_priv->regs + reg);
+ if (dev_priv->debug_flags & I915_DEBUG_WRITE)
+ printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg);
+}
+
+#define I915_READ(reg) i915_read(dev_priv, (reg))
+#define I915_WRITE(reg, val) i915_write(dev_priv, (reg), (val))
#define I915_READ16(reg) readw(dev_priv->regs + (reg))
#define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg))
#define I915_READ8(reg) readb(dev_priv->regs + (reg))
@@ -1110,6 +1197,11 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
#define POSTING_READ(reg) (void)I915_READ(reg)
#define POSTING_READ16(reg) (void)I915_READ16(reg)
+#define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \
+ I915_DEBUG_WRITE)
+#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \
+ I915_DEBUG_WRITE))
+
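With the readl/writel wrappers above, register traffic can be dumped on demand by toggling the per-device debug_flags. A short usage fragment; HWS_PGA and dma_status_page are borrowed from the status-page code earlier in this patch and stand in for whatever registers are under suspicion:

/* Within driver code that already has dev_priv in scope: */
I915_DEBUG_ENABLE_IO();
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
POSTING_READ(HWS_PGA);		/* the read-back is logged as well */
I915_DEBUG_DISABLE_IO();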
#define I915_VERBOSE 0
#define BEGIN_LP_RING(n) do { \
@@ -1166,8 +1258,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
-#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g)
-#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm)
#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
@@ -1179,7 +1269,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
-#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
@@ -1188,26 +1277,27 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
-#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev))
+#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
-#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
+#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev) (IS_I9XX(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
-#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
- !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \
- !IS_GEN6(dev))
+#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
+#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
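The macro rework in this header trades chip-name tests (IS_I965G, IS_I9XX and friends) for the generation number and explicit feature bits, and the rest of the patch follows the same shape. A few representative conversions, collected from other hunks in this diff rather than invented here:

/* Post-rework idiom: compare generations or query a feature bit,
 * never a chip name, when deciding on a code path. */
mmio_bar = IS_GEN2(dev) ? 1 : 0;			/* was: IS_I9XX(dev) ? 0 : 1 */
reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; /* was: IS_I965G(dev) */
if (HAS_BSD(dev))					/* was: IS_IRONLAKE(dev) || IS_G4X(dev) */
	/* bring up the BSD ring */;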
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4cdf74264ee8..100a7537980e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -37,7 +37,9 @@
#include <linux/intel-gtt.h>
static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+
+static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
+ bool pipelined);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
@@ -46,7 +48,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
uint64_t offset,
uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+ bool interruptible);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
@@ -55,9 +58,111 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o
struct drm_file *file_priv);
static void i915_gem_free_object_tail(struct drm_gem_object *obj);
+static int
+i915_gem_object_get_pages(struct drm_gem_object *obj,
+ gfp_t gfpmask);
+
+static void
+i915_gem_object_put_pages(struct drm_gem_object *obj);
+
static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);
+/* some bookkeeping */
+static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
+ size_t size)
+{
+ dev_priv->mm.object_count++;
+ dev_priv->mm.object_memory += size;
+}
+
+static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
+ size_t size)
+{
+ dev_priv->mm.object_count--;
+ dev_priv->mm.object_memory -= size;
+}
+
+static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
+ size_t size)
+{
+ dev_priv->mm.gtt_count++;
+ dev_priv->mm.gtt_memory += size;
+}
+
+static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
+ size_t size)
+{
+ dev_priv->mm.gtt_count--;
+ dev_priv->mm.gtt_memory -= size;
+}
+
+static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
+ size_t size)
+{
+ dev_priv->mm.pin_count++;
+ dev_priv->mm.pin_memory += size;
+}
+
+static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
+ size_t size)
+{
+ dev_priv->mm.pin_count--;
+ dev_priv->mm.pin_memory -= size;
+}
+
+int
+i915_gem_check_is_wedged(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct completion *x = &dev_priv->error_completion;
+ unsigned long flags;
+ int ret;
+
+ if (!atomic_read(&dev_priv->mm.wedged))
+ return 0;
+
+ ret = wait_for_completion_interruptible(x);
+ if (ret)
+ return ret;
+
+ /* Success, we reset the GPU! */
+ if (!atomic_read(&dev_priv->mm.wedged))
+ return 0;
+
+ /* GPU is hung, bump the completion count to account for
+ * the token we just consumed so that we never hit zero and
+ * end up waiting upon a subsequent completion event that
+ * will never happen.
+ */
+ spin_lock_irqsave(&x->wait.lock, flags);
+ x->done++;
+ spin_unlock_irqrestore(&x->wait.lock, flags);
+ return -EIO;
+}
+
+static int i915_mutex_lock_interruptible(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_gem_check_is_wedged(dev);
+ if (ret)
+ return ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ if (atomic_read(&dev_priv->mm.wedged)) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EAGAIN;
+ }
+
+ WARN_ON(i915_verify_lists(dev));
+ return 0;
+}
+
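The two helpers above define the new entry protocol for GEM ioctls: check for a wedged GPU (blocking until a pending reset resolves), take struct_mutex interruptibly, then bail with -EAGAIN if the GPU wedged in the meantime. The calling pattern, as used by the converted pread/pwrite paths later in this file:

ret = i915_mutex_lock_interruptible(dev);
if (ret)
	return ret;	/* signal delivery, or the GPU is wedged */

/* ... operate on GEM state under dev->struct_mutex ... */

mutex_unlock(&dev->struct_mutex);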
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
{
@@ -66,7 +171,8 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
obj_priv->pin_count == 0;
}
-int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+int i915_gem_do_init(struct drm_device *dev,
+ unsigned long start,
unsigned long end)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -80,7 +186,7 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
drm_mm_init(&dev_priv->mm.gtt_space, start,
end - start);
- dev->gtt_total = (uint32_t) (end - start);
+ dev_priv->mm.gtt_total = end - start;
return 0;
}
@@ -103,14 +209,16 @@ int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_get_aperture *args = data;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
- args->aper_size = dev->gtt_total;
- args->aper_available_size = (args->aper_size -
- atomic_read(&dev->pin_memory));
+ mutex_lock(&dev->struct_mutex);
+ args->aper_size = dev_priv->mm.gtt_total;
+ args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -263,7 +371,9 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
ret = i915_gem_object_get_pages(obj, 0);
if (ret != 0)
@@ -382,7 +492,9 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto fail_put_user_pages;
ret = i915_gem_object_get_pages_or_evict(obj);
if (ret)
@@ -462,21 +574,27 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_pread *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
- int ret;
+ int ret = 0;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -ENOENT;
obj_priv = to_intel_bo(obj);
- /* Bounds check source.
- *
- * XXX: This could use review for overflow issues...
- */
- if (args->offset > obj->size || args->size > obj->size ||
- args->offset + args->size > obj->size) {
- drm_gem_object_unreference_unlocked(obj);
- return -EINVAL;
+ /* Bounds check source. */
+ if (args->offset > obj->size || args->size > obj->size - args->offset) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (args->size == 0)
+ goto out;
+
+ if (!access_ok(VERIFY_WRITE,
+ (char __user *)(uintptr_t)args->data_ptr,
+ args->size)) {
+ ret = -EFAULT;
+ goto out;
}
if (i915_gem_object_needs_bit17_swizzle(obj)) {
@@ -488,8 +606,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
file_priv);
}
+out:
drm_gem_object_unreference_unlocked(obj);
-
return ret;
}
@@ -578,11 +696,11 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- if (!access_ok(VERIFY_READ, user_data, remain))
- return -EFAULT;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
- mutex_lock(&dev->struct_mutex);
ret = i915_gem_object_pin(obj, 0);
if (ret) {
mutex_unlock(&dev->struct_mutex);
@@ -677,7 +795,10 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
goto out_unpin_pages;
}
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto out_unpin_pages;
+
ret = i915_gem_object_pin(obj, 0);
if (ret)
goto out_unlock;
@@ -751,7 +872,9 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
ret = i915_gem_object_get_pages(obj, 0);
if (ret != 0)
@@ -847,7 +970,9 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto fail_put_user_pages;
ret = i915_gem_object_get_pages_or_evict(obj);
if (ret)
@@ -932,14 +1057,20 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
obj_priv = to_intel_bo(obj);
- /* Bounds check destination.
- *
- * XXX: This could use review for overflow issues...
- */
- if (args->offset > obj->size || args->size > obj->size ||
- args->offset + args->size > obj->size) {
- drm_gem_object_unreference_unlocked(obj);
- return -EINVAL;
+ /* Bounds check destination. */
+ if (args->offset > obj->size || args->size > obj->size - args->offset) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (args->size == 0)
+ goto out;
+
+ if (!access_ok(VERIFY_READ,
+ (char __user *)(uintptr_t)args->data_ptr,
+ args->size)) {
+ ret = -EFAULT;
+ goto out;
}
/* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -951,7 +1082,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (obj_priv->phys_obj)
ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
else if (obj_priv->tiling_mode == I915_TILING_NONE &&
- dev->gtt_total != 0 &&
+ obj_priv->gtt_space &&
obj->write_domain != I915_GEM_DOMAIN_CPU) {
ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
if (ret == -EFAULT) {
@@ -973,8 +1104,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
DRM_INFO("pwrite failed %d\n", ret);
#endif
+out:
drm_gem_object_unreference_unlocked(obj);
-
return ret;
}
@@ -1015,14 +1146,14 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
obj_priv = to_intel_bo(obj);
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
intel_mark_busy(dev, obj);
-#if WATCH_BUF
- DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
- obj, obj->size, read_domains, write_domain);
-#endif
if (read_domains & I915_GEM_DOMAIN_GTT) {
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
@@ -1046,7 +1177,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
-
/* Maintain LRU order of "inactive" objects */
if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
@@ -1065,27 +1195,23 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_sw_finish *args = data;
struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
int ret = 0;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
- mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL) {
- mutex_unlock(&dev->struct_mutex);
+ if (obj == NULL)
return -ENOENT;
- }
-#if WATCH_BUF
- DRM_INFO("%s: sw_finish %d (%p %zd)\n",
- __func__, args->handle, obj, obj->size);
-#endif
- obj_priv = to_intel_bo(obj);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
/* Pinned buffers may be scanout, so flush the cache */
- if (obj_priv->pin_count)
+ if (to_intel_bo(obj)->pin_count)
i915_gem_object_flush_cpu_write_domain(obj);
drm_gem_object_unreference(obj);
@@ -1177,7 +1303,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Need a new fence register? */
if (obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj);
+ ret = i915_gem_object_get_fence_reg(obj, true);
if (ret)
goto unlock;
}
@@ -1242,7 +1368,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
obj->size / PAGE_SIZE, 0, 0);
if (!list->file_offset_node) {
DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
- ret = -ENOMEM;
+ ret = -ENOSPC;
goto out_free_list;
}
@@ -1254,9 +1380,9 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
}
list->hash.key = list->file_offset_node->start;
- if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
+ ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+ if (ret) {
DRM_ERROR("failed to add to map hash\n");
- ret = -ENOMEM;
goto out_free_mm;
}
@@ -1341,14 +1467,14 @@ i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
* Minimum alignment is 4k (GTT page size), but might be greater
* if a fence register is needed for the object.
*/
- if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
+ if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
return 4096;
/*
* Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object.
*/
- if (IS_I9XX(dev))
+ if (INTEL_INFO(dev)->gen == 3)
start = 1024*1024;
else
start = 512*1024;
@@ -1390,7 +1516,11 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
if (obj == NULL)
return -ENOENT;
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
obj_priv = to_intel_bo(obj);
@@ -1432,7 +1562,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
return 0;
}
-void
+static void
i915_gem_object_put_pages(struct drm_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -1466,13 +1596,24 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
obj_priv->pages = NULL;
}
+static uint32_t
+i915_gem_next_request_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ ring->outstanding_lazy_request = true;
+ return dev_priv->next_seqno;
+}
+
static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
+i915_gem_object_move_to_active(struct drm_gem_object *obj,
struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
+
BUG_ON(ring == NULL);
obj_priv->ring = ring;
@@ -1481,10 +1622,9 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
drm_gem_object_reference(obj);
obj_priv->active = 1;
}
+
/* Move from whatever list we were on to the tail of execution. */
- spin_lock(&dev_priv->mm.active_list_lock);
list_move_tail(&obj_priv->list, &ring->active_list);
- spin_unlock(&dev_priv->mm.active_list_lock);
obj_priv->last_rendering_seqno = seqno;
}
@@ -1534,9 +1674,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- i915_verify_inactive(dev, __FILE__, __LINE__);
if (obj_priv->pin_count != 0)
- list_del_init(&obj_priv->list);
+ list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list);
else
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
@@ -1548,12 +1687,12 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
obj_priv->active = 0;
drm_gem_object_unreference(obj);
}
- i915_verify_inactive(dev, __FILE__, __LINE__);
+ WARN_ON(i915_verify_lists(dev));
}
static void
i915_gem_process_flushing_list(struct drm_device *dev,
- uint32_t flush_domains, uint32_t seqno,
+ uint32_t flush_domains,
struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -1564,14 +1703,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
gpu_write_list) {
struct drm_gem_object *obj = &obj_priv->base;
- if ((obj->write_domain & flush_domains) ==
- obj->write_domain &&
- obj_priv->ring->ring_flag == ring->ring_flag) {
+ if (obj->write_domain & flush_domains &&
+ obj_priv->ring == ring) {
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = 0;
list_del_init(&obj_priv->gpu_write_list);
- i915_gem_object_move_to_active(obj, seqno, ring);
+ i915_gem_object_move_to_active(obj, ring);
/* update the fence lru list */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -1589,23 +1727,27 @@ i915_gem_process_flushing_list(struct drm_device *dev,
}
uint32_t
-i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
- uint32_t flush_domains, struct intel_ring_buffer *ring)
+i915_add_request(struct drm_device *dev,
+ struct drm_file *file,
+ struct drm_i915_gem_request *request,
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_file_private *i915_file_priv = NULL;
- struct drm_i915_gem_request *request;
+ struct drm_i915_file_private *file_priv = NULL;
uint32_t seqno;
int was_empty;
- if (file_priv != NULL)
- i915_file_priv = file_priv->driver_priv;
+ if (file != NULL)
+ file_priv = file->driver_priv;
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return 0;
+ if (request == NULL) {
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return 0;
+ }
- seqno = ring->add_request(dev, ring, file_priv, flush_domains);
+ seqno = ring->add_request(dev, ring, 0);
+ ring->outstanding_lazy_request = false;
request->seqno = seqno;
request->ring = ring;
@@ -1613,23 +1755,20 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
- if (i915_file_priv) {
+ if (file_priv) {
+ spin_lock(&file_priv->mm.lock);
+ request->file_priv = file_priv;
list_add_tail(&request->client_list,
- &i915_file_priv->mm.request_list);
- } else {
- INIT_LIST_HEAD(&request->client_list);
+ &file_priv->mm.request_list);
+ spin_unlock(&file_priv->mm.lock);
}
- /* Associate any objects on the flushing list matching the write
- * domain we're flushing with our flush.
- */
- if (flush_domains != 0)
- i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
-
if (!dev_priv->mm.suspended) {
- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+ mod_timer(&dev_priv->hangcheck_timer,
+ jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
if (was_empty)
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ queue_delayed_work(dev_priv->wq,
+ &dev_priv->mm.retire_work, HZ);
}
return seqno;
}
@@ -1640,91 +1779,105 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
* Ensures that all commands in the ring are finished
* before signalling the CPU
*/
-static uint32_t
+static void
i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
{
uint32_t flush_domains = 0;
/* The sampler always gets flushed on i965 (sigh) */
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
flush_domains |= I915_GEM_DOMAIN_SAMPLER;
ring->flush(dev, ring,
I915_GEM_DOMAIN_COMMAND, flush_domains);
- return flush_domains;
}
-/**
- * Moves buffers associated only with the given active seqno from the active
- * to inactive list, potentially freeing them.
- */
-static void
-i915_gem_retire_request(struct drm_device *dev,
- struct drm_i915_gem_request *request)
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *file_priv = request->file_priv;
- trace_i915_gem_request_retire(dev, request->seqno);
+ if (!file_priv)
+ return;
- /* Move any buffers on the active list that are no longer referenced
- * by the ringbuffer to the flushing/inactive lists as appropriate.
- */
- spin_lock(&dev_priv->mm.active_list_lock);
- while (!list_empty(&request->ring->active_list)) {
- struct drm_gem_object *obj;
+ spin_lock(&file_priv->mm.lock);
+ list_del(&request->client_list);
+ request->file_priv = NULL;
+ spin_unlock(&file_priv->mm.lock);
+}
+
+static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
+{
+ while (!list_empty(&ring->request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&ring->request_list,
+ struct drm_i915_gem_request,
+ list);
+
+ list_del(&request->list);
+ i915_gem_request_remove_from_client(request);
+ kfree(request);
+ }
+
+ while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj_priv;
- obj_priv = list_first_entry(&request->ring->active_list,
+ obj_priv = list_first_entry(&ring->active_list,
struct drm_i915_gem_object,
list);
- obj = &obj_priv->base;
-
- /* If the seqno being retired doesn't match the oldest in the
- * list, then the oldest in the list must still be newer than
- * this seqno.
- */
- if (obj_priv->last_rendering_seqno != request->seqno)
- goto out;
-#if WATCH_LRU
- DRM_INFO("%s: retire %d moves to inactive list %p\n",
- __func__, request->seqno, obj);
-#endif
-
- if (obj->write_domain != 0)
- i915_gem_object_move_to_flushing(obj);
- else {
- /* Take a reference on the object so it won't be
- * freed while the spinlock is held. The list
- * protection for this spinlock is safe when breaking
- * the lock like this since the next thing we do
- * is just get the head of the list again.
- */
- drm_gem_object_reference(obj);
- i915_gem_object_move_to_inactive(obj);
- spin_unlock(&dev_priv->mm.active_list_lock);
- drm_gem_object_unreference(obj);
- spin_lock(&dev_priv->mm.active_list_lock);
- }
+ obj_priv->base.write_domain = 0;
+ list_del_init(&obj_priv->gpu_write_list);
+ i915_gem_object_move_to_inactive(&obj_priv->base);
}
-out:
- spin_unlock(&dev_priv->mm.active_list_lock);
}
-/**
- * Returns true if seq1 is later than seq2.
- */
-bool
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+void i915_gem_reset(struct drm_device *dev)
{
- return (int32_t)(seq1 - seq2) >= 0;
-}
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ int i;
-uint32_t
-i915_get_gem_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- return ring->get_gem_seqno(dev, ring);
+ i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
+ if (HAS_BSD(dev))
+ i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
+
+ /* Remove anything from the flushing lists. The GPU cache is likely
+ * to be lost on reset along with the data, so simply move the
+ * lost bo to the inactive list.
+ */
+ while (!list_empty(&dev_priv->mm.flushing_list)) {
+ obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+ struct drm_i915_gem_object,
+ list);
+
+ obj_priv->base.write_domain = 0;
+ list_del_init(&obj_priv->gpu_write_list);
+ i915_gem_object_move_to_inactive(&obj_priv->base);
+ }
+
+ /* Move everything out of the GPU domains to ensure we do any
+ * necessary invalidation upon reuse.
+ */
+ list_for_each_entry(obj_priv,
+ &dev_priv->mm.inactive_list,
+ list)
+ {
+ obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+ }
+
+ /* The fence registers are invalidated so clear them out */
+ for (i = 0; i < 16; i++) {
+ struct drm_i915_fence_reg *reg;
+
+ reg = &dev_priv->fence_regs[i];
+ if (!reg->obj)
+ continue;
+
+ i915_gem_clear_fence_reg(reg->obj);
+ }
}
/**
@@ -1737,38 +1890,58 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t seqno;
- if (!ring->status_page.page_addr
- || list_empty(&ring->request_list))
+ if (!ring->status_page.page_addr ||
+ list_empty(&ring->request_list))
return;
- seqno = i915_get_gem_seqno(dev, ring);
+ WARN_ON(i915_verify_lists(dev));
+ seqno = ring->get_seqno(dev, ring);
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
- uint32_t retiring_seqno;
request = list_first_entry(&ring->request_list,
struct drm_i915_gem_request,
list);
- retiring_seqno = request->seqno;
- if (i915_seqno_passed(seqno, retiring_seqno) ||
- atomic_read(&dev_priv->mm.wedged)) {
- i915_gem_retire_request(dev, request);
+ if (!i915_seqno_passed(seqno, request->seqno))
+ break;
+
+ trace_i915_gem_request_retire(dev, request->seqno);
+
+ list_del(&request->list);
+ i915_gem_request_remove_from_client(request);
+ kfree(request);
+ }
+
+ /* Move any buffers on the active list that are no longer referenced
+ * by the ringbuffer to the flushing/inactive lists as appropriate.
+ */
+ while (!list_empty(&ring->active_list)) {
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ list);
- list_del(&request->list);
- list_del(&request->client_list);
- kfree(request);
- } else
+ if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
break;
+
+ obj = &obj_priv->base;
+ if (obj->write_domain != 0)
+ i915_gem_object_move_to_flushing(obj);
+ else
+ i915_gem_object_move_to_inactive(obj);
}
if (unlikely (dev_priv->trace_irq_seqno &&
i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-
ring->user_irq_put(dev, ring);
dev_priv->trace_irq_seqno = 0;
}
+
+ WARN_ON(i915_verify_lists(dev));
}
void
@@ -1795,7 +1968,7 @@ i915_gem_retire_requests(struct drm_device *dev)
i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
}
-void
+static void
i915_gem_retire_work_handler(struct work_struct *work)
{
drm_i915_private_t *dev_priv;
@@ -1805,7 +1978,12 @@ i915_gem_retire_work_handler(struct work_struct *work)
mm.retire_work.work);
dev = dev_priv->dev;
- mutex_lock(&dev->struct_mutex);
+ /* Come back later if the device is busy... */
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ return;
+ }
+
i915_gem_retire_requests(dev);
if (!dev_priv->mm.suspended &&
@@ -1818,7 +1996,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
int
i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
- int interruptible, struct intel_ring_buffer *ring)
+ bool interruptible, struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 ier;
@@ -1827,9 +2005,16 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
BUG_ON(seqno == 0);
if (atomic_read(&dev_priv->mm.wedged))
- return -EIO;
+ return -EAGAIN;
+
+ if (ring->outstanding_lazy_request) {
+ seqno = i915_add_request(dev, NULL, NULL, ring);
+ if (seqno == 0)
+ return -ENOMEM;
+ }
+ BUG_ON(seqno == dev_priv->next_seqno);
- if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
+ if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
if (HAS_PCH_SPLIT(dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
@@ -1848,12 +2033,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
if (interruptible)
ret = wait_event_interruptible(ring->irq_queue,
i915_seqno_passed(
- ring->get_gem_seqno(dev, ring), seqno)
+ ring->get_seqno(dev, ring), seqno)
|| atomic_read(&dev_priv->mm.wedged));
else
wait_event(ring->irq_queue,
i915_seqno_passed(
- ring->get_gem_seqno(dev, ring), seqno)
+ ring->get_seqno(dev, ring), seqno)
|| atomic_read(&dev_priv->mm.wedged));
ring->user_irq_put(dev, ring);
@@ -1862,11 +2047,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
trace_i915_gem_request_wait_end(dev, seqno);
}
if (atomic_read(&dev_priv->mm.wedged))
- ret = -EIO;
+ ret = -EAGAIN;
if (ret && ret != -ERESTARTSYS)
- DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
- __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
+ DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
+ __func__, ret, seqno, ring->get_seqno(dev, ring),
+ dev_priv->next_seqno);
/* Directly dispatch request retiring. While we have the work queue
* to handle this, the waiter on a request often wants an associated
@@ -1885,27 +2071,44 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
*/
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
return i915_do_wait_request(dev, seqno, 1, ring);
}
static void
+i915_gem_flush_ring(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct intel_ring_buffer *ring,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains)
+{
+ ring->flush(dev, ring, invalidate_domains, flush_domains);
+ i915_gem_process_flushing_list(dev, flush_domains, ring);
+}
+
+static void
i915_gem_flush(struct drm_device *dev,
+ struct drm_file *file_priv,
uint32_t invalidate_domains,
- uint32_t flush_domains)
+ uint32_t flush_domains,
+ uint32_t flush_rings)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+
if (flush_domains & I915_GEM_DOMAIN_CPU)
drm_agp_chipset_flush(dev);
- dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
- invalidate_domains,
- flush_domains);
- if (HAS_BSD(dev))
- dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
- invalidate_domains,
- flush_domains);
+ if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
+ if (flush_rings & RING_RENDER)
+ i915_gem_flush_ring(dev, file_priv,
+ &dev_priv->render_ring,
+ invalidate_domains, flush_domains);
+ if (flush_rings & RING_BSD)
+ i915_gem_flush_ring(dev, file_priv,
+ &dev_priv->bsd_ring,
+ invalidate_domains, flush_domains);
+ }
}
/**
@@ -1913,7 +2116,8 @@ i915_gem_flush(struct drm_device *dev,
* safe to unbind from the GTT or access from the CPU.
*/
static int
-i915_gem_object_wait_rendering(struct drm_gem_object *obj)
+i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+ bool interruptible)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -1928,13 +2132,11 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
* it.
*/
if (obj_priv->active) {
-#if WATCH_BUF
- DRM_INFO("%s: object %p wait for seqno %08x\n",
- __func__, obj, obj_priv->last_rendering_seqno);
-#endif
- ret = i915_wait_request(dev,
- obj_priv->last_rendering_seqno, obj_priv->ring);
- if (ret != 0)
+ ret = i915_do_wait_request(dev,
+ obj_priv->last_rendering_seqno,
+ interruptible,
+ obj_priv->ring);
+ if (ret)
return ret;
}
@@ -1948,14 +2150,10 @@ int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret = 0;
-#if WATCH_BUF
- DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
- DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
-#endif
if (obj_priv->gtt_space == NULL)
return 0;
@@ -1980,33 +2178,26 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
* should be safe and we need to cleanup or else we might
* cause memory corruption through use-after-free.
*/
+ if (ret) {
+ i915_gem_clflush_object(obj);
+ obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
+ }
/* release the fence reg _after_ flushing */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
i915_gem_clear_fence_reg(obj);
- if (obj_priv->agp_mem != NULL) {
- drm_unbind_agp(obj_priv->agp_mem);
- drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
- obj_priv->agp_mem = NULL;
- }
+ drm_unbind_agp(obj_priv->agp_mem);
+ drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
i915_gem_object_put_pages(obj);
BUG_ON(obj_priv->pages_refcount);
- if (obj_priv->gtt_space) {
- atomic_dec(&dev->gtt_count);
- atomic_sub(obj->size, &dev->gtt_memory);
+ i915_gem_info_remove_gtt(dev_priv, obj->size);
+ list_del_init(&obj_priv->list);
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
- }
-
- /* Remove ourselves from the LRU list if present. */
- spin_lock(&dev_priv->mm.active_list_lock);
- if (!list_empty(&obj_priv->list))
- list_del_init(&obj_priv->list);
- spin_unlock(&dev_priv->mm.active_list_lock);
+ drm_mm_put_block(obj_priv->gtt_space);
+ obj_priv->gtt_space = NULL;
if (i915_gem_object_is_purgeable(obj_priv))
i915_gem_object_truncate(obj);
@@ -2016,48 +2207,45 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return ret;
}
+static int i915_ring_idle(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ i915_gem_flush_ring(dev, NULL, ring,
+ I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ return i915_wait_request(dev,
+ i915_gem_next_request_seqno(dev, ring),
+ ring);
+}
+
int
i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
bool lists_empty;
- uint32_t seqno1, seqno2;
int ret;
- spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->render_ring.active_list) &&
(!HAS_BSD(dev) ||
list_empty(&dev_priv->bsd_ring.active_list)));
- spin_unlock(&dev_priv->mm.active_list_lock);
-
if (lists_empty)
return 0;
/* Flush everything onto the inactive list. */
- i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
- seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
- &dev_priv->render_ring);
- if (seqno1 == 0)
- return -ENOMEM;
- ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
+ ret = i915_ring_idle(dev, &dev_priv->render_ring);
+ if (ret)
+ return ret;
if (HAS_BSD(dev)) {
- seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
- &dev_priv->bsd_ring);
- if (seqno2 == 0)
- return -ENOMEM;
-
- ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
+ ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
if (ret)
return ret;
}
-
- return ret;
+ return 0;
}
-int
+static int
i915_gem_object_get_pages(struct drm_gem_object *obj,
gfp_t gfpmask)
{
@@ -2237,7 +2425,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}
-static int i915_find_fence_reg(struct drm_device *dev)
+static int i915_find_fence_reg(struct drm_device *dev,
+ bool interruptible)
{
struct drm_i915_fence_reg *reg = NULL;
struct drm_i915_gem_object *obj_priv = NULL;
@@ -2282,7 +2471,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
* private reference to obj like the other callers of put_fence_reg
* (set_tiling ioctl) do. */
drm_gem_object_reference(obj);
- ret = i915_gem_object_put_fence_reg(obj);
+ ret = i915_gem_object_put_fence_reg(obj, interruptible);
drm_gem_object_unreference(obj);
if (ret != 0)
return ret;
@@ -2304,7 +2493,8 @@ static int i915_find_fence_reg(struct drm_device *dev)
* and tiling format.
*/
int
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
+i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
+ bool interruptible)
{
struct drm_device *dev = obj->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2339,7 +2529,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
break;
}
- ret = i915_find_fence_reg(dev);
+ ret = i915_find_fence_reg(dev, interruptible);
if (ret < 0)
return ret;
@@ -2398,7 +2588,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
break;
case 3:
- if (obj_priv->fence_reg > 8)
+ if (obj_priv->fence_reg >= 8)
fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
else
case 2:
@@ -2417,15 +2607,19 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
* i915_gem_object_put_fence_reg - waits on outstanding fenced access
* to the buffer to finish, and then resets the fence register.
* @obj: tiled object holding a fence register.
+ * @interruptible: whether the wait upon the fence is interruptible
*
* Zeroes out the fence register itself and clears out the associated
* data structures in dev_priv and obj_priv.
*/
int
-i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
+i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
+ bool interruptible)
{
struct drm_device *dev = obj->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_i915_fence_reg *reg;
if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
return 0;
@@ -2440,20 +2634,23 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
* therefore we must wait for any outstanding access to complete
* before clearing the fence.
*/
- if (!IS_I965G(dev)) {
+ reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+ if (reg->gpu) {
int ret;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret != 0)
+ ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+ if (ret)
return ret;
- ret = i915_gem_object_wait_rendering(obj);
- if (ret != 0)
+ ret = i915_gem_object_wait_rendering(obj, interruptible);
+ if (ret)
return ret;
+
+ reg->gpu = false;
}
i915_gem_object_flush_gtt_write_domain(obj);
- i915_gem_clear_fence_reg (obj);
+ i915_gem_clear_fence_reg(obj);
return 0;
}
@@ -2486,7 +2683,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
/* If the object is bigger than the entire aperture, reject it early
* before evicting everything in a vain attempt to find space.
*/
- if (obj->size > dev->gtt_total) {
+ if (obj->size > dev_priv->mm.gtt_total) {
DRM_ERROR("Attempting to bind an object larger than the aperture\n");
return -E2BIG;
}
@@ -2504,9 +2701,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
/* If the gtt is empty and we're still having trouble
* fitting our object in, we're out of memory.
*/
-#if WATCH_LRU
- DRM_INFO("%s: GTT full, evicting something\n", __func__);
-#endif
ret = i915_gem_evict_something(dev, obj->size, alignment);
if (ret)
return ret;
@@ -2514,10 +2708,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
goto search_free;
}
-#if WATCH_BUF
- DRM_INFO("Binding object of size %zd at 0x%08x\n",
- obj->size, obj_priv->gtt_offset);
-#endif
ret = i915_gem_object_get_pages(obj, gfpmask);
if (ret) {
drm_mm_put_block(obj_priv->gtt_space);
@@ -2562,11 +2752,10 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
goto search_free;
}
- atomic_inc(&dev->gtt_count);
- atomic_add(obj->size, &dev->gtt_memory);
/* keep track of bounds object by adding it to the inactive list */
list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ i915_gem_info_add_gtt(dev_priv, obj->size);
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
@@ -2599,25 +2788,30 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
/** Flushes any GPU write domain for the object if it's dirty. */
static int
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
+ bool pipelined)
{
struct drm_device *dev = obj->dev;
uint32_t old_write_domain;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
return 0;
/* Queue the GPU write cache flushing we need. */
old_write_domain = obj->write_domain;
- i915_gem_flush(dev, 0, obj->write_domain);
- if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
- return -ENOMEM;
+ i915_gem_flush_ring(dev, NULL,
+ to_intel_bo(obj)->ring,
+ 0, obj->write_domain);
+ BUG_ON(obj->write_domain);
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
old_write_domain);
- return 0;
+
+ if (pipelined)
+ return 0;
+
+ return i915_gem_object_wait_rendering(obj, true);
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -2661,26 +2855,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
old_write_domain);
}
-int
-i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
-{
- int ret = 0;
-
- switch (obj->write_domain) {
- case I915_GEM_DOMAIN_GTT:
- i915_gem_object_flush_gtt_write_domain(obj);
- break;
- case I915_GEM_DOMAIN_CPU:
- i915_gem_object_flush_cpu_write_domain(obj);
- break;
- default:
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- break;
- }
-
- return ret;
-}
-
/**
* Moves a single object to the GTT read, and possibly write domain.
*
@@ -2698,32 +2872,28 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
if (obj_priv->gtt_space == NULL)
return -EINVAL;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj, false);
if (ret != 0)
return ret;
- /* Wait on any GPU rendering and flushing to occur. */
- ret = i915_gem_object_wait_rendering(obj);
- if (ret != 0)
- return ret;
+ i915_gem_object_flush_cpu_write_domain(obj);
+
+ if (write) {
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
+ }
old_write_domain = obj->write_domain;
old_read_domains = obj->read_domains;
- /* If we're writing through the GTT domain, then CPU and GPU caches
- * will need to be invalidated at next use.
- */
- if (write)
- obj->read_domains &= I915_GEM_DOMAIN_GTT;
-
- i915_gem_object_flush_cpu_write_domain(obj);
-
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
+ obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = I915_GEM_DOMAIN_GTT;
obj_priv->dirty = 1;
}
@@ -2740,51 +2910,36 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
* wait, as in modesetting process we're not supposed to be interrupted.
*/
int
-i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
+i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
+ bool pipelined)
{
- struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- uint32_t old_write_domain, old_read_domains;
+ uint32_t old_read_domains;
int ret;
/* Not valid to be called on unbound objects. */
if (obj_priv->gtt_space == NULL)
return -EINVAL;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj, true);
if (ret)
return ret;
- /* Wait on any GPU rendering and flushing to occur. */
- if (obj_priv->active) {
-#if WATCH_BUF
- DRM_INFO("%s: object %p wait for seqno %08x\n",
- __func__, obj, obj_priv->last_rendering_seqno);
-#endif
- ret = i915_do_wait_request(dev,
- obj_priv->last_rendering_seqno,
- 0,
- obj_priv->ring);
- if (ret != 0)
+ /* Currently, we are always called from a non-interruptible context. */
+ if (!pipelined) {
+ ret = i915_gem_object_wait_rendering(obj, false);
+ if (ret)
return ret;
}
i915_gem_object_flush_cpu_write_domain(obj);
- old_write_domain = obj->write_domain;
old_read_domains = obj->read_domains;
-
- /* It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
- BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
- obj->read_domains = I915_GEM_DOMAIN_GTT;
- obj->write_domain = I915_GEM_DOMAIN_GTT;
- obj_priv->dirty = 1;
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
- old_write_domain);
+ obj->write_domain);
return 0;
}
@@ -2801,12 +2956,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
uint32_t old_write_domain, old_read_domains;
int ret;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret)
- return ret;
-
- /* Wait on any GPU rendering and flushing to occur. */
- ret = i915_gem_object_wait_rendering(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj, false);
if (ret != 0)
return ret;
@@ -2817,6 +2967,12 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
*/
i915_gem_object_set_to_full_cpu_read_domain(obj);
+ if (write) {
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
+ }
+
old_write_domain = obj->write_domain;
old_read_domains = obj->read_domains;
@@ -2836,7 +2992,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
* need to be invalidated at next use.
*/
if (write) {
- obj->read_domains &= I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
}
@@ -2962,7 +3118,7 @@ static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
@@ -2973,12 +3129,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
intel_mark_busy(dev, obj);
-#if WATCH_BUF
- DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
- __func__, obj,
- obj->read_domains, obj->pending_read_domains,
- obj->write_domain, obj->pending_write_domain);
-#endif
/*
* If the object isn't moving to a new write domain,
* let the object stay in multiple read domains
@@ -3005,13 +3155,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
* stale data. That is, any new read domains.
*/
invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
- if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
-#if WATCH_BUF
- DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
- __func__, flush_domains, invalidate_domains);
-#endif
+ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);
- }
old_read_domains = obj->read_domains;
@@ -3025,21 +3170,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
obj->pending_write_domain = obj->write_domain;
obj->read_domains = obj->pending_read_domains;
- if (flush_domains & I915_GEM_GPU_DOMAINS) {
- if (obj_priv->ring == &dev_priv->render_ring)
- dev_priv->flush_rings |= FLUSH_RENDER_RING;
- else if (obj_priv->ring == &dev_priv->bsd_ring)
- dev_priv->flush_rings |= FLUSH_BSD_RING;
- }
-
dev->invalidate_domains |= invalidate_domains;
dev->flush_domains |= flush_domains;
-#if WATCH_BUF
- DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
- __func__,
- obj->read_domains, obj->write_domain,
- dev->invalidate_domains, dev->flush_domains);
-#endif
+ if (obj_priv->ring)
+ dev_priv->mm.flush_rings |= obj_priv->ring->id;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
@@ -3102,12 +3236,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
if (offset == 0 && size == obj->size)
return i915_gem_object_set_to_cpu_domain(obj, 0);
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret)
- return ret;
-
- /* Wait on any GPU rendering and flushing to occur. */
- ret = i915_gem_object_wait_rendering(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj, false);
if (ret != 0)
return ret;
i915_gem_object_flush_gtt_write_domain(obj);
@@ -3194,11 +3323,13 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
* properly handle blits to/from tiled surfaces.
*/
if (need_fence) {
- ret = i915_gem_object_get_fence_reg(obj);
+ ret = i915_gem_object_get_fence_reg(obj, true);
if (ret != 0) {
i915_gem_object_unpin(obj);
return ret;
}
+
+ dev_priv->fence_regs[obj_priv->fence_reg].gpu = true;
}
entry->offset = obj_priv->gtt_offset;
@@ -3256,6 +3387,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
(int) reloc->offset,
reloc->read_domains,
reloc->write_domain);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
return -EINVAL;
}
if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
@@ -3331,7 +3464,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
if (ret != 0) {
drm_gem_object_unreference(target_obj);
i915_gem_object_unpin(obj);
- return -EINVAL;
+ return ret;
}
/* Map the page containing the relocation we're going to
@@ -3346,11 +3479,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
(reloc_offset & (PAGE_SIZE - 1)));
reloc_val = target_obj_priv->gtt_offset + reloc->delta;
-#if WATCH_BUF
- DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
- obj, (unsigned int) reloc->offset,
- readl(reloc_entry), reloc_val);
-#endif
writel(reloc_val, reloc_entry);
io_mapping_unmap_atomic(reloc_page, KM_USER0);
@@ -3362,10 +3490,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
drm_gem_object_unreference(target_obj);
}
-#if WATCH_BUF
- if (0)
- i915_gem_dump_object(obj, 128, __func__, ~0);
-#endif
return 0;
}
@@ -3380,28 +3504,48 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
* relatively low latency when blocking on a particular request to finish.
*/
static int
-i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
- int ret = 0;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+ struct drm_i915_gem_request *request;
+ struct intel_ring_buffer *ring = NULL;
+ u32 seqno = 0;
+ int ret;
- mutex_lock(&dev->struct_mutex);
- while (!list_empty(&i915_file_priv->mm.request_list)) {
- struct drm_i915_gem_request *request;
-
- request = list_first_entry(&i915_file_priv->mm.request_list,
- struct drm_i915_gem_request,
- client_list);
-
+ spin_lock(&file_priv->mm.lock);
+ list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
- ret = i915_wait_request(dev, request->seqno, request->ring);
- if (ret != 0)
- break;
+ ring = request->ring;
+ seqno = request->seqno;
}
- mutex_unlock(&dev->struct_mutex);
+ spin_unlock(&file_priv->mm.lock);
+
+ if (seqno == 0)
+ return 0;
+
+ ret = 0;
+ if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+ /* And wait for the seqno to pass without holding any locks and
+ * causing extra latency for others. This is safe as the irq
+ * generation is designed to be run atomically and so is
+ * lockless.
+ */
+ ring->user_irq_get(dev, ring);
+ ret = wait_event_interruptible(ring->irq_queue,
+ i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
+ ring->user_irq_put(dev, ring);
+
+ if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+ ret = -EIO;
+ }
+
+ if (ret == 0)
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
return ret;
}
@@ -3537,8 +3681,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
return ret;
}
-
-int
+static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv,
struct drm_i915_gem_execbuffer2 *args,
@@ -3550,13 +3693,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj_priv;
struct drm_clip_rect *cliprects = NULL;
struct drm_i915_gem_relocation_entry *relocs = NULL;
- int ret = 0, ret2, i, pinned = 0;
+ struct drm_i915_gem_request *request = NULL;
+ int ret, ret2, i, pinned = 0;
uint64_t exec_offset;
- uint32_t seqno, flush_domains, reloc_index;
+ uint32_t reloc_index;
int pin_tries, flips;
struct intel_ring_buffer *ring = NULL;
+ ret = i915_gem_check_is_wedged(dev);
+ if (ret)
+ return ret;
+
#if WATCH_EXEC
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
(int) args->buffers_ptr, args->buffer_count, args->batch_len);
@@ -3603,20 +3751,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
}
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL) {
+ ret = -ENOMEM;
+ goto pre_mutex_err;
+ }
+
ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
&relocs);
if (ret != 0)
goto pre_mutex_err;
- mutex_lock(&dev->struct_mutex);
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
- if (atomic_read(&dev_priv->mm.wedged)) {
- mutex_unlock(&dev->struct_mutex);
- ret = -EIO;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
goto pre_mutex_err;
- }
if (dev_priv->mm.suspended) {
mutex_unlock(&dev->struct_mutex);
@@ -3696,15 +3844,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
pinned+1, args->buffer_count,
total_size, num_fences,
ret);
- DRM_ERROR("%d objects [%d pinned], "
- "%d object bytes [%d pinned], "
- "%d/%d gtt bytes\n",
- atomic_read(&dev->object_count),
- atomic_read(&dev->pin_count),
- atomic_read(&dev->object_memory),
- atomic_read(&dev->pin_memory),
- atomic_read(&dev->gtt_memory),
- dev->gtt_total);
+ DRM_ERROR("%u objects [%u pinned, %u GTT], "
+ "%zu object bytes [%zu pinned], "
+ "%zu /%zu gtt bytes\n",
+ dev_priv->mm.object_count,
+ dev_priv->mm.pin_count,
+ dev_priv->mm.gtt_count,
+ dev_priv->mm.object_memory,
+ dev_priv->mm.pin_memory,
+ dev_priv->mm.gtt_memory,
+ dev_priv->mm.gtt_total);
}
goto err;
}
@@ -3737,15 +3886,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
/* Zero the global flush/invalidate flags. These
* will be modified as new domains are computed
* for each object
*/
dev->invalidate_domains = 0;
dev->flush_domains = 0;
- dev_priv->flush_rings = 0;
+ dev_priv->mm.flush_rings = 0;
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
@@ -3754,8 +3901,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
i915_gem_object_set_to_gpu_domain(obj);
}
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
@@ -3763,17 +3908,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
dev->invalidate_domains,
dev->flush_domains);
#endif
- i915_gem_flush(dev,
+ i915_gem_flush(dev, file_priv,
dev->invalidate_domains,
- dev->flush_domains);
- if (dev_priv->flush_rings & FLUSH_RENDER_RING)
- (void)i915_add_request(dev, file_priv,
- dev->flush_domains,
- &dev_priv->render_ring);
- if (dev_priv->flush_rings & FLUSH_BSD_RING)
- (void)i915_add_request(dev, file_priv,
- dev->flush_domains,
- &dev_priv->bsd_ring);
+ dev->flush_domains,
+ dev_priv->mm.flush_rings);
}
for (i = 0; i < args->buffer_count; i++) {
@@ -3785,16 +3923,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (obj->write_domain)
list_move_tail(&obj_priv->gpu_write_list,
&dev_priv->mm.gpu_write_list);
- else
- list_del_init(&obj_priv->gpu_write_list);
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
old_write_domain);
}
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
#if WATCH_COHERENCY
for (i = 0; i < args->buffer_count; i++) {
i915_gem_object_check_coherency(object_list[i],
@@ -3821,33 +3955,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* Ensure that the commands in the batch buffer are
* finished before the interrupt fires
*/
- flush_domains = i915_retire_commands(dev, ring);
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
+ i915_retire_commands(dev, ring);
- /*
- * Get a seqno representing the execution of the current buffer,
- * which we can wait on. We would like to mitigate these interrupts,
- * likely by only creating seqnos occasionally (so that we have
- * *some* interrupts representing completion of buffers that we can
- * wait on when trying to clear up gtt space).
- */
- seqno = i915_add_request(dev, file_priv, flush_domains, ring);
- BUG_ON(seqno == 0);
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
obj_priv = to_intel_bo(obj);
- i915_gem_object_move_to_active(obj, seqno, ring);
-#if WATCH_LRU
- DRM_INFO("%s: move to exec list %p\n", __func__, obj);
-#endif
+ i915_gem_object_move_to_active(obj, ring);
}
-#if WATCH_LRU
- i915_dump_lru(dev, __func__);
-#endif
- i915_verify_inactive(dev, __FILE__, __LINE__);
+ i915_add_request(dev, file_priv, request, ring);
+ request = NULL;
err:
for (i = 0; i < pinned; i++)
@@ -3880,6 +3998,7 @@ pre_mutex_err:
drm_free_large(object_list);
kfree(cliprects);
+ kfree(request);
return ret;
}
@@ -3936,7 +4055,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
exec2_list[i].alignment = exec_list[i].alignment;
exec2_list[i].offset = exec_list[i].offset;
- if (!IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen < 4)
exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
else
exec2_list[i].flags = 0;
@@ -4033,12 +4152,12 @@ int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
struct drm_device *dev = obj->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
+ WARN_ON(i915_verify_lists(dev));
if (obj_priv->gtt_space != NULL) {
if (alignment == 0)
@@ -4066,14 +4185,13 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
* remove it from the inactive list
*/
if (obj_priv->pin_count == 1) {
- atomic_inc(&dev->pin_count);
- atomic_add(obj->size, &dev->pin_memory);
- if (!obj_priv->active &&
- (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
- list_del_init(&obj_priv->list);
+ i915_gem_info_add_pin(dev_priv, obj->size);
+ if (!obj_priv->active)
+ list_move_tail(&obj_priv->list,
+ &dev_priv->mm.pinned_list);
}
- i915_verify_inactive(dev, __FILE__, __LINE__);
+ WARN_ON(i915_verify_lists(dev));
return 0;
}
@@ -4084,7 +4202,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- i915_verify_inactive(dev, __FILE__, __LINE__);
+ WARN_ON(i915_verify_lists(dev));
obj_priv->pin_count--;
BUG_ON(obj_priv->pin_count < 0);
BUG_ON(obj_priv->gtt_space == NULL);
@@ -4094,14 +4212,12 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
* the inactive list
*/
if (obj_priv->pin_count == 0) {
- if (!obj_priv->active &&
- (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+ if (!obj_priv->active)
list_move_tail(&obj_priv->list,
&dev_priv->mm.inactive_list);
- atomic_dec(&dev->pin_count);
- atomic_sub(obj->size, &dev->pin_memory);
+ i915_gem_info_remove_pin(dev_priv, obj->size);
}
- i915_verify_inactive(dev, __FILE__, __LINE__);
+ WARN_ON(i915_verify_lists(dev));
}
int
@@ -4113,17 +4229,20 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj_priv;
int ret;
- mutex_lock(&dev->struct_mutex);
-
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
args->handle);
- mutex_unlock(&dev->struct_mutex);
return -ENOENT;
}
obj_priv = to_intel_bo(obj);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
+
if (obj_priv->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to pin a purgeable buffer\n");
drm_gem_object_unreference(obj);
@@ -4168,18 +4287,23 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_pin *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
-
- mutex_lock(&dev->struct_mutex);
+ int ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
args->handle);
- mutex_unlock(&dev->struct_mutex);
return -ENOENT;
}
obj_priv = to_intel_bo(obj);
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
+
if (obj_priv->pin_filp != file_priv) {
DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
args->handle);
@@ -4205,6 +4329,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_busy *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
+ int ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
@@ -4213,7 +4338,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
/* Count all active objects as busy, even if they are currently not used
* by the gpu. Users of this interface expect objects to eventually
@@ -4228,10 +4357,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* use this buffer rather sooner than later, so issuing the required
* flush earlier is beneficial.
*/
- if (obj->write_domain) {
- i915_gem_flush(dev, 0, obj->write_domain);
- (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
- }
+ if (obj->write_domain & I915_GEM_GPU_DOMAINS)
+ i915_gem_flush_ring(dev, file_priv,
+ obj_priv->ring,
+ 0, obj->write_domain);
/* Update the active list for the hardware's current position.
* Otherwise this only updates on a delayed timer or when irqs
@@ -4262,6 +4391,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_madvise *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
+ int ret;
switch (args->madv) {
case I915_MADV_DONTNEED:
@@ -4277,10 +4407,14 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
args->handle);
return -ENOENT;
}
-
- mutex_lock(&dev->struct_mutex);
obj_priv = to_intel_bo(obj);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
+
if (obj_priv->pin_count) {
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@@ -4308,6 +4442,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
size_t size)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
@@ -4319,6 +4454,8 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
return NULL;
}
+ i915_gem_info_add_obj(dev_priv, size);
+
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
@@ -4359,6 +4496,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj)
i915_gem_free_mmap_offset(obj);
drm_gem_object_release(obj);
+ i915_gem_info_remove_obj(dev_priv, obj->size);
kfree(obj_priv->page_cpu_valid);
kfree(obj_priv->bit_17);
@@ -4417,7 +4555,7 @@ i915_gem_idle(struct drm_device *dev)
* And not confound mm.suspended!
*/
dev_priv->mm.suspended = 1;
- del_timer(&dev_priv->hangcheck_timer);
+ del_timer_sync(&dev_priv->hangcheck_timer);
i915_kernel_lost_context(dev);
i915_gem_cleanup_ringbuffer(dev);
@@ -4497,28 +4635,18 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- dev_priv->render_ring = render_ring;
-
- if (!I915_NEED_GFX_HWS(dev)) {
- dev_priv->render_ring.status_page.page_addr
- = dev_priv->status_page_dmah->vaddr;
- memset(dev_priv->render_ring.status_page.page_addr,
- 0, PAGE_SIZE);
- }
-
if (HAS_PIPE_CONTROL(dev)) {
ret = i915_gem_init_pipe_control(dev);
if (ret)
return ret;
}
- ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
+ ret = intel_init_render_ring_buffer(dev);
if (ret)
goto cleanup_pipe_control;
if (HAS_BSD(dev)) {
- dev_priv->bsd_ring = bsd_ring;
- ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+ ret = intel_init_bsd_ring_buffer(dev);
if (ret)
goto cleanup_render_ring;
}
@@ -4571,11 +4699,8 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
return ret;
}
- spin_lock(&dev_priv->mm.active_list_lock);
BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
- spin_unlock(&dev_priv->mm.active_list_lock);
-
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
@@ -4627,10 +4752,10 @@ i915_gem_load(struct drm_device *dev)
int i;
drm_i915_private_t *dev_priv = dev->dev_private;
- spin_lock_init(&dev_priv->mm.active_list_lock);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
@@ -4643,6 +4768,7 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
+ init_completion(&dev_priv->error_completion);
spin_lock(&shrink_list_lock);
list_add(&dev_priv->mm.shrink_list, &shrink_list);
spin_unlock(&shrink_list_lock);
@@ -4661,21 +4787,30 @@ i915_gem_load(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
dev_priv->fence_reg_start = 3;
- if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
dev_priv->num_fence_regs = 16;
else
dev_priv->num_fence_regs = 8;
/* Initialize fence registers to zero */
- if (IS_I965G(dev)) {
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
+ break;
+ case 5:
+ case 4:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
- } else {
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
+ break;
+ case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
+ case 2:
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
+ break;
}
i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);
@@ -4685,8 +4820,8 @@ i915_gem_load(struct drm_device *dev)
* Create a physically contiguous memory object for this object
* e.g. for cursor + overlay regs
*/
-int i915_gem_init_phys_object(struct drm_device *dev,
- int id, int size, int align)
+static int i915_gem_init_phys_object(struct drm_device *dev,
+ int id, int size, int align)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_phys_object *phys_obj;
@@ -4718,7 +4853,7 @@ kfree_obj:
return ret;
}
-void i915_gem_free_phys_object(struct drm_device *dev, int id)
+static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_phys_object *phys_obj;
@@ -4863,18 +4998,25 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
return 0;
}
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
+void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
/* Clean up our request list when the client is going away, so that
* later retire_requests won't dereference our soon-to-be-gone
* file_priv.
*/
- mutex_lock(&dev->struct_mutex);
- while (!list_empty(&i915_file_priv->mm.request_list))
- list_del_init(i915_file_priv->mm.request_list.next);
- mutex_unlock(&dev->struct_mutex);
+ spin_lock(&file_priv->mm.lock);
+ while (!list_empty(&file_priv->mm.request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&file_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ client_list);
+ list_del(&request->client_list);
+ request->file_priv = NULL;
+ }
+ spin_unlock(&file_priv->mm.lock);
}
static int
@@ -4883,12 +5025,10 @@ i915_gpu_is_active(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
int lists_empty;
- spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->render_ring.active_list);
if (HAS_BSD(dev))
lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
- spin_unlock(&dev_priv->mm.active_list_lock);
return !lists_empty;
}
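A note on the seqno comparisons the hunks above keep relying on: i915_seqno_passed() orders two 32-bit sequence numbers by signed subtraction, so the ordering stays correct when the counter wraps past 2^32. The following standalone C sketch of that idiom is illustrative only; seqno_passed() is a hypothetical local copy, not the driver function itself.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe ordering test, mirroring i915_seqno_passed(): the unsigned
 * difference is reinterpreted as signed, so "newer" survives wraparound. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(10, 5));		/* 10 was emitted after 5 */
	assert(!seqno_passed(5, 10));
	assert(seqno_passed(3, 0xfffffff0u));	/* still ordered across wrap */
	return 0;
}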
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 80f380b1d951..48644b840a8d 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -30,29 +30,112 @@
#include "i915_drm.h"
#include "i915_drv.h"
-#if WATCH_INACTIVE
-void
-i915_verify_inactive(struct drm_device *dev, char *file, int line)
+#if WATCH_LISTS
+int
+i915_verify_lists(struct drm_device *dev)
{
+ static int warned;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
-
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- obj = &obj_priv->base;
- if (obj_priv->pin_count || obj_priv->active ||
- (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT)))
- DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
+ struct drm_i915_gem_object *obj;
+ int err = 0;
+
+ if (warned)
+ return 0;
+
+ list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
+ if (obj->base.dev != dev ||
+ !atomic_read(&obj->base.refcount.refcount)) {
+ DRM_ERROR("freed render active %p\n", obj);
+ err++;
+ break;
+ } else if (!obj->active ||
+ (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
+ DRM_ERROR("invalid render active %p (a %d r %x)\n",
+ obj,
+ obj->active,
+ obj->base.read_domains);
+ err++;
+ } else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
+ DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
+ obj,
+ obj->base.write_domain,
+ !list_empty(&obj->gpu_write_list));
+ err++;
+ }
+ }
+
+ list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
+ if (obj->base.dev != dev ||
+ !atomic_read(&obj->base.refcount.refcount)) {
+ DRM_ERROR("freed flushing %p\n", obj);
+ err++;
+ break;
+ } else if (!obj->active ||
+ (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
+ list_empty(&obj->gpu_write_list)){
+ DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
obj,
- obj_priv->pin_count, obj_priv->active,
- obj->write_domain, file, line);
+ obj->active,
+ obj->base.write_domain,
+ !list_empty(&obj->gpu_write_list));
+ err++;
+ }
+ }
+
+ list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
+ if (obj->base.dev != dev ||
+ !atomic_read(&obj->base.refcount.refcount)) {
+ DRM_ERROR("freed gpu write %p\n", obj);
+ err++;
+ break;
+ } else if (!obj->active ||
+ (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
+ DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
+ obj,
+ obj->active,
+ obj->base.write_domain);
+ err++;
+ }
+ }
+
+ list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
+ if (obj->base.dev != dev ||
+ !atomic_read(&obj->base.refcount.refcount)) {
+ DRM_ERROR("freed inactive %p\n", obj);
+ err++;
+ break;
+ } else if (obj->pin_count || obj->active ||
+ (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
+ DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
+ obj,
+ obj->pin_count, obj->active,
+ obj->base.write_domain);
+ err++;
+ }
}
+
+ list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
+ if (obj->base.dev != dev ||
+ !atomic_read(&obj->base.refcount.refcount)) {
+ DRM_ERROR("freed pinned %p\n", obj);
+ err++;
+ break;
+ } else if (!obj->pin_count || obj->active ||
+ (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
+ DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
+ obj,
+ obj->pin_count, obj->active,
+ obj->base.write_domain);
+ err++;
+ }
+ }
+
+ return warned = err;
}
#endif /* WATCH_LISTS */
-#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
+#if WATCH_EXEC | WATCH_PWRITE
static void
i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
uint32_t bias, uint32_t mark)
@@ -97,41 +180,6 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
}
#endif
-#if WATCH_LRU
-void
-i915_dump_lru(struct drm_device *dev, const char *where)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
-
- DRM_INFO("active list %s {\n", where);
- spin_lock(&dev_priv->mm.active_list_lock);
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
- list)
- {
- DRM_INFO(" %p: %08x\n", obj_priv,
- obj_priv->last_rendering_seqno);
- }
- spin_unlock(&dev_priv->mm.active_list_lock);
- DRM_INFO("}\n");
- DRM_INFO("flushing list %s {\n", where);
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
- list)
- {
- DRM_INFO(" %p: %08x\n", obj_priv,
- obj_priv->last_rendering_seqno);
- }
- DRM_INFO("}\n");
- DRM_INFO("inactive %s {\n", where);
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- DRM_INFO(" %p: %08x\n", obj_priv,
- obj_priv->last_rendering_seqno);
- }
- DRM_INFO("}\n");
-}
-#endif
-
-
#if WATCH_COHERENCY
void
i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
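
The new i915_verify_lists() walks each GEM list once, counts invariant violations, and latches the result in a static warned flag so the check only reports up to the first failing pass. A minimal userspace sketch of that verify-then-latch pattern (the obj struct and the single list here are invented stand-ins, not driver structures):

/* Sketch of the verify-then-latch pattern used by i915_verify_lists(): walk a
 * list, count invariant violations, and stop checking after the first report.
 * The obj struct below is a hypothetical stand-in, not a driver structure. */
#include <stdio.h>

struct obj {
	int active;                 /* object believed busy on the GPU */
	unsigned int write_domain;  /* non-zero while a GPU write is pending */
	struct obj *next;
};

static int verify_active_list(struct obj *head)
{
	static int warned;          /* latch: report violations only once */
	int err = 0;

	if (warned)
		return 0;

	for (struct obj *o = head; o; o = o->next) {
		if (!o->active) {
			fprintf(stderr, "invalid active %p (a %d w %x)\n",
				(void *)o, o->active, o->write_domain);
			err++;
		}
	}

	return warned = err;        /* same idiom as "return warned = err;" above */
}

int main(void)
{
	struct obj stale = { .active = 0, .write_domain = 0x2, .next = NULL };
	struct obj busy  = { .active = 1, .write_domain = 0x0, .next = &stale };

	printf("first pass: %d error(s)\n", verify_active_list(&busy));
	printf("second pass: %d error(s), latch already set\n",
	       verify_active_list(&busy));
	return 0;
}
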
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index e85246ef691c..3d7fbf32bb18 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -93,7 +93,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
- struct drm_i915_gem_object *obj_priv, *tmp_obj_priv;
+ struct drm_i915_gem_object *obj_priv;
struct list_head *render_iter, *bsd_iter;
int ret = 0;
@@ -175,39 +175,34 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
return -ENOSPC;
found:
+ /* drm_mm doesn't allow any other operations while
+ * scanning, therefore store the objects to be evicted
+ * on a temporary list. */
INIT_LIST_HEAD(&eviction_list);
- list_for_each_entry_safe(obj_priv, tmp_obj_priv,
- &unwind_list, evict_list) {
+ while (!list_empty(&unwind_list)) {
+ obj_priv = list_first_entry(&unwind_list,
+ struct drm_i915_gem_object,
+ evict_list);
if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
- /* drm_mm doesn't allow any other other operations while
- * scanning, therefore store to be evicted objects on a
- * temporary list. */
list_move(&obj_priv->evict_list, &eviction_list);
- } else
- drm_gem_object_unreference(&obj_priv->base);
+ continue;
+ }
+ list_del(&obj_priv->evict_list);
+ drm_gem_object_unreference(&obj_priv->base);
}
/* Unbinding will emit any required flushes */
- list_for_each_entry_safe(obj_priv, tmp_obj_priv,
- &eviction_list, evict_list) {
-#if WATCH_LRU
- DRM_INFO("%s: evicting %p\n", __func__, &obj_priv->base);
-#endif
- ret = i915_gem_object_unbind(&obj_priv->base);
- if (ret)
- return ret;
-
+ while (!list_empty(&eviction_list)) {
+ obj_priv = list_first_entry(&eviction_list,
+ struct drm_i915_gem_object,
+ evict_list);
+ if (ret == 0)
+ ret = i915_gem_object_unbind(&obj_priv->base);
+ list_del(&obj_priv->evict_list);
drm_gem_object_unreference(&obj_priv->base);
}
- /* The just created free hole should be on the top of the free stack
- * maintained by drm_mm, so this BUG_ON actually executes in O(1).
- * Furthermore all accessed data has just recently been used, so it
- * should be really fast, too. */
- BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
- alignment, 0));
-
- return 0;
+ return ret;
}
int
@@ -217,14 +212,11 @@ i915_gem_evict_everything(struct drm_device *dev)
int ret;
bool lists_empty;
- spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->render_ring.active_list) &&
(!HAS_BSD(dev)
|| list_empty(&dev_priv->bsd_ring.active_list)));
- spin_unlock(&dev_priv->mm.active_list_lock);
-
if (lists_empty)
return -ENOSPC;
@@ -239,13 +231,11 @@ i915_gem_evict_everything(struct drm_device *dev)
if (ret)
return ret;
- spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->render_ring.active_list) &&
(!HAS_BSD(dev)
|| list_empty(&dev_priv->bsd_ring.active_list)));
- spin_unlock(&dev_priv->mm.active_list_lock);
BUG_ON(!lists_empty);
return 0;
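
The rewritten eviction path gathers drm_mm scan hits on a temporary eviction_list, since no other drm_mm operation is allowed while a scan is in progress, and then unbinds in a second pass that keeps dropping references even after the first unbind error. A rough userspace sketch of the same two-phase select/act pattern, with arrays standing in for the kernel list helpers and a made-up unbind() failure:

/* Two-phase eviction sketch: remember candidates while "scanning", then act on
 * them afterwards, preserving only the first error while still releasing every
 * reference. All structures and the failing unbind() are invented stand-ins. */
#include <stdio.h>

struct candidate {
	int id;
	int in_scan;   /* stands in for drm_mm_scan_remove_block() saying "yes" */
};

static int unbind(const struct candidate *c)
{
	return c->id == 2 ? -1 : 0;   /* pretend object 2 fails to unbind */
}

int main(void)
{
	struct candidate unwind[] = {
		{ .id = 1, .in_scan = 1 },
		{ .id = 2, .in_scan = 1 },
		{ .id = 3, .in_scan = 0 },   /* not part of the chosen hole */
	};
	const struct candidate *evict[3];
	int nevict = 0, ret = 0;

	/* Phase 1: no other drm_mm operation is allowed mid-scan, so only
	 * record which objects belong to the hole being created. */
	for (int i = 0; i < 3; i++) {
		if (unwind[i].in_scan)
			evict[nevict++] = &unwind[i];
		else
			printf("dropping reference on %d\n", unwind[i].id);
	}

	/* Phase 2: unbind each candidate, keep the first error, but never skip
	 * the reference drop (mirrors the "if (ret == 0) ret = ..." loop). */
	for (int i = 0; i < nevict; i++) {
		if (ret == 0)
			ret = unbind(evict[i]);
		printf("dropping reference on %d\n", evict[i]->id);
	}

	printf("evict returned %d\n", ret);
	return 0;
}
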
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 710eca70b323..8c9ffc4768ee 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -98,7 +98,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if (!IS_I9XX(dev)) {
+ } else if (IS_GEN2(dev)) {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
@@ -190,19 +190,19 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
if (tiling_mode == I915_TILING_NONE)
return true;
- if (!IS_I9XX(dev) ||
+ if (IS_GEN2(dev) ||
(tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
tile_width = 128;
else
tile_width = 512;
/* check maximum stride & object size */
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
/* i965 stores the end address of the gtt mapping in the fence
* reg, so dont bother to check the size */
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
- } else if (IS_GEN3(dev) || IS_GEN2(dev)) {
+ } else {
if (stride > 8192)
return false;
@@ -216,7 +216,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
}
/* 965+ just needs multiples of tile width */
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
if (stride & (tile_width - 1))
return false;
return true;
@@ -244,16 +244,18 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
if (tiling_mode == I915_TILING_NONE)
return true;
- if (!IS_I965G(dev)) {
- if (obj_priv->gtt_offset & (obj->size - 1))
+ if (INTEL_INFO(dev)->gen >= 4)
+ return true;
+
+ if (obj_priv->gtt_offset & (obj->size - 1))
+ return false;
+
+ if (IS_GEN3(dev)) {
+ if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+ return false;
+ } else {
+ if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
return false;
- if (IS_I9XX(dev)) {
- if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
- return false;
- } else {
- if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
- return false;
- }
}
return true;
@@ -271,7 +273,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
- int ret = 0;
+ int ret;
+
+ ret = i915_gem_check_is_wedged(dev);
+ if (ret)
+ return ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
@@ -328,7 +334,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
ret = i915_gem_object_unbind(obj);
else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- ret = i915_gem_object_put_fence_reg(obj);
+ ret = i915_gem_object_put_fence_reg(obj, true);
else
i915_gem_release_mmap(obj);
@@ -399,16 +405,14 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
* bit 17 of its physical address and therefore being interpreted differently
* by the GPU.
*/
-static int
+static void
i915_gem_swizzle_page(struct page *page)
{
+ char temp[64];
char *vaddr;
int i;
- char temp[64];
vaddr = kmap(page);
- if (vaddr == NULL)
- return -ENOMEM;
for (i = 0; i < PAGE_SIZE; i += 128) {
memcpy(temp, &vaddr[i], 64);
@@ -417,8 +421,6 @@ i915_gem_swizzle_page(struct page *page)
}
kunmap(page);
-
- return 0;
}
void
@@ -440,11 +442,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj_priv->bit_17) != 0)) {
- int ret = i915_gem_swizzle_page(obj_priv->pages[i]);
- if (ret != 0) {
- DRM_ERROR("Failed to swizzle page\n");
- return;
- }
+ i915_gem_swizzle_page(obj_priv->pages[i]);
set_page_dirty(obj_priv->pages[i]);
}
}
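
i915_gem_swizzle_page() above exchanges the two 64-byte halves of every 128-byte chunk in a page, and because that exchange is its own inverse, applying it twice restores the original contents. A self-contained sketch of the same byte shuffle on an ordinary 4 KiB buffer (plain userspace memory, so no kmap and no error path):

/* Swap the 64-byte halves of each 128-byte block, as the driver does when an
 * object's pages change their physical bit-17 value. Plain memory, not a page. */
#include <assert.h>
#include <string.h>
#include <stdio.h>

#define PAGE_SIZE 4096

static void swizzle_buffer(unsigned char *vaddr)
{
	unsigned char temp[64];

	for (int i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, &vaddr[i], 64);
		memcpy(&vaddr[i], &vaddr[i + 64], 64);
		memcpy(&vaddr[i + 64], temp, 64);
	}
}

int main(void)
{
	unsigned char buf[PAGE_SIZE], ref[PAGE_SIZE];

	for (int i = 0; i < PAGE_SIZE; i++)
		buf[i] = ref[i] = (unsigned char)i;

	swizzle_buffer(buf);
	assert(memcmp(buf, ref, PAGE_SIZE) != 0);  /* halves really moved */
	swizzle_buffer(buf);
	assert(memcmp(buf, ref, PAGE_SIZE) == 0);  /* swizzle is an involution */

	puts("swizzle round-trip ok");
	return 0;
}
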
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 744225ebb4b2..64c07c24e300 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -85,7 +85,7 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
}
/* For display hotplug interrupt */
-void
+static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->irq_mask_reg & mask) != 0) {
@@ -172,7 +172,7 @@ void intel_enable_asle (struct drm_device *dev)
else {
i915_enable_pipestat(dev_priv, 1,
PIPE_LEGACY_BLC_EVENT_ENABLE);
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, 0,
PIPE_LEGACY_BLC_EVENT_ENABLE);
}
@@ -191,12 +191,7 @@ static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
-
- if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
- return 1;
-
- return 0;
+ return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}
/* Called from drm generic code, passed a 'crtc', which
@@ -207,10 +202,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long high_frame;
unsigned long low_frame;
- u32 high1, high2, low, count;
-
- high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
- low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+ u32 high1, high2, low;
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
@@ -218,23 +210,23 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
return 0;
}
+ high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
+ low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+
/*
* High & low register fields aren't synchronized, so make sure
* we get a low value that's stable across two reads of the high
* register.
*/
do {
- high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
- PIPE_FRAME_HIGH_SHIFT);
- low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
- PIPE_FRAME_LOW_SHIFT);
- high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
- PIPE_FRAME_HIGH_SHIFT);
+ high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+ low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
+ high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
} while (high1 != high2);
- count = (high1 << 8) | low;
-
- return count;
+ high1 >>= PIPE_FRAME_HIGH_SHIFT;
+ low >>= PIPE_FRAME_LOW_SHIFT;
+ return (high1 << 8) | low;
}
u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
@@ -260,16 +252,12 @@ static void i915_hotplug_work_func(struct work_struct *work)
hotplug_work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_encoder *encoder;
-
- if (mode_config->num_encoder) {
- list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
- if (intel_encoder->hot_plug)
- (*intel_encoder->hot_plug) (intel_encoder);
- }
- }
+ struct intel_encoder *encoder;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
+ if (encoder->hot_plug)
+ encoder->hot_plug(encoder);
+
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
}
@@ -305,13 +293,17 @@ static void i915_handle_rps_change(struct drm_device *dev)
return;
}
-irqreturn_t ironlake_irq_handler(struct drm_device *dev)
+static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir;
struct drm_i915_master_private *master_priv;
struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+ u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
+
+ if (IS_GEN6(dev))
+ bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
@@ -335,28 +327,28 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
}
if (gt_iir & GT_PIPE_NOTIFY) {
- u32 seqno = render_ring->get_gem_seqno(dev, render_ring);
+ u32 seqno = render_ring->get_seqno(dev, render_ring);
render_ring->irq_gem_seqno = seqno;
trace_i915_gem_request_complete(dev, seqno);
- DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+ wake_up_all(&dev_priv->render_ring.irq_queue);
dev_priv->hangcheck_count = 0;
- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+ mod_timer(&dev_priv->hangcheck_timer,
+ jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
- if (gt_iir & GT_BSD_USER_INTERRUPT)
- DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
-
+ if (gt_iir & bsd_usr_interrupt)
+ wake_up_all(&dev_priv->bsd_ring.irq_queue);
if (de_iir & DE_GSE)
- ironlake_opregion_gse_intr(dev);
+ intel_opregion_gse_intr(dev);
if (de_iir & DE_PLANEA_FLIP_DONE) {
intel_prepare_page_flip(dev, 0);
- intel_finish_page_flip(dev, 0);
+ intel_finish_page_flip_plane(dev, 0);
}
if (de_iir & DE_PLANEB_FLIP_DONE) {
intel_prepare_page_flip(dev, 1);
- intel_finish_page_flip(dev, 1);
+ intel_finish_page_flip_plane(dev, 1);
}
if (de_iir & DE_PIPEA_VBLANK)
@@ -404,23 +396,20 @@ static void i915_error_work_func(struct work_struct *work)
char *reset_event[] = { "RESET=1", NULL };
char *reset_done_event[] = { "ERROR=0", NULL };
- DRM_DEBUG_DRIVER("generating error event\n");
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
if (atomic_read(&dev_priv->mm.wedged)) {
- if (IS_I965G(dev)) {
- DRM_DEBUG_DRIVER("resetting chip\n");
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
- if (!i965_reset(dev, GDRST_RENDER)) {
- atomic_set(&dev_priv->mm.wedged, 0);
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
- }
- } else {
- DRM_DEBUG_DRIVER("reboot required\n");
+ DRM_DEBUG_DRIVER("resetting chip\n");
+ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
+ if (!i915_reset(dev, GRDOM_RENDER)) {
+ atomic_set(&dev_priv->mm.wedged, 0);
+ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
}
+ complete_all(&dev_priv->error_completion);
}
}
+#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_device *dev,
struct drm_gem_object *src)
@@ -511,7 +500,7 @@ i915_get_bbaddr(struct drm_device *dev, u32 *ring)
if (IS_I830(dev) || IS_845G(dev))
cmd = MI_BATCH_BUFFER;
- else if (IS_I965G(dev))
+ else if (INTEL_INFO(dev)->gen >= 4)
cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
MI_BATCH_NON_SECURE_I965);
else
@@ -584,13 +573,16 @@ static void i915_capture_error_state(struct drm_device *dev)
return;
}
- error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring);
+ DRM_DEBUG_DRIVER("generating error event\n");
+
+ error->seqno =
+ dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
error->pipeastat = I915_READ(PIPEASTAT);
error->pipebstat = I915_READ(PIPEBSTAT);
error->instpm = I915_READ(INSTPM);
- if (!IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen < 4) {
error->ipeir = I915_READ(IPEIR);
error->ipehr = I915_READ(IPEHR);
error->instdone = I915_READ(INSTDONE);
@@ -744,6 +736,9 @@ void i915_destroy_error_state(struct drm_device *dev)
if (error)
i915_error_state_free(dev, error);
}
+#else
+#define i915_capture_error_state(x)
+#endif
static void i915_report_and_clear_eir(struct drm_device *dev)
{
@@ -785,7 +780,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
}
}
- if (IS_I9XX(dev)) {
+ if (!IS_GEN2(dev)) {
if (eir & I915_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
printk(KERN_ERR "page table error\n");
@@ -811,7 +806,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR "instruction error\n");
printk(KERN_ERR " INSTPM: 0x%08x\n",
I915_READ(INSTPM));
- if (!IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen < 4) {
u32 ipeir = I915_READ(IPEIR);
printk(KERN_ERR " IPEIR: 0x%08x\n",
@@ -876,12 +871,15 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
i915_report_and_clear_eir(dev);
if (wedged) {
+ INIT_COMPLETION(dev_priv->error_completion);
atomic_set(&dev_priv->mm.wedged, 1);
/*
* Wakeup waiting processes so they don't hang
*/
- DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+ wake_up_all(&dev_priv->render_ring.irq_queue);
+ if (HAS_BSD(dev))
+ wake_up_all(&dev_priv->bsd_ring.irq_queue);
}
queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -912,7 +910,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
obj_priv = to_intel_bo(work->pending_flip_obj);
- if(IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
} else {
@@ -951,7 +949,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
iir = I915_READ(IIR);
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
else
vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
@@ -1020,17 +1018,17 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
}
if (iir & I915_USER_INTERRUPT) {
- u32 seqno =
- render_ring->get_gem_seqno(dev, render_ring);
+ u32 seqno = render_ring->get_seqno(dev, render_ring);
render_ring->irq_gem_seqno = seqno;
trace_i915_gem_request_complete(dev, seqno);
- DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+ wake_up_all(&dev_priv->render_ring.irq_queue);
dev_priv->hangcheck_count = 0;
- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+ mod_timer(&dev_priv->hangcheck_timer,
+ jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
- DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+ wake_up_all(&dev_priv->bsd_ring.irq_queue);
if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
intel_prepare_page_flip(dev, 0);
@@ -1065,7 +1063,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
(pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
(iir & I915_ASLE_INTERRUPT))
- opregion_asle_intr(dev);
+ intel_opregion_asle_intr(dev);
/* With MSI, interrupts are only generated when iir
* transitions from zero to nonzero. If another bit got
@@ -1207,18 +1205,15 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- u32 pipeconf;
- pipeconf = I915_READ(pipeconf_reg);
- if (!(pipeconf & PIPEACONF_ENABLE))
+ if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
- else if (IS_I965G(dev))
+ else if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
else
@@ -1252,7 +1247,7 @@ void i915_enable_interrupt (struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
if (!HAS_PCH_SPLIT(dev))
- opregion_enable_asle(dev);
+ intel_opregion_enable_asle(dev);
dev_priv->irq_enabled = 1;
}
@@ -1311,7 +1306,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
return -EINVAL;
}
-struct drm_i915_gem_request *
+static struct drm_i915_gem_request *
i915_get_tail_request(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -1331,11 +1326,7 @@ void i915_hangcheck_elapsed(unsigned long data)
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t acthd, instdone, instdone1;
- /* No reset support on this chip yet. */
- if (IS_GEN6(dev))
- return;
-
- if (!IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen < 4) {
acthd = I915_READ(ACTHD);
instdone = I915_READ(INSTDONE);
instdone1 = 0;
@@ -1347,9 +1338,8 @@ void i915_hangcheck_elapsed(unsigned long data)
/* If all work is done then ACTHD clearly hasn't advanced. */
if (list_empty(&dev_priv->render_ring.request_list) ||
- i915_seqno_passed(i915_get_gem_seqno(dev,
- &dev_priv->render_ring),
- i915_get_tail_request(dev)->seqno)) {
+ i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring),
+ i915_get_tail_request(dev)->seqno)) {
bool missed_wakeup = false;
dev_priv->hangcheck_count = 0;
@@ -1357,13 +1347,13 @@ void i915_hangcheck_elapsed(unsigned long data)
/* Issue a wake-up to catch stuck h/w. */
if (dev_priv->render_ring.waiting_gem_seqno &&
waitqueue_active(&dev_priv->render_ring.irq_queue)) {
- DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+ wake_up_all(&dev_priv->render_ring.irq_queue);
missed_wakeup = true;
}
if (dev_priv->bsd_ring.waiting_gem_seqno &&
waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
- DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+ wake_up_all(&dev_priv->bsd_ring.irq_queue);
missed_wakeup = true;
}
@@ -1377,6 +1367,21 @@ void i915_hangcheck_elapsed(unsigned long data)
dev_priv->last_instdone1 == instdone1) {
if (dev_priv->hangcheck_count++ > 1) {
DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+
+ if (!IS_GEN2(dev)) {
+ /* Is the chip hanging on a WAIT_FOR_EVENT?
+ * If so we can simply poke the RB_WAIT bit
+ * and break the hang. This should work on
+ * all but the second generation chipsets.
+ */
+ u32 tmp = I915_READ(PRB0_CTL);
+ if (tmp & RING_WAIT) {
+ I915_WRITE(PRB0_CTL, tmp);
+ POSTING_READ(PRB0_CTL);
+ goto out;
+ }
+ }
+
i915_handle_error(dev, true);
return;
}
@@ -1388,8 +1393,10 @@ void i915_hangcheck_elapsed(unsigned long data)
dev_priv->last_instdone1 = instdone1;
}
+out:
/* Reset timer in case the chip hangs without another request being added */
- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+ mod_timer(&dev_priv->hangcheck_timer,
+ jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
/* drm_dma.h hooks
@@ -1436,17 +1443,19 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
(void) I915_READ(DEIER);
- /* Gen6 only needs render pipe_control now */
if (IS_GEN6(dev))
- render_mask = GT_PIPE_NOTIFY;
+ render_mask = GT_PIPE_NOTIFY | GT_GEN6_BSD_USER_INTERRUPT;
dev_priv->gt_irq_mask_reg = ~render_mask;
dev_priv->gt_irq_enable_reg = render_mask;
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev)) {
I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
+ I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT);
+ }
+
I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
(void) I915_READ(GTIER);
@@ -1578,7 +1587,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
- opregion_enable_asle(dev);
+ intel_opregion_enable_asle(dev);
return 0;
}
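
i915_get_vblank_counter() has to merge a high and a low frame-count register that are not latched together, so it re-reads the high half until two consecutive reads agree before combining the fields. The same read-high/read-low/read-high-again idiom against a fake register pair (the counter model and the 8-bit split below are invented for the sketch):

/* Stable read of a counter split across two "registers" that can tick between
 * accesses. The register model below is invented for the sketch. */
#include <stdio.h>
#include <stdint.h>

static uint32_t hw_frame_counter = 0x1ff;  /* about to carry into the high half */

static uint32_t read_high(void) { return hw_frame_counter >> 8; }

static uint32_t read_low(void)
{
	uint32_t low = hw_frame_counter & 0xff;
	hw_frame_counter++;        /* pretend a frame completes mid-read */
	return low;
}

static uint32_t get_frame_count(void)
{
	uint32_t high1, high2, low;

	/* High & low aren't synchronized: retry until the high half is stable
	 * across two reads, exactly like the loop in i915_get_vblank_counter(). */
	do {
		high1 = read_high();
		low = read_low();
		high2 = read_high();
	} while (high1 != high2);

	return (high1 << 8) | low;
}

int main(void)
{
	printf("frame count 0x%x\n", (unsigned)get_frame_count());
	return 0;
}
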
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4f5e15577e89..d02de212e6ad 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -25,52 +25,16 @@
#ifndef _I915_REG_H_
#define _I915_REG_H_
+#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+
/*
* The Bridge device's PCI config space has information about the
* fb aperture size and the amount of pre-reserved memory.
+ * This is all handled in the intel-gtt.ko module. i915.ko only
+ * cares about the vga bit for the vga arbiter.
*/
#define INTEL_GMCH_CTRL 0x52
#define INTEL_GMCH_VGA_DISABLE (1 << 1)
-#define INTEL_GMCH_ENABLED 0x4
-#define INTEL_GMCH_MEM_MASK 0x1
-#define INTEL_GMCH_MEM_64M 0x1
-#define INTEL_GMCH_MEM_128M 0
-
-#define INTEL_GMCH_GMS_MASK (0xf << 4)
-#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4)
-
-#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
-#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
-#define INTEL_GMCH_GMS_STOLEN_128M (0x8 << 4)
-#define INTEL_GMCH_GMS_STOLEN_256M (0x9 << 4)
-#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
-#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
-#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
-#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
-
-#define SNB_GMCH_CTRL 0x50
-#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
-#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
-#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
-#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
-#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
-#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
-#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
-#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
-#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
-#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
-#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
-#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
-#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
-#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
-#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
-#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
-#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
/* PCI config space */
@@ -106,10 +70,13 @@
#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
#define LBB 0xf4
-#define GDRST 0xc0
-#define GDRST_FULL (0<<2)
-#define GDRST_RENDER (1<<2)
-#define GDRST_MEDIA (3<<2)
+
+/* Graphics reset regs */
+#define I965_GDRST 0xc0 /* PCI config register */
+#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
+#define GRDOM_FULL (0<<2)
+#define GRDOM_RENDER (1<<2)
+#define GRDOM_MEDIA (3<<2)
/* VGA stuff */
@@ -192,11 +159,11 @@
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
#define MI_STORE_DWORD_INDEX_SHIFT 2
#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
+#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
-
/*
* 3D instructions used by the kernel
*/
@@ -249,6 +216,16 @@
#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
+
+/*
+ * Reset registers
+ */
+#define DEBUG_RESET_I830 0x6070
+#define DEBUG_RESET_FULL (1<<7)
+#define DEBUG_RESET_RENDER (1<<8)
+#define DEBUG_RESET_DISPLAY (1<<9)
+
+
/*
* Fence registers
*/
@@ -283,6 +260,16 @@
#define PRB0_HEAD 0x02034
#define PRB0_START 0x02038
#define PRB0_CTL 0x0203c
+#define RENDER_RING_BASE 0x02000
+#define BSD_RING_BASE 0x04000
+#define GEN6_BSD_RING_BASE 0x12000
+#define RING_TAIL(base) ((base)+0x30)
+#define RING_HEAD(base) ((base)+0x34)
+#define RING_START(base) ((base)+0x38)
+#define RING_CTL(base) ((base)+0x3c)
+#define RING_HWS_PGA(base) ((base)+0x80)
+#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
+#define RING_ACTHD(base) ((base)+0x74)
#define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
@@ -295,6 +282,8 @@
#define RING_VALID_MASK 0x00000001
#define RING_VALID 0x00000001
#define RING_INVALID 0x00000000
+#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
+#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
#define PRB1_TAIL 0x02040 /* 915+ only */
#define PRB1_HEAD 0x02044 /* 915+ only */
#define PRB1_START 0x02048 /* 915+ only */
@@ -306,7 +295,6 @@
#define INSTDONE1 0x0207c /* 965+ only */
#define ACTHD_I965 0x02074
#define HWS_PGA 0x02080
-#define HWS_PGA_GEN6 0x04080
#define HWS_ADDRESS_MASK 0xfffff000
#define HWS_START_ADDRESS_SHIFT 4
#define PWRCTXA 0x2088 /* 965GM+ only */
@@ -464,17 +452,17 @@
#define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25)
#define GEN6_BLITTER_SYNC_STATUS (1 << 24)
#define GEN6_BLITTER_USER_INTERRUPT (1 << 22)
-/*
- * BSD (bit stream decoder instruction and interrupt control register defines
- * (G4X and Ironlake only)
- */
-#define BSD_RING_TAIL 0x04030
-#define BSD_RING_HEAD 0x04034
-#define BSD_RING_START 0x04038
-#define BSD_RING_CTL 0x0403c
-#define BSD_RING_ACTHD 0x04074
-#define BSD_HWS_PGA 0x04080
+#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
+#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16)
+#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0)
+#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0
+#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
+
+#define GEN6_BSD_IMR 0x120a8
+#define GEN6_BSD_IMR_USER_INTERRUPT (1 << 12)
+
+#define GEN6_BSD_RNCID 0x12198
/*
* Framebuffer compression (915+ only)
@@ -579,12 +567,51 @@
# define GPIO_DATA_VAL_IN (1 << 12)
# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
-#define GMBUS0 0x5100
-#define GMBUS1 0x5104
-#define GMBUS2 0x5108
-#define GMBUS3 0x510c
-#define GMBUS4 0x5110
-#define GMBUS5 0x5120
+#define GMBUS0 0x5100 /* clock/port select */
+#define GMBUS_RATE_100KHZ (0<<8)
+#define GMBUS_RATE_50KHZ (1<<8)
+#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
+#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */
+#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */
+#define GMBUS_PORT_DISABLED 0
+#define GMBUS_PORT_SSC 1
+#define GMBUS_PORT_VGADDC 2
+#define GMBUS_PORT_PANEL 3
+#define GMBUS_PORT_DPC 4 /* HDMIC */
+#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
+ /* 6 reserved */
+#define GMBUS_PORT_DPD 7 /* HDMID */
+#define GMBUS_NUM_PORTS 8
+#define GMBUS1 0x5104 /* command/status */
+#define GMBUS_SW_CLR_INT (1<<31)
+#define GMBUS_SW_RDY (1<<30)
+#define GMBUS_ENT (1<<29) /* enable timeout */
+#define GMBUS_CYCLE_NONE (0<<25)
+#define GMBUS_CYCLE_WAIT (1<<25)
+#define GMBUS_CYCLE_INDEX (2<<25)
+#define GMBUS_CYCLE_STOP (4<<25)
+#define GMBUS_BYTE_COUNT_SHIFT 16
+#define GMBUS_SLAVE_INDEX_SHIFT 8
+#define GMBUS_SLAVE_ADDR_SHIFT 1
+#define GMBUS_SLAVE_READ (1<<0)
+#define GMBUS_SLAVE_WRITE (0<<0)
+#define GMBUS2 0x5108 /* status */
+#define GMBUS_INUSE (1<<15)
+#define GMBUS_HW_WAIT_PHASE (1<<14)
+#define GMBUS_STALL_TIMEOUT (1<<13)
+#define GMBUS_INT (1<<12)
+#define GMBUS_HW_RDY (1<<11)
+#define GMBUS_SATOER (1<<10)
+#define GMBUS_ACTIVE (1<<9)
+#define GMBUS3 0x510c /* data buffer bytes 3-0 */
+#define GMBUS4 0x5110 /* interrupt mask (Pineview+) */
+#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
+#define GMBUS_NAK_EN (1<<3)
+#define GMBUS_IDLE_EN (1<<2)
+#define GMBUS_HW_WAIT_EN (1<<1)
+#define GMBUS_HW_RDY_EN (1<<0)
+#define GMBUS5 0x5120 /* byte index */
+#define GMBUS_2BYTE_INDEX_EN (1<<31)
/*
* Clock control & power management
@@ -603,6 +630,7 @@
#define VGA1_PD_P1_MASK (0x1f << 8)
#define DPLL_A 0x06014
#define DPLL_B 0x06018
+#define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_DVO_HIGH_SPEED (1 << 30)
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
@@ -640,24 +668,6 @@
#define ADPA_DPMS_STANDBY (2<<10)
#define ADPA_DPMS_OFF (3<<10)
-#define RING_TAIL 0x00
-#define TAIL_ADDR 0x001FFFF8
-#define RING_HEAD 0x04
-#define HEAD_WRAP_COUNT 0xFFE00000
-#define HEAD_WRAP_ONE 0x00200000
-#define HEAD_ADDR 0x001FFFFC
-#define RING_START 0x08
-#define START_ADDR 0xFFFFF000
-#define RING_LEN 0x0C
-#define RING_NR_PAGES 0x001FF000
-#define RING_REPORT_MASK 0x00000006
-#define RING_REPORT_64K 0x00000002
-#define RING_REPORT_128K 0x00000004
-#define RING_NO_REPORT 0x00000000
-#define RING_VALID_MASK 0x00000001
-#define RING_VALID 0x00000001
-#define RING_INVALID 0x00000000
-
/* Scratch pad debug 0 reg:
*/
#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
@@ -736,10 +746,13 @@
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
#define DPLL_B_MD 0x06020 /* 965+ only */
+#define DPLL_MD(pipe) _PIPE(pipe, DPLL_A_MD, DPLL_B_MD)
#define FPA0 0x06040
#define FPA1 0x06044
#define FPB0 0x06048
#define FPB1 0x0604c
+#define FP0(pipe) _PIPE(pipe, FPA0, FPB0)
+#define FP1(pipe) _PIPE(pipe, FPA1, FPB1)
#define FP_N_DIV_MASK 0x003f0000
#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
#define FP_N_DIV_SHIFT 16
@@ -760,6 +773,7 @@
#define DPLLA_TEST_M_BYPASS (1 << 2)
#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
#define D_STATE 0x6104
+#define DSTATE_GFX_RESET_I830 (1<<6)
#define DSTATE_PLL_D3_OFF (1<<3)
#define DSTATE_GFX_CLOCK_GATING (1<<1)
#define DSTATE_DOT_CLOCK_GATING (1<<0)
@@ -926,6 +940,8 @@
#define CLKCFG_MEM_800 (3 << 4)
#define CLKCFG_MEM_MASK (7 << 4)
+#define TSC1 0x11001
+#define TSE (1<<0)
#define TR1 0x11006
#define TSFS 0x11020
#define TSFS_SLOPE_MASK 0x0000ff00
@@ -1070,6 +1086,8 @@
#define MEMSTAT_SRC_CTL_STDBY 3
#define RCPREVBSYTUPAVG 0x113b8
#define RCPREVBSYTDNAVG 0x113bc
+#define PMMISC 0x11214
+#define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */
#define SDEW 0x1124c
#define CSIEW0 0x11250
#define CSIEW1 0x11254
@@ -1150,6 +1168,15 @@
#define PIPEBSRC 0x6101c
#define BCLRPAT_B 0x61020
+#define HTOTAL(pipe) _PIPE(pipe, HTOTAL_A, HTOTAL_B)
+#define HBLANK(pipe) _PIPE(pipe, HBLANK_A, HBLANK_B)
+#define HSYNC(pipe) _PIPE(pipe, HSYNC_A, HSYNC_B)
+#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
+#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
+#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
+#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
+#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
+
/* VGA port control */
#define ADPA 0x61100
#define ADPA_DAC_ENABLE (1<<31)
@@ -1481,6 +1508,7 @@
# define TV_TEST_MODE_MASK (7 << 0)
#define TV_DAC 0x68004
+# define TV_DAC_SAVE 0x00ffff00
/**
* Reports that DAC state change logic has reported change (RO).
*
@@ -2075,29 +2103,35 @@
/* Display & cursor control */
-/* dithering flag on Ironlake */
-#define PIPE_ENABLE_DITHER (1 << 4)
-#define PIPE_DITHER_TYPE_MASK (3 << 2)
-#define PIPE_DITHER_TYPE_SPATIAL (0 << 2)
-#define PIPE_DITHER_TYPE_ST01 (1 << 2)
/* Pipe A */
#define PIPEADSL 0x70000
-#define DSL_LINEMASK 0x00000fff
+#define DSL_LINEMASK 0x00000fff
#define PIPEACONF 0x70008
-#define PIPEACONF_ENABLE (1<<31)
-#define PIPEACONF_DISABLE 0
-#define PIPEACONF_DOUBLE_WIDE (1<<30)
+#define PIPECONF_ENABLE (1<<31)
+#define PIPECONF_DISABLE 0
+#define PIPECONF_DOUBLE_WIDE (1<<30)
#define I965_PIPECONF_ACTIVE (1<<30)
-#define PIPEACONF_SINGLE_WIDE 0
-#define PIPEACONF_PIPE_UNLOCKED 0
-#define PIPEACONF_PIPE_LOCKED (1<<25)
-#define PIPEACONF_PALETTE 0
-#define PIPEACONF_GAMMA (1<<24)
+#define PIPECONF_SINGLE_WIDE 0
+#define PIPECONF_PIPE_UNLOCKED 0
+#define PIPECONF_PIPE_LOCKED (1<<25)
+#define PIPECONF_PALETTE 0
+#define PIPECONF_GAMMA (1<<24)
#define PIPECONF_FORCE_BORDER (1<<25)
#define PIPECONF_PROGRESSIVE (0 << 21)
#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
+#define PIPECONF_BPP_MASK (0x000000e0)
+#define PIPECONF_BPP_8 (0<<5)
+#define PIPECONF_BPP_10 (1<<5)
+#define PIPECONF_BPP_6 (2<<5)
+#define PIPECONF_BPP_12 (3<<5)
+#define PIPECONF_DITHER_EN (1<<4)
+#define PIPECONF_DITHER_TYPE_MASK (0x0000000c)
+#define PIPECONF_DITHER_TYPE_SP (0<<2)
+#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
+#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
+#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
#define PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
@@ -2128,12 +2162,15 @@
#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
-#define PIPE_BPC_MASK (7 << 5) /* Ironlake */
+#define PIPE_BPC_MASK (7 << 5) /* Ironlake */
#define PIPE_8BPC (0 << 5)
#define PIPE_10BPC (1 << 5)
#define PIPE_6BPC (2 << 5)
#define PIPE_12BPC (3 << 5)
+#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
+#define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL)
+
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
#define DSPARB_CSTART_SHIFT 7
@@ -2206,8 +2243,8 @@
#define WM1_LP_SR_EN (1<<31)
#define WM1_LP_LATENCY_SHIFT 24
#define WM1_LP_LATENCY_MASK (0x7f<<24)
-#define WM1_LP_FBC_LP1_MASK (0xf<<20)
-#define WM1_LP_FBC_LP1_SHIFT 20
+#define WM1_LP_FBC_MASK (0xf<<20)
+#define WM1_LP_FBC_SHIFT 20
#define WM1_LP_SR_MASK (0x1ff<<8)
#define WM1_LP_SR_SHIFT 8
#define WM1_LP_CURSOR_MASK (0x3f)
@@ -2333,6 +2370,14 @@
#define DSPASURF 0x7019C /* 965+ only */
#define DSPATILEOFF 0x701A4 /* 965+ only */
+#define DSPCNTR(plane) _PIPE(plane, DSPACNTR, DSPBCNTR)
+#define DSPADDR(plane) _PIPE(plane, DSPAADDR, DSPBADDR)
+#define DSPSTRIDE(plane) _PIPE(plane, DSPASTRIDE, DSPBSTRIDE)
+#define DSPPOS(plane) _PIPE(plane, DSPAPOS, DSPBPOS)
+#define DSPSIZE(plane) _PIPE(plane, DSPASIZE, DSPBSIZE)
+#define DSPSURF(plane) _PIPE(plane, DSPASURF, DSPBSURF)
+#define DSPTILEOFF(plane) _PIPE(plane, DSPATILEOFF, DSPBTILEOFF)
+
/* VBIOS flags */
#define SWF00 0x71410
#define SWF01 0x71414
@@ -2397,6 +2442,7 @@
#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
#define FDI_PLL_BIOS_0 0x46000
+#define FDI_PLL_FB_CLOCK_MASK 0xff
#define FDI_PLL_BIOS_1 0x46004
#define FDI_PLL_BIOS_2 0x46008
#define DISPLAY_PORT_PLL_BIOS_0 0x4600c
@@ -2420,46 +2466,47 @@
#define PIPEA_DATA_M1 0x60030
#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
#define TU_SIZE_MASK 0x7e000000
-#define PIPEA_DATA_M1_OFFSET 0
+#define PIPE_DATA_M1_OFFSET 0
#define PIPEA_DATA_N1 0x60034
-#define PIPEA_DATA_N1_OFFSET 0
+#define PIPE_DATA_N1_OFFSET 0
#define PIPEA_DATA_M2 0x60038
-#define PIPEA_DATA_M2_OFFSET 0
+#define PIPE_DATA_M2_OFFSET 0
#define PIPEA_DATA_N2 0x6003c
-#define PIPEA_DATA_N2_OFFSET 0
+#define PIPE_DATA_N2_OFFSET 0
#define PIPEA_LINK_M1 0x60040
-#define PIPEA_LINK_M1_OFFSET 0
+#define PIPE_LINK_M1_OFFSET 0
#define PIPEA_LINK_N1 0x60044
-#define PIPEA_LINK_N1_OFFSET 0
+#define PIPE_LINK_N1_OFFSET 0
#define PIPEA_LINK_M2 0x60048
-#define PIPEA_LINK_M2_OFFSET 0
+#define PIPE_LINK_M2_OFFSET 0
#define PIPEA_LINK_N2 0x6004c
-#define PIPEA_LINK_N2_OFFSET 0
+#define PIPE_LINK_N2_OFFSET 0
/* PIPEB timing regs are the same, but start from 0x61000 */
#define PIPEB_DATA_M1 0x61030
-#define PIPEB_DATA_M1_OFFSET 0
#define PIPEB_DATA_N1 0x61034
-#define PIPEB_DATA_N1_OFFSET 0
#define PIPEB_DATA_M2 0x61038
-#define PIPEB_DATA_M2_OFFSET 0
#define PIPEB_DATA_N2 0x6103c
-#define PIPEB_DATA_N2_OFFSET 0
#define PIPEB_LINK_M1 0x61040
-#define PIPEB_LINK_M1_OFFSET 0
#define PIPEB_LINK_N1 0x61044
-#define PIPEB_LINK_N1_OFFSET 0
#define PIPEB_LINK_M2 0x61048
-#define PIPEB_LINK_M2_OFFSET 0
#define PIPEB_LINK_N2 0x6104c
-#define PIPEB_LINK_N2_OFFSET 0
+
+#define PIPE_DATA_M1(pipe) _PIPE(pipe, PIPEA_DATA_M1, PIPEB_DATA_M1)
+#define PIPE_DATA_N1(pipe) _PIPE(pipe, PIPEA_DATA_N1, PIPEB_DATA_N1)
+#define PIPE_DATA_M2(pipe) _PIPE(pipe, PIPEA_DATA_M2, PIPEB_DATA_M2)
+#define PIPE_DATA_N2(pipe) _PIPE(pipe, PIPEA_DATA_N2, PIPEB_DATA_N2)
+#define PIPE_LINK_M1(pipe) _PIPE(pipe, PIPEA_LINK_M1, PIPEB_LINK_M1)
+#define PIPE_LINK_N1(pipe) _PIPE(pipe, PIPEA_LINK_N1, PIPEB_LINK_N1)
+#define PIPE_LINK_M2(pipe) _PIPE(pipe, PIPEA_LINK_M2, PIPEB_LINK_M2)
+#define PIPE_LINK_N2(pipe) _PIPE(pipe, PIPEA_LINK_N2, PIPEB_LINK_N2)
/* CPU panel fitter */
#define PFA_CTL_1 0x68080
@@ -2516,7 +2563,7 @@
#define GT_SYNC_STATUS (1 << 2)
#define GT_USER_INTERRUPT (1 << 0)
#define GT_BSD_USER_INTERRUPT (1 << 5)
-
+#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
#define GTISR 0x44010
#define GTIMR 0x44014
@@ -2600,11 +2647,14 @@
#define PCH_DPLL_A 0xc6014
#define PCH_DPLL_B 0xc6018
+#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
#define PCH_FPA0 0xc6040
#define PCH_FPA1 0xc6044
#define PCH_FPB0 0xc6048
#define PCH_FPB1 0xc604c
+#define PCH_FP0(pipe) _PIPE(pipe, PCH_FPA0, PCH_FPB0)
+#define PCH_FP1(pipe) _PIPE(pipe, PCH_FPA1, PCH_FPB1)
#define PCH_DPLL_TEST 0xc606c
@@ -2690,6 +2740,13 @@
#define TRANS_VBLANK_B 0xe1010
#define TRANS_VSYNC_B 0xe1014
+#define TRANS_HTOTAL(pipe) _PIPE(pipe, TRANS_HTOTAL_A, TRANS_HTOTAL_B)
+#define TRANS_HBLANK(pipe) _PIPE(pipe, TRANS_HBLANK_A, TRANS_HBLANK_B)
+#define TRANS_HSYNC(pipe) _PIPE(pipe, TRANS_HSYNC_A, TRANS_HSYNC_B)
+#define TRANS_VTOTAL(pipe) _PIPE(pipe, TRANS_VTOTAL_A, TRANS_VTOTAL_B)
+#define TRANS_VBLANK(pipe) _PIPE(pipe, TRANS_VBLANK_A, TRANS_VBLANK_B)
+#define TRANS_VSYNC(pipe) _PIPE(pipe, TRANS_VSYNC_A, TRANS_VSYNC_B)
+
#define TRANSB_DATA_M1 0xe1030
#define TRANSB_DATA_N1 0xe1034
#define TRANSB_DATA_M2 0xe1038
@@ -2701,6 +2758,7 @@
#define TRANSACONF 0xf0008
#define TRANSBCONF 0xf1008
+#define TRANSCONF(plane) _PIPE(plane, TRANSACONF, TRANSBCONF)
#define TRANS_DISABLE (0<<31)
#define TRANS_ENABLE (1<<31)
#define TRANS_STATE_MASK (1<<30)
@@ -2725,6 +2783,7 @@
/* CPU: FDI_TX */
#define FDI_TXA_CTL 0x60100
#define FDI_TXB_CTL 0x61100
+#define FDI_TX_CTL(pipe) _PIPE(pipe, FDI_TXA_CTL, FDI_TXB_CTL)
#define FDI_TX_DISABLE (0<<31)
#define FDI_TX_ENABLE (1<<31)
#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
@@ -2766,8 +2825,8 @@
/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
#define FDI_RXA_CTL 0xf000c
#define FDI_RXB_CTL 0xf100c
+#define FDI_RX_CTL(pipe) _PIPE(pipe, FDI_RXA_CTL, FDI_RXB_CTL)
#define FDI_RX_ENABLE (1<<31)
-#define FDI_RX_DISABLE (0<<31)
/* train, dp width same as FDI_TX */
#define FDI_DP_PORT_WIDTH_X8 (7<<19)
#define FDI_8BPC (0<<16)
@@ -2782,8 +2841,7 @@
#define FDI_FS_ERR_REPORT_ENABLE (1<<9)
#define FDI_FE_ERR_REPORT_ENABLE (1<<8)
#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
-#define FDI_SEL_RAWCLK (0<<4)
-#define FDI_SEL_PCDCLK (1<<4)
+#define FDI_PCDCLK (1<<4)
/* CPT */
#define FDI_AUTO_TRAINING (1<<10)
#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8)
@@ -2798,6 +2856,9 @@
#define FDI_RXA_TUSIZE2 0xf0038
#define FDI_RXB_TUSIZE1 0xf1030
#define FDI_RXB_TUSIZE2 0xf1038
+#define FDI_RX_MISC(pipe) _PIPE(pipe, FDI_RXA_MISC, FDI_RXB_MISC)
+#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, FDI_RXA_TUSIZE1, FDI_RXB_TUSIZE1)
+#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, FDI_RXA_TUSIZE2, FDI_RXB_TUSIZE2)
/* FDI_RX interrupt register format */
#define FDI_RX_INTER_LANE_ALIGN (1<<10)
@@ -2816,6 +2877,8 @@
#define FDI_RXA_IMR 0xf0018
#define FDI_RXB_IIR 0xf1014
#define FDI_RXB_IMR 0xf1018
+#define FDI_RX_IIR(pipe) _PIPE(pipe, FDI_RXA_IIR, FDI_RXB_IIR)
+#define FDI_RX_IMR(pipe) _PIPE(pipe, FDI_RXA_IMR, FDI_RXB_IMR)
#define FDI_PLL_CTL_1 0xfe000
#define FDI_PLL_CTL_2 0xfe004
@@ -2935,6 +2998,7 @@
#define TRANS_DP_CTL_A 0xe0300
#define TRANS_DP_CTL_B 0xe1300
#define TRANS_DP_CTL_C 0xe2300
+#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000)
#define TRANS_DP_OUTPUT_ENABLE (1<<31)
#define TRANS_DP_PORT_SEL_B (0<<29)
#define TRANS_DP_PORT_SEL_C (1<<29)
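
The new _PIPE(pipe, a, b) helper turns each A/B register pair into an indexed lookup: pipe 0 yields the A register and pipe 1 adds the A-to-B stride exactly once. A quick check of the arithmetic with stand-in addresses (REG_A/REG_B are example values, not taken from the header):

/* Expansion check for the new _PIPE() indexing macro. REG_A/REG_B are made-up
 * addresses standing in for any A/B register pair (PIPEACONF/PIPEBCONF, ...). */
#include <assert.h>
#include <stdio.h>

#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))

#define REG_A 0x70008
#define REG_B 0x71008
#define REG(pipe) _PIPE(pipe, REG_A, REG_B)

int main(void)
{
	assert(REG(0) == REG_A);  /* pipe 0 selects the A register */
	assert(REG(1) == REG_B);  /* pipe 1 adds the A->B stride exactly once */

	printf("REG(0)=0x%x REG(1)=0x%x\n", (unsigned)REG(0), (unsigned)REG(1));
	return 0;
}
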
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 31f08581e93a..989c19d2d959 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveFPA1 = I915_READ(FPA1);
dev_priv->saveDPLL_A = I915_READ(DPLL_A);
}
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -294,7 +294,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
dev_priv->saveDSPASURF = I915_READ(DSPASURF);
dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
}
@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveFPB1 = I915_READ(FPB1);
dev_priv->saveDPLL_B = I915_READ(DPLL_B);
}
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -351,7 +351,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
- if (IS_I965GM(dev) || IS_GM45(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
}
@@ -404,7 +404,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
POSTING_READ(dpll_a_reg);
udelay(150);
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
POSTING_READ(DPLL_A_MD);
}
@@ -448,7 +448,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
}
@@ -473,7 +473,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
POSTING_READ(dpll_b_reg);
udelay(150);
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
POSTING_READ(DPLL_B_MD);
}
@@ -517,7 +517,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
}
@@ -550,7 +550,7 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
dev_priv->saveCURBPOS = I915_READ(CURBPOS);
dev_priv->saveCURBBASE = I915_READ(CURBBASE);
- if (!IS_I9XX(dev))
+ if (IS_GEN2(dev))
dev_priv->saveCURSIZE = I915_READ(CURSIZE);
/* CRT state */
@@ -573,7 +573,7 @@ void i915_save_display(struct drm_device *dev)
dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
dev_priv->saveLVDS = I915_READ(LVDS);
@@ -664,7 +664,7 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
- if (!IS_I9XX(dev))
+ if (IS_GEN2(dev))
I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
/* CRT state */
@@ -674,7 +674,7 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(ADPA, dev_priv->saveADPA);
/* LVDS state */
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
if (HAS_PCH_SPLIT(dev)) {
@@ -878,9 +878,7 @@ int i915_restore_state(struct drm_device *dev)
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
- /* I2C state */
- intel_i2c_reset_gmbus(dev);
+ intel_i2c_reset(dev);
return 0;
}
-
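
The suspend/resume changes above replace chip-name predicates such as IS_I965G() with comparisons against INTEL_INFO(dev)->gen, which reads as "this register set exists from gen 4 onwards". A toy illustration of that capability-by-generation style (the device_info struct and the gen values are invented for the sketch):

/* Capability checks keyed on a device generation number rather than on
 * per-chipset IS_xxx() macros. The device_info struct here is hypothetical. */
#include <stdio.h>

struct device_info {
	int gen;
};

static void save_plane_regs(const struct device_info *info)
{
	printf("saving DSPxCNTR/DSPxSTRIDE\n");
	if (info->gen >= 4)          /* surface/tile-offset regs exist from gen4 on */
		printf("saving DSPxSURF/DSPxTILEOFF\n");
}

int main(void)
{
	struct device_info gen3_chip = { .gen = 3 };
	struct device_info gen4_chip = { .gen = 4 };

	save_plane_regs(&gen3_chip);
	save_plane_regs(&gen4_chip);
	return 0;
}
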
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 96f75d7f6633..b1f73ac0f3fd 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -129,10 +129,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
int i, temp_downclock;
struct drm_display_mode *temp_mode;
- /* Defaults if we can't find VBT info */
- dev_priv->lvds_dither = 0;
- dev_priv->lvds_vbt = 0;
-
lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
if (!lvds_options)
return;
@@ -140,6 +136,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
dev_priv->lvds_dither = lvds_options->pixel_dither;
if (lvds_options->panel_type == 0xff)
return;
+
panel_type = lvds_options->panel_type;
lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
@@ -169,6 +166,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
((unsigned char *)entry + dvo_timing_offset);
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+ if (!panel_fixed_mode)
+ return;
fill_detail_timing_data(panel_fixed_mode, dvo_timing);
@@ -230,8 +229,6 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
struct lvds_dvo_timing *dvo_timing;
struct drm_display_mode *panel_fixed_mode;
- dev_priv->sdvo_lvds_vbt_mode = NULL;
-
sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
if (!sdvo_lvds_options)
return;
@@ -260,10 +257,6 @@ parse_general_features(struct drm_i915_private *dev_priv,
struct drm_device *dev = dev_priv->dev;
struct bdb_general_features *general;
- /* Set sensible defaults in case we can't find the general block */
- dev_priv->int_tv_support = 1;
- dev_priv->int_crt_support = 1;
-
general = find_section(bdb, BDB_GENERAL_FEATURES);
if (general) {
dev_priv->int_tv_support = general->int_tv_support;
@@ -289,14 +282,6 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
struct bdb_general_definitions *general;
- const int crt_bus_map_table[] = {
- GPIOB,
- GPIOA,
- GPIOC,
- GPIOD,
- GPIOE,
- GPIOF,
- };
general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (general) {
@@ -304,10 +289,8 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
if (block_size >= sizeof(*general)) {
int bus_pin = general->crt_ddc_gmbus_pin;
DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
- if ((bus_pin >= 1) && (bus_pin <= 6)) {
- dev_priv->crt_ddc_bus =
- crt_bus_map_table[bus_pin-1];
- }
+ if (bus_pin >= 1 && bus_pin <= 6)
+ dev_priv->crt_ddc_pin = bus_pin;
} else {
DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
block_size);
@@ -317,7 +300,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
static void
parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
- struct bdb_header *bdb)
+ struct bdb_header *bdb)
{
struct sdvo_device_mapping *p_mapping;
struct bdb_general_definitions *p_defs;
@@ -327,7 +310,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!p_defs) {
- DRM_DEBUG_KMS("No general definition block is found\n");
+ DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
return;
}
/* judge whether the size of child device meets the requirements.
@@ -377,7 +360,16 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
p_mapping->slave_addr = p_child->slave_addr;
p_mapping->dvo_wiring = p_child->dvo_wiring;
p_mapping->ddc_pin = p_child->ddc_pin;
+ p_mapping->i2c_pin = p_child->i2c_pin;
+ p_mapping->i2c_speed = p_child->i2c_speed;
p_mapping->initialized = 1;
+ DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n",
+ p_mapping->dvo_port,
+ p_mapping->slave_addr,
+ p_mapping->dvo_wiring,
+ p_mapping->ddc_pin,
+ p_mapping->i2c_pin,
+ p_mapping->i2c_speed);
} else {
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
"two SDVO device.\n");
@@ -409,14 +401,11 @@ parse_driver_features(struct drm_i915_private *dev_priv,
if (!driver)
return;
- if (driver && SUPPORTS_EDP(dev) &&
- driver->lvds_config == BDB_DRIVER_FEATURE_EDP) {
- dev_priv->edp_support = 1;
- } else {
- dev_priv->edp_support = 0;
- }
+ if (SUPPORTS_EDP(dev) &&
+ driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+ dev_priv->edp.support = 1;
- if (driver && driver->dual_frequency)
+ if (driver->dual_frequency)
dev_priv->render_reclock_avail = true;
}
@@ -427,26 +416,40 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
edp = find_section(bdb, BDB_EDP);
if (!edp) {
- if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) {
+ if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
- "supported, assume 18bpp panel color "
- "depth.\n");
- dev_priv->edp_bpp = 18;
+ "supported, assume %dbpp panel color "
+ "depth.\n",
+ dev_priv->edp.bpp);
}
return;
}
switch ((edp->color_depth >> (panel_type * 2)) & 3) {
case EDP_18BPP:
- dev_priv->edp_bpp = 18;
+ dev_priv->edp.bpp = 18;
break;
case EDP_24BPP:
- dev_priv->edp_bpp = 24;
+ dev_priv->edp.bpp = 24;
break;
case EDP_30BPP:
- dev_priv->edp_bpp = 30;
+ dev_priv->edp.bpp = 30;
break;
}
+
+ dev_priv->edp.rate = edp->link_params[panel_type].rate;
+ dev_priv->edp.lanes = edp->link_params[panel_type].lanes;
+ dev_priv->edp.preemphasis = edp->link_params[panel_type].preemphasis;
+ dev_priv->edp.vswing = edp->link_params[panel_type].vswing;
+
+ DRM_DEBUG_KMS("eDP vBIOS settings: bpp=%d, rate=%d, lanes=%d, preemphasis=%d, vswing=%d\n",
+ dev_priv->edp.bpp,
+ dev_priv->edp.rate,
+ dev_priv->edp.lanes,
+ dev_priv->edp.preemphasis,
+ dev_priv->edp.vswing);
+
+ dev_priv->edp.initialized = true;
}
static void
@@ -460,7 +463,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!p_defs) {
- DRM_DEBUG_KMS("No general definition block is found\n");
+ DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
return;
}
/* judge whether the size of child device meets the requirements.
@@ -513,6 +516,28 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
}
return;
}
+
+static void
+init_vbt_defaults(struct drm_i915_private *dev_priv)
+{
+ dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
+
+ /* LFP panel data */
+ dev_priv->lvds_dither = 1;
+ dev_priv->lvds_vbt = 0;
+
+ /* SDVO panel data */
+ dev_priv->sdvo_lvds_vbt_mode = NULL;
+
+ /* general features */
+ dev_priv->int_tv_support = 1;
+ dev_priv->int_crt_support = 1;
+ dev_priv->lvds_use_ssc = 0;
+
+ /* eDP data */
+ dev_priv->edp.bpp = 18;
+}
+
/**
* intel_init_bios - initialize VBIOS settings & find VBT
* @dev: DRM device
@@ -520,11 +545,6 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
* Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
* to appropriate values.
*
- * VBT existence is a sanity check that is relied on by other i830_bios.c code.
- * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
- * feed an updated VBT back through that, compared to what we'll fetch using
- * this method of groping around in the BIOS data.
- *
* Returns 0 on success, nonzero on failure.
*/
bool
@@ -532,31 +552,47 @@ intel_init_bios(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev->pdev;
- struct vbt_header *vbt = NULL;
- struct bdb_header *bdb;
- u8 __iomem *bios;
- size_t size;
- int i;
-
- bios = pci_map_rom(pdev, &size);
- if (!bios)
- return -1;
-
- /* Scour memory looking for the VBT signature */
- for (i = 0; i + 4 < size; i++) {
- if (!memcmp(bios + i, "$VBT", 4)) {
- vbt = (struct vbt_header *)(bios + i);
- break;
- }
+ struct bdb_header *bdb = NULL;
+ u8 __iomem *bios = NULL;
+
+ init_vbt_defaults(dev_priv);
+
+ /* XXX Should this validation be moved to intel_opregion.c? */
+ if (dev_priv->opregion.vbt) {
+ struct vbt_header *vbt = dev_priv->opregion.vbt;
+ if (memcmp(vbt->signature, "$VBT", 4) == 0) {
+ DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n",
+ vbt->signature);
+ bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
+ } else
+ dev_priv->opregion.vbt = NULL;
}
- if (!vbt) {
- DRM_ERROR("VBT signature missing\n");
- pci_unmap_rom(pdev, bios);
- return -1;
- }
+ if (bdb == NULL) {
+ struct vbt_header *vbt = NULL;
+ size_t size;
+ int i;
+
+ bios = pci_map_rom(pdev, &size);
+ if (!bios)
+ return -1;
+
+ /* Scour memory looking for the VBT signature */
+ for (i = 0; i + 4 < size; i++) {
+ if (!memcmp(bios + i, "$VBT", 4)) {
+ vbt = (struct vbt_header *)(bios + i);
+ break;
+ }
+ }
- bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+ if (!vbt) {
+ DRM_ERROR("VBT signature missing\n");
+ pci_unmap_rom(pdev, bios);
+ return -1;
+ }
+
+ bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+ }
/* Grab useful general definitions */
parse_general_features(dev_priv, bdb);
@@ -568,7 +604,8 @@ intel_init_bios(struct drm_device *dev)
parse_driver_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
- pci_unmap_rom(pdev, bios);
+ if (bios)
+ pci_unmap_rom(pdev, bios);
return 0;
}
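
intel_init_bios() now prefers the VBT handed over via the OpRegion and only falls back to scanning the mapped ROM for the literal "$VBT" signature, then follows bdb_offset to the BDB header. The scan itself is a sliding memcmp; a hedged standalone version over an in-memory buffer (the buffer contents and the reduced vbt_header layout are invented here):

/* Locate a "$VBT" signature in a buffer and follow its bdb_offset, the same
 * way the ROM fallback path does. The 'rom' contents and the reduced header
 * layout are stand-ins for the sketch. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct vbt_header {          /* only the fields this sketch needs */
	char signature[4];
	uint16_t bdb_offset;
} __attribute__((packed));

int main(void)
{
	unsigned char rom[64] = { 0 };
	size_t size = sizeof(rom);

	/* Plant a fake VBT at offset 16 whose BDB starts 8 bytes later. */
	struct vbt_header hdr = { { '$', 'V', 'B', 'T' }, 8 };
	memcpy(&rom[16], &hdr, sizeof(hdr));

	struct vbt_header *vbt = NULL;
	size_t i;

	/* Scour the buffer looking for the VBT signature, as in the ROM path. */
	for (i = 0; i + 4 < size; i++) {
		if (!memcmp(rom + i, "$VBT", 4)) {
			vbt = (struct vbt_header *)(rom + i);
			break;
		}
	}

	if (!vbt) {
		fprintf(stderr, "VBT signature missing\n");
		return 1;
	}

	printf("found $VBT at %zu, BDB at %zu\n", i, i + vbt->bdb_offset);
	return 0;
}
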
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 4c18514f6f80..e1a598f2a966 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -197,7 +197,8 @@ struct bdb_general_features {
struct child_device_config {
u16 handle;
u16 device_type;
- u8 device_id[10]; /* See DEVICE_TYPE_* above */
+ u8 i2c_speed;
+ u8 rsvd[9];
u16 addin_offset;
u8 dvo_port; /* See Device_PORT_* above */
u8 i2c_pin;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 197d4f32585a..389fcd2aea1f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -79,7 +79,7 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
- if (!IS_I9XX(dev))
+ if (IS_GEN2(dev))
max_clock = 350000;
else
max_clock = 400000;
@@ -123,7 +123,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
* Disable separate mode multiplier used when cloning SDVO to CRT
* XXX this needs to be adjusted when we really are cloning
*/
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
dpll_md = I915_READ(dpll_md_reg);
I915_WRITE(dpll_md_reg,
dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -187,7 +187,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(PCH_ADPA, adpa);
if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
- 1000, 1))
+ 1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
if (turn_off_dac) {
@@ -244,7 +244,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
/* wait for FORCE_DETECT to go off */
if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
CRT_HOTPLUG_FORCE_DETECT) == 0,
- 1000, 1))
+ 1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
}
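
Several hunks here and in intel_display.c below drop the third argument from wait_for(); the macro now takes just a condition and a timeout in milliseconds. As a rough illustration of the idiom such a two-argument helper wraps (a sketch only, assuming a sleeping poll of about a millisecond per iteration; the in-tree macro lives in intel_drv.h and may differ in detail):

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Sketch of a wait_for(COND, MS)-style poll: spin on COND, sleeping
 * briefly between reads, until MS milliseconds have elapsed.  Evaluates
 * to 0 if COND became true, -ETIMEDOUT otherwise.
 */
#define sketch_wait_for(COND, MS) ({ \
        unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
        int ret__ = 0; \
        while (!(COND)) { \
                if (time_after(jiffies, timeout__)) { \
                        ret__ = -ETIMEDOUT; \
                        break; \
                } \
                msleep(1); \
        } \
        ret__; \
})

Call sites then read as in the hunk above: if (wait_for(<register condition>, 1000)) the wait timed out and a debug message is emitted.
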
@@ -261,21 +261,47 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
return ret;
}
+static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
+{
+ u8 buf;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = 0xA0,
+ .flags = 0,
+ .len = 1,
+ .buf = &buf,
+ },
+ };
+ /* DDC monitor detect: Does it ACK a write to 0xA0? */
+ return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
+}
+
static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
/* CRT should always be at 0, but check anyway */
if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
return false;
- return intel_ddc_probe(intel_encoder);
+ if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) {
+ DRM_DEBUG_KMS("CRT detected via DDC:0xa0\n");
+ return true;
+ }
+
+ if (intel_ddc_probe(intel_encoder, dev_priv->crt_ddc_pin)) {
+ DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+ return true;
+ }
+
+ return false;
}
static enum drm_connector_status
intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
{
- struct drm_encoder *encoder = &intel_encoder->enc;
+ struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -295,6 +321,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
uint8_t st00;
enum drm_connector_status status;
+ DRM_DEBUG_KMS("starting load-detect on CRT\n");
+
if (pipe == 0) {
bclrpat_reg = BCLRPAT_A;
vtotal_reg = VTOTAL_A;
@@ -324,9 +352,10 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
/* Set the border color to purple. */
I915_WRITE(bclrpat_reg, 0x500050);
- if (IS_I9XX(dev)) {
+ if (!IS_GEN2(dev)) {
uint32_t pipeconf = I915_READ(pipeconf_reg);
I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
+ POSTING_READ(pipeconf_reg);
/* Wait for next Vblank to substitute
* border color for Color info */
intel_wait_for_vblank(dev, pipe);
@@ -404,34 +433,37 @@ static enum drm_connector_status
intel_crt_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
struct drm_crtc *crtc;
int dpms_mode;
enum drm_connector_status status;
- if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
- if (intel_crt_detect_hotplug(connector))
+ if (I915_HAS_HOTPLUG(dev)) {
+ if (intel_crt_detect_hotplug(connector)) {
+ DRM_DEBUG_KMS("CRT detected via hotplug\n");
return connector_status_connected;
- else
+ } else
return connector_status_disconnected;
}
- if (intel_crt_detect_ddc(encoder))
+ if (intel_crt_detect_ddc(&encoder->base))
return connector_status_connected;
if (!force)
return connector->status;
/* for pre-945g platforms use load detect */
- if (encoder->crtc && encoder->crtc->enabled) {
- status = intel_crt_load_detect(encoder->crtc, intel_encoder);
+ if (encoder->base.crtc && encoder->base.crtc->enabled) {
+ status = intel_crt_load_detect(encoder->base.crtc, encoder);
} else {
- crtc = intel_get_load_detect_pipe(intel_encoder, connector,
+ crtc = intel_get_load_detect_pipe(encoder, connector,
NULL, &dpms_mode);
if (crtc) {
- status = intel_crt_load_detect(crtc, intel_encoder);
- intel_release_load_detect_pipe(intel_encoder,
+ if (intel_crt_detect_ddc(&encoder->base))
+ status = connector_status_connected;
+ else
+ status = intel_crt_load_detect(crtc, encoder);
+ intel_release_load_detect_pipe(encoder,
connector, dpms_mode);
} else
status = connector_status_unknown;
@@ -449,32 +481,18 @@ static void intel_crt_destroy(struct drm_connector *connector)
static int intel_crt_get_modes(struct drm_connector *connector)
{
- int ret;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct i2c_adapter *ddc_bus;
struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
-
- ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+ ret = intel_ddc_get_modes(connector,
+ &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
if (ret || !IS_G4X(dev))
- goto end;
+ return ret;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
- ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
-
- if (!ddc_bus) {
- dev_printk(KERN_ERR, &connector->dev->pdev->dev,
- "DDC bus registration failed for CRTDDC_D.\n");
- goto end;
- }
- /* Try to get modes by GPIOD port */
- ret = intel_ddc_get_modes(connector, ddc_bus);
- intel_i2c_destroy(ddc_bus);
-
-end:
- return ret;
-
+ return intel_ddc_get_modes(connector,
+ &dev_priv->gmbus[GMBUS_PORT_DPB].adapter);
}
static int intel_crt_set_property(struct drm_connector *connector,
@@ -507,7 +525,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
.mode_valid = intel_crt_mode_valid,
.get_modes = intel_crt_get_modes,
- .best_encoder = intel_attached_encoder,
+ .best_encoder = intel_best_encoder,
};
static const struct drm_encoder_funcs intel_crt_enc_funcs = {
@@ -520,7 +538,6 @@ void intel_crt_init(struct drm_device *dev)
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 i2c_reg;
intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
if (!intel_encoder)
@@ -536,27 +553,10 @@ void intel_crt_init(struct drm_device *dev)
drm_connector_init(dev, &intel_connector->base,
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
- drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->base, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC);
- drm_mode_connector_attach_encoder(&intel_connector->base,
- &intel_encoder->enc);
-
- /* Set up the DDC bus. */
- if (HAS_PCH_SPLIT(dev))
- i2c_reg = PCH_GPIOA;
- else {
- i2c_reg = GPIOA;
- /* Use VBT information for CRT DDC if available */
- if (dev_priv->crt_ddc_bus != 0)
- i2c_reg = dev_priv->crt_ddc_bus;
- }
- intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
- if (!intel_encoder->ddc_bus) {
- dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
- "failed.\n");
- return;
- }
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_ANALOG;
intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
@@ -566,7 +566,7 @@ void intel_crt_init(struct drm_device *dev)
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_crt_helper_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
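
With the per-encoder DDC buses gone, CRT detection above now goes through the shared GMBUS adapters: first a bare write to the DDC address (0xA0 is the 8-bit write form of the 7-bit EDID address 0x50), then an EDID probe via intel_ddc_probe(). For reference, a minimal presence probe of that kind against a generic struct i2c_adapter, written with the 7-bit addressing that struct i2c_msg normally carries (an illustration, not the driver's code):

#include <linux/i2c.h>

/* Does anything ACK a one-byte write at the EDID address?  A connected
 * DDC-capable monitor will; an empty connector will not.
 */
static bool ddc_probe_sketch(struct i2c_adapter *adapter)
{
        u8 offset = 0;
        struct i2c_msg msg = {
                .addr = 0x50,   /* 7-bit EDID address (0xA0/0xA1 in 8-bit form) */
                .flags = 0,     /* write */
                .len = 1,
                .buf = &offset,
        };

        return i2c_transfer(adapter, &msg, 1) == 1;
}
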
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b5bf51a4502d..69c54c5a4254 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -43,8 +43,8 @@
bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
static void intel_update_watermarks(struct drm_device *dev);
-static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule);
-static void intel_crtc_update_cursor(struct drm_crtc *crtc);
+static void intel_increase_pllclock(struct drm_crtc *crtc);
+static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
typedef struct {
/* given values */
@@ -342,6 +342,13 @@ static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
+static inline u32 /* units of 100MHz */
+intel_fdi_link_freq(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
+}
+
static const intel_limit_t intel_limits_i8xx_dvo = {
.dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
.vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
@@ -701,16 +708,16 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
limit = intel_ironlake_limit(crtc);
else if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc);
- } else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) {
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits_i9xx_lvds;
- else
- limit = &intel_limits_i9xx_sdvo;
} else if (IS_PINEVIEW(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_pineview_lvds;
else
limit = &intel_limits_pineview_sdvo;
+ } else if (!IS_GEN2(dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &intel_limits_i9xx_lvds;
+ else
+ limit = &intel_limits_i9xx_sdvo;
} else {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i8xx_lvds;
@@ -744,20 +751,17 @@ static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock
/**
* Returns whether any output on the specified pipe is of the specified type
*/
-bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
+bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_encoder *l_entry;
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
+ if (encoder->base.crtc == crtc && encoder->type == type)
+ return true;
- list_for_each_entry(l_entry, &mode_config->encoder_list, head) {
- if (l_entry && l_entry->crtc == crtc) {
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry);
- if (intel_encoder->type == type)
- return true;
- }
- }
- return false;
+ return false;
}
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
@@ -955,26 +959,26 @@ static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock)
{
- intel_clock_t clock;
- if (target < 200000) {
- clock.p1 = 2;
- clock.p2 = 10;
- clock.n = 2;
- clock.m1 = 23;
- clock.m2 = 8;
- } else {
- clock.p1 = 1;
- clock.p2 = 10;
- clock.n = 1;
- clock.m1 = 14;
- clock.m2 = 2;
- }
- clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
- clock.p = (clock.p1 * clock.p2);
- clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
- clock.vco = 0;
- memcpy(best_clock, &clock, sizeof(intel_clock_t));
- return true;
+ intel_clock_t clock;
+ if (target < 200000) {
+ clock.p1 = 2;
+ clock.p2 = 10;
+ clock.n = 2;
+ clock.m1 = 23;
+ clock.m2 = 8;
+ } else {
+ clock.p1 = 1;
+ clock.p2 = 10;
+ clock.n = 1;
+ clock.m1 = 14;
+ clock.m2 = 2;
+ }
+ clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
+ clock.p = (clock.p1 * clock.p2);
+ clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
+ clock.vco = 0;
+ memcpy(best_clock, &clock, sizeof(intel_clock_t));
+ return true;
}
/**
@@ -1007,14 +1011,14 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
/* Wait for vblank interrupt bit to set */
- if (wait_for((I915_READ(pipestat_reg) &
- PIPE_VBLANK_INTERRUPT_STATUS),
- 50, 0))
+ if (wait_for(I915_READ(pipestat_reg) &
+ PIPE_VBLANK_INTERRUPT_STATUS,
+ 50))
DRM_DEBUG_KMS("vblank wait timed out\n");
}
-/**
- * intel_wait_for_vblank_off - wait for vblank after disabling a pipe
+/*
+ * intel_wait_for_pipe_off - wait for pipe to turn off
* @dev: drm device
* @pipe: pipe to wait for
*
@@ -1022,28 +1026,41 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
* spinning on the vblank interrupt status bit, since we won't actually
* see an interrupt when the pipe is disabled.
*
- * So this function waits for the display line value to settle (it
- * usually ends up stopping at the start of the next frame).
+ * On Gen4 and above:
+ * wait for the pipe register state bit to turn off
+ *
+ * Otherwise:
+ * wait for the display line value to settle (it usually
+ * ends up stopping at the start of the next frame).
+ *
*/
-void intel_wait_for_vblank_off(struct drm_device *dev, int pipe)
+void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
- unsigned long timeout = jiffies + msecs_to_jiffies(100);
- u32 last_line;
-
- /* Wait for the display line to settle */
- do {
- last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
- mdelay(5);
- } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
- time_after(timeout, jiffies));
-
- if (time_after(jiffies, timeout))
- DRM_DEBUG_KMS("vblank wait timed out\n");
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ int reg = PIPECONF(pipe);
+
+ /* Wait for the Pipe State to go off */
+ if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
+ 100))
+ DRM_DEBUG_KMS("pipe_off wait timed out\n");
+ } else {
+ u32 last_line;
+ int reg = PIPEDSL(pipe);
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+ /* Wait for the display line to settle */
+ do {
+ last_line = I915_READ(reg) & DSL_LINEMASK;
+ mdelay(5);
+ } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
+ time_after(timeout, jiffies));
+ if (time_after(jiffies, timeout))
+ DRM_DEBUG_KMS("pipe_off wait timed out\n");
+ }
}
-/* Parameters have changed, update FBC info */
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_device *dev = crtc->dev;
@@ -1055,6 +1072,14 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
int plane, i;
u32 fbc_ctl, fbc_ctl2;
+ if (fb->pitch == dev_priv->cfb_pitch &&
+ obj_priv->fence_reg == dev_priv->cfb_fence &&
+ intel_crtc->plane == dev_priv->cfb_plane &&
+ I915_READ(FBC_CONTROL) & FBC_CTL_EN)
+ return;
+
+ i8xx_disable_fbc(dev);
+
dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
if (fb->pitch < dev_priv->cfb_pitch)
@@ -1088,7 +1113,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
I915_WRITE(FBC_CONTROL, fbc_ctl);
DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
- dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
+ dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
}
void i8xx_disable_fbc(struct drm_device *dev)
@@ -1096,19 +1121,16 @@ void i8xx_disable_fbc(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 fbc_ctl;
- if (!I915_HAS_FBC(dev))
- return;
-
- if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN))
- return; /* Already off, just return */
-
/* Disable compression */
fbc_ctl = I915_READ(FBC_CONTROL);
+ if ((fbc_ctl & FBC_CTL_EN) == 0)
+ return;
+
fbc_ctl &= ~FBC_CTL_EN;
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 0)) {
+ if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
DRM_DEBUG_KMS("FBC idle timed out\n");
return;
}
@@ -1131,14 +1153,27 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
- DPFC_CTL_PLANEB);
+ int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
u32 dpfc_ctl;
+ dpfc_ctl = I915_READ(DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
+ dev_priv->cfb_fence == obj_priv->fence_reg &&
+ dev_priv->cfb_plane == intel_crtc->plane &&
+ dev_priv->cfb_y == crtc->y)
+ return;
+
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
+ POSTING_READ(DPFC_CONTROL);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ }
+
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
dev_priv->cfb_fence = obj_priv->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
+ dev_priv->cfb_y = crtc->y;
dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
if (obj_priv->tiling_mode != I915_TILING_NONE) {
@@ -1148,7 +1183,6 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
}
- I915_WRITE(DPFC_CONTROL, dpfc_ctl);
I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
@@ -1167,10 +1201,12 @@ void g4x_disable_fbc(struct drm_device *dev)
/* Disable compression */
dpfc_ctl = I915_READ(DPFC_CONTROL);
- dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl);
- DRM_DEBUG_KMS("disabled FBC\n");
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
}
static bool g4x_fbc_enabled(struct drm_device *dev)
@@ -1188,16 +1224,30 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA :
- DPFC_CTL_PLANEB;
+ int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
u32 dpfc_ctl;
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
+ dev_priv->cfb_fence == obj_priv->fence_reg &&
+ dev_priv->cfb_plane == intel_crtc->plane &&
+ dev_priv->cfb_offset == obj_priv->gtt_offset &&
+ dev_priv->cfb_y == crtc->y)
+ return;
+
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
+ POSTING_READ(ILK_DPFC_CONTROL);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ }
+
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
dev_priv->cfb_fence = obj_priv->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
+ dev_priv->cfb_offset = obj_priv->gtt_offset;
+ dev_priv->cfb_y = crtc->y;
- dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
dpfc_ctl &= DPFC_RESERVED;
dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
if (obj_priv->tiling_mode != I915_TILING_NONE) {
@@ -1207,15 +1257,13 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
}
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
/* enable it... */
- I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) |
- DPFC_CTL_EN);
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
@@ -1227,10 +1275,12 @@ void ironlake_disable_fbc(struct drm_device *dev)
/* Disable compression */
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
- dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
- DRM_DEBUG_KMS("disabled FBC\n");
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
}
static bool ironlake_fbc_enabled(struct drm_device *dev)
@@ -1272,8 +1322,7 @@ void intel_disable_fbc(struct drm_device *dev)
/**
* intel_update_fbc - enable/disable FBC as needed
- * @crtc: CRTC to point the compressor at
- * @mode: mode in use
+ * @dev: the drm_device
*
* Set up the framebuffer compression hardware at mode set time. We
* enable it if possible:
@@ -1290,18 +1339,14 @@ void intel_disable_fbc(struct drm_device *dev)
*
* We need to enable/disable FBC on a global basis.
*/
-static void intel_update_fbc(struct drm_crtc *crtc,
- struct drm_display_mode *mode)
+static void intel_update_fbc(struct drm_device *dev)
{
- struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
+ struct drm_crtc *crtc = NULL, *tmp_crtc;
+ struct intel_crtc *intel_crtc;
+ struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj_priv;
- struct drm_crtc *tmp_crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = intel_crtc->plane;
- int crtcs_enabled = 0;
DRM_DEBUG_KMS("\n");
@@ -1311,12 +1356,6 @@ static void intel_update_fbc(struct drm_crtc *crtc,
if (!I915_HAS_FBC(dev))
return;
- if (!crtc->fb)
- return;
-
- intel_fb = to_intel_framebuffer(fb);
- obj_priv = to_intel_bo(intel_fb->obj);
-
/*
* If FBC is already on, we just have to verify that we can
* keep it that way...
@@ -1327,35 +1366,47 @@ static void intel_update_fbc(struct drm_crtc *crtc,
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
- if (tmp_crtc->enabled)
- crtcs_enabled++;
+ if (tmp_crtc->enabled) {
+ if (crtc) {
+ DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+ goto out_disable;
+ }
+ crtc = tmp_crtc;
+ }
}
- DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled);
- if (crtcs_enabled > 1) {
- DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+
+ if (!crtc || crtc->fb == NULL) {
+ DRM_DEBUG_KMS("no output, disabling\n");
+ dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
goto out_disable;
}
+
+ intel_crtc = to_intel_crtc(crtc);
+ fb = crtc->fb;
+ intel_fb = to_intel_framebuffer(fb);
+ obj_priv = to_intel_bo(intel_fb->obj);
+
if (intel_fb->obj->size > dev_priv->cfb_size) {
DRM_DEBUG_KMS("framebuffer too large, disabling "
- "compression\n");
+ "compression\n");
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
goto out_disable;
}
- if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
- (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
+ if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
+ (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
DRM_DEBUG_KMS("mode incompatible with compression, "
- "disabling\n");
+ "disabling\n");
dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
goto out_disable;
}
- if ((mode->hdisplay > 2048) ||
- (mode->vdisplay > 1536)) {
+ if ((crtc->mode.hdisplay > 2048) ||
+ (crtc->mode.vdisplay > 1536)) {
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
goto out_disable;
}
- if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
+ if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
DRM_DEBUG_KMS("plane not 0, disabling compression\n");
dev_priv->no_fbc_reason = FBC_BAD_PLANE;
goto out_disable;
@@ -1370,18 +1421,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
if (in_dbg_master())
goto out_disable;
- if (intel_fbc_enabled(dev)) {
- /* We can re-enable it in this case, but need to update pitch */
- if ((fb->pitch > dev_priv->cfb_pitch) ||
- (obj_priv->fence_reg != dev_priv->cfb_fence) ||
- (plane != dev_priv->cfb_plane))
- intel_disable_fbc(dev);
- }
-
- /* Now try to turn it back on if possible */
- if (!intel_fbc_enabled(dev))
- intel_enable_fbc(crtc, 500);
-
+ intel_enable_fbc(crtc, 500);
return;
out_disable:
@@ -1393,7 +1433,9 @@ out_disable:
}
int
-intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
+intel_pin_and_fence_fb_obj(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ bool pipelined)
{
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
u32 alignment;
@@ -1403,7 +1445,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
case I915_TILING_NONE:
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
alignment = 128 * 1024;
- else if (IS_I965G(dev))
+ else if (INTEL_INFO(dev)->gen >= 4)
alignment = 4 * 1024;
else
alignment = 64 * 1024;
@@ -1421,9 +1463,13 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
}
ret = i915_gem_object_pin(obj, alignment);
- if (ret != 0)
+ if (ret)
return ret;
+ ret = i915_gem_object_set_to_display_plane(obj, pipelined);
+ if (ret)
+ goto err_unpin;
+
/* Install a fence for tiled scan-out. Pre-i965 always needs a
* fence, whereas 965+ only requires a fence if using
* framebuffer compression. For simplicity, we always install
@@ -1431,14 +1477,16 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
*/
if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj);
- if (ret != 0) {
- i915_gem_object_unpin(obj);
- return ret;
- }
+ ret = i915_gem_object_get_fence_reg(obj, false);
+ if (ret)
+ goto err_unpin;
}
return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+ return ret;
}
/* Assume fb object is pinned & idle & fenced and just update base pointers */
@@ -1454,12 +1502,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_gem_object *obj;
int plane = intel_crtc->plane;
unsigned long Start, Offset;
- int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
- int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
- int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
- int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
- int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
u32 dspcntr;
+ u32 reg;
switch (plane) {
case 0:
@@ -1474,7 +1518,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
obj = intel_fb->obj;
obj_priv = to_intel_bo(obj);
- dspcntr = I915_READ(dspcntr_reg);
+ reg = DSPCNTR(plane);
+ dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (fb->bits_per_pixel) {
@@ -1495,7 +1540,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
DRM_ERROR("Unknown color depth\n");
return -EINVAL;
}
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
if (obj_priv->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED;
else
@@ -1506,28 +1551,24 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* must disable */
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
- I915_WRITE(dspcntr_reg, dspcntr);
+ I915_WRITE(reg, dspcntr);
Start = obj_priv->gtt_offset;
Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
Start, Offset, x, y, fb->pitch);
- I915_WRITE(dspstride, fb->pitch);
- if (IS_I965G(dev)) {
- I915_WRITE(dspsurf, Start);
- I915_WRITE(dsptileoff, (y << 16) | x);
- I915_WRITE(dspbase, Offset);
- } else {
- I915_WRITE(dspbase, Start + Offset);
- }
- POSTING_READ(dspbase);
+ I915_WRITE(DSPSTRIDE(plane), fb->pitch);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(DSPSURF(plane), Start);
+ I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+ I915_WRITE(DSPADDR(plane), Offset);
+ } else
+ I915_WRITE(DSPADDR(plane), Start + Offset);
+ POSTING_READ(reg);
- if (IS_I965G(dev) || plane == 0)
- intel_update_fbc(crtc, &crtc->mode);
-
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- intel_increase_pllclock(crtc, true);
+ intel_update_fbc(dev);
+ intel_increase_pllclock(crtc);
return 0;
}
@@ -1539,11 +1580,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_device *dev = crtc->dev;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj_priv;
- struct drm_gem_object *obj;
- int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
int ret;
/* no fb bound */
@@ -1552,45 +1588,41 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
- switch (plane) {
+ switch (intel_crtc->plane) {
case 0:
case 1:
break;
default:
- DRM_ERROR("Can't update plane %d in SAREA\n", plane);
return -EINVAL;
}
- intel_fb = to_intel_framebuffer(crtc->fb);
- obj = intel_fb->obj;
- obj_priv = to_intel_bo(obj);
-
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, obj);
+ ret = intel_pin_and_fence_fb_obj(dev,
+ to_intel_framebuffer(crtc->fb)->obj,
+ false);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
- ret = i915_gem_object_set_to_display_plane(obj);
- if (ret != 0) {
- i915_gem_object_unpin(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ if (old_fb) {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+
+ wait_event(dev_priv->pending_flip_queue,
+ atomic_read(&obj_priv->pending_flip) == 0);
}
ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y);
if (ret) {
- i915_gem_object_unpin(obj);
+ i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
return ret;
}
- if (old_fb) {
- intel_fb = to_intel_framebuffer(old_fb);
- obj_priv = to_intel_bo(intel_fb->obj);
- i915_gem_object_unpin(intel_fb->obj);
- }
+ if (old_fb)
+ i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
mutex_unlock(&dev->struct_mutex);
@@ -1601,7 +1633,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (!master_priv->sarea_priv)
return 0;
- if (pipe) {
+ if (intel_crtc->pipe) {
master_priv->sarea_priv->pipeB_x = x;
master_priv->sarea_priv->pipeB_y = y;
} else {
@@ -1612,7 +1644,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
-static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
+static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1645,6 +1677,7 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
}
I915_WRITE(DP_A, dpa_ctl);
+ POSTING_READ(DP_A);
udelay(500);
}
@@ -1655,84 +1688,84 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
- int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
- int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
- int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
- u32 temp, tries = 0;
+ u32 reg, temp, tries;
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
- temp = I915_READ(fdi_rx_imr_reg);
+ reg = FDI_RX_IMR(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(fdi_rx_imr_reg, temp);
- I915_READ(fdi_rx_imr_reg);
+ I915_WRITE(reg, temp);
+ I915_READ(reg);
udelay(150);
/* enable CPU FDI TX and PCH FDI RX */
- temp = I915_READ(fdi_tx_reg);
- temp |= FDI_TX_ENABLE;
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~(7 << 19);
temp |= (intel_crtc->fdi_lanes - 1) << 19;
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_tx_reg, temp);
- I915_READ(fdi_tx_reg);
+ I915_WRITE(reg, temp | FDI_TX_ENABLE);
- temp = I915_READ(fdi_rx_reg);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
- I915_READ(fdi_rx_reg);
+ I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
udelay(150);
+ reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
- temp = I915_READ(fdi_rx_iir_reg);
+ temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if ((temp & FDI_RX_BIT_LOCK)) {
DRM_DEBUG_KMS("FDI train 1 done.\n");
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_BIT_LOCK);
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
break;
}
}
if (tries == 5)
- DRM_DEBUG_KMS("FDI train 1 fail!\n");
+ DRM_ERROR("FDI train 1 fail!\n");
/* Train 2 */
- temp = I915_READ(fdi_tx_reg);
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(fdi_tx_reg, temp);
+ I915_WRITE(reg, temp);
- temp = I915_READ(fdi_rx_reg);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(fdi_rx_reg, temp);
- udelay(150);
+ I915_WRITE(reg, temp);
- tries = 0;
+ POSTING_READ(reg);
+ udelay(150);
+ reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
- temp = I915_READ(fdi_rx_iir_reg);
+ temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK) {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_SYMBOL_LOCK);
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
DRM_DEBUG_KMS("FDI train 2 done.\n");
break;
}
}
if (tries == 5)
- DRM_DEBUG_KMS("FDI train 2 fail!\n");
+ DRM_ERROR("FDI train 2 fail!\n");
DRM_DEBUG_KMS("FDI train done\n");
}
-static int snb_b_fdi_train_param [] = {
+static const int snb_b_fdi_train_param[] = {
FDI_LINK_TRAIN_400MV_0DB_SNB_B,
FDI_LINK_TRAIN_400MV_6DB_SNB_B,
FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
@@ -1746,24 +1779,22 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
- int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
- int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
- int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
- u32 temp, i;
+ u32 reg, temp, i;
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
- temp = I915_READ(fdi_rx_imr_reg);
+ reg = FDI_RX_IMR(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(fdi_rx_imr_reg, temp);
- I915_READ(fdi_rx_imr_reg);
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
udelay(150);
/* enable CPU FDI TX and PCH FDI RX */
- temp = I915_READ(fdi_tx_reg);
- temp |= FDI_TX_ENABLE;
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~(7 << 19);
temp |= (intel_crtc->fdi_lanes - 1) << 19;
temp &= ~FDI_LINK_TRAIN_NONE;
@@ -1771,10 +1802,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
- I915_WRITE(fdi_tx_reg, temp);
- I915_READ(fdi_tx_reg);
+ I915_WRITE(reg, temp | FDI_TX_ENABLE);
- temp = I915_READ(fdi_rx_reg);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
if (HAS_PCH_CPT(dev)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
@@ -1782,32 +1813,37 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
}
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
- I915_READ(fdi_rx_reg);
+ I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
udelay(150);
for (i = 0; i < 4; i++ ) {
- temp = I915_READ(fdi_tx_reg);
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[i];
- I915_WRITE(fdi_tx_reg, temp);
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
udelay(500);
- temp = I915_READ(fdi_rx_iir_reg);
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_BIT_LOCK) {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_BIT_LOCK);
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
DRM_DEBUG_KMS("FDI train 1 done.\n");
break;
}
}
if (i == 4)
- DRM_DEBUG_KMS("FDI train 1 fail!\n");
+ DRM_ERROR("FDI train 1 fail!\n");
/* Train 2 */
- temp = I915_READ(fdi_tx_reg);
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
if (IS_GEN6(dev)) {
@@ -1815,9 +1851,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
}
- I915_WRITE(fdi_tx_reg, temp);
+ I915_WRITE(reg, temp);
- temp = I915_READ(fdi_rx_reg);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
if (HAS_PCH_CPT(dev)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
@@ -1825,535 +1862,611 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
}
- I915_WRITE(fdi_rx_reg, temp);
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
udelay(150);
for (i = 0; i < 4; i++ ) {
- temp = I915_READ(fdi_tx_reg);
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[i];
- I915_WRITE(fdi_tx_reg, temp);
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
udelay(500);
- temp = I915_READ(fdi_rx_iir_reg);
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK) {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_SYMBOL_LOCK);
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
DRM_DEBUG_KMS("FDI train 2 done.\n");
break;
}
}
if (i == 4)
- DRM_DEBUG_KMS("FDI train 2 fail!\n");
+ DRM_ERROR("FDI train 2 fail!\n");
DRM_DEBUG_KMS("FDI train done.\n");
}
-static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void ironlake_fdi_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
- int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
- int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
- int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
- int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
- int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
- int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
- int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
- int cpu_vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
- int cpu_vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
- int cpu_vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int trans_htot_reg = (pipe == 0) ? TRANS_HTOTAL_A : TRANS_HTOTAL_B;
- int trans_hblank_reg = (pipe == 0) ? TRANS_HBLANK_A : TRANS_HBLANK_B;
- int trans_hsync_reg = (pipe == 0) ? TRANS_HSYNC_A : TRANS_HSYNC_B;
- int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
- int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
- int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
- int trans_dpll_sel = (pipe == 0) ? 0 : 1;
- u32 temp;
- u32 pipe_bpc;
-
- temp = I915_READ(pipeconf_reg);
- pipe_bpc = temp & PIPE_BPC_MASK;
+ u32 reg, temp;
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
- */
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
+ /* Write the TU size bits so error detection works */
+ I915_WRITE(FDI_RX_TUSIZE1(pipe),
+ I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- temp = I915_READ(PCH_LVDS);
- if ((temp & LVDS_PORT_EN) == 0) {
- I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
- POSTING_READ(PCH_LVDS);
- }
- }
+ /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~((0x7 << 19) | (0x7 << 16));
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
- if (!HAS_eDP) {
+ POSTING_READ(reg);
+ udelay(200);
- /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
- temp = I915_READ(fdi_rx_reg);
- /*
- * make the BPC in FDI Rx be consistent with that in
- * pipeconf reg.
- */
- temp &= ~(0x7 << 16);
- temp |= (pipe_bpc << 11);
- temp &= ~(7 << 19);
- temp |= (intel_crtc->fdi_lanes - 1) << 19;
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
- I915_READ(fdi_rx_reg);
- udelay(200);
+ /* Switch from Rawclk to PCDclk */
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp | FDI_PCDCLK);
- /* Switch from Rawclk to PCDclk */
- temp = I915_READ(fdi_rx_reg);
- I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
- I915_READ(fdi_rx_reg);
- udelay(200);
+ POSTING_READ(reg);
+ udelay(200);
- /* Enable CPU FDI TX PLL, always on for Ironlake */
- temp = I915_READ(fdi_tx_reg);
- if ((temp & FDI_TX_PLL_ENABLE) == 0) {
- I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
- I915_READ(fdi_tx_reg);
- udelay(100);
- }
- }
+ /* Enable CPU FDI TX PLL, always on for Ironlake */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+ I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
- /* Enable panel fitting for LVDS */
- if (dev_priv->pch_pf_size &&
- (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
- || HAS_eDP || intel_pch_has_edp(crtc))) {
- /* Force use of hard-coded filter coefficients
- * as some pre-programmed values are broken,
- * e.g. x201.
- */
- I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
- PF_ENABLE | PF_FILTER_MED_3x3);
- I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
- dev_priv->pch_pf_pos);
- I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
- dev_priv->pch_pf_size);
- }
+ POSTING_READ(reg);
+ udelay(100);
+ }
+}
- /* Enable CPU pipe */
- temp = I915_READ(pipeconf_reg);
- if ((temp & PIPEACONF_ENABLE) == 0) {
- I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
- I915_READ(pipeconf_reg);
- udelay(100);
- }
+static void intel_flush_display_plane(struct drm_device *dev,
+ int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg = DSPADDR(plane);
+ I915_WRITE(reg, I915_READ(reg));
+}
- /* configure and enable CPU plane */
- temp = I915_READ(dspcntr_reg);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- }
+/*
+ * When we disable a pipe, we need to clear any pending scanline wait events
+ * to avoid hanging the ring, which we assume we are waiting on.
+ */
+static void intel_clear_scanline_wait(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
- if (!HAS_eDP) {
- /* For PCH output, training FDI link */
- if (IS_GEN6(dev))
- gen6_fdi_link_train(crtc);
- else
- ironlake_fdi_link_train(crtc);
+ if (IS_GEN2(dev))
+ /* Can't break the hang on i8xx */
+ return;
- /* enable PCH DPLL */
- temp = I915_READ(pch_dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
- I915_READ(pch_dpll_reg);
- }
- udelay(200);
+ tmp = I915_READ(PRB0_CTL);
+ if (tmp & RING_WAIT) {
+ I915_WRITE(PRB0_CTL, tmp);
+ POSTING_READ(PRB0_CTL);
+ }
+}
- if (HAS_PCH_CPT(dev)) {
- /* Be sure PCH DPLL SEL is set */
- temp = I915_READ(PCH_DPLL_SEL);
- if (trans_dpll_sel == 0 &&
- (temp & TRANSA_DPLL_ENABLE) == 0)
- temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
- else if (trans_dpll_sel == 1 &&
- (temp & TRANSB_DPLL_ENABLE) == 0)
- temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
- I915_WRITE(PCH_DPLL_SEL, temp);
- I915_READ(PCH_DPLL_SEL);
- }
+static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
+{
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_private *dev_priv;
- /* set transcoder timing */
- I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
- I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg));
- I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg));
-
- I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg));
- I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
- I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
-
- /* enable normal train */
- temp = I915_READ(fdi_tx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
- FDI_TX_ENHANCE_FRAME_ENABLE);
- I915_READ(fdi_tx_reg);
-
- temp = I915_READ(fdi_rx_reg);
- if (HAS_PCH_CPT(dev)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_NORMAL_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_NONE;
- }
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
- I915_READ(fdi_rx_reg);
+ if (crtc->fb == NULL)
+ return;
- /* wait one idle pattern time */
- udelay(100);
+ obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj);
+ dev_priv = crtc->dev->dev_private;
+ wait_event(dev_priv->pending_flip_queue,
+ atomic_read(&obj_priv->pending_flip) == 0);
+}
- /* For PCH DP, enable TRANS_DP_CTL */
- if (HAS_PCH_CPT(dev) &&
- intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
- int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
- int reg;
-
- reg = I915_READ(trans_dp_ctl);
- reg &= ~(TRANS_DP_PORT_SEL_MASK |
- TRANS_DP_SYNC_MASK);
- reg |= (TRANS_DP_OUTPUT_ENABLE |
- TRANS_DP_ENH_FRAMING);
-
- if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
- reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
- if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
- reg |= TRANS_DP_VSYNC_ACTIVE_HIGH;
-
- switch (intel_trans_dp_port_sel(crtc)) {
- case PCH_DP_B:
- reg |= TRANS_DP_PORT_SEL_B;
- break;
- case PCH_DP_C:
- reg |= TRANS_DP_PORT_SEL_C;
- break;
- case PCH_DP_D:
- reg |= TRANS_DP_PORT_SEL_D;
- break;
- default:
- DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
- reg |= TRANS_DP_PORT_SEL_B;
- break;
- }
+static void ironlake_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 reg, temp;
- I915_WRITE(trans_dp_ctl, reg);
- POSTING_READ(trans_dp_ctl);
- }
+ if (intel_crtc->active)
+ return;
- /* enable PCH transcoder */
- temp = I915_READ(transconf_reg);
- /*
- * make the BPC in transcoder be consistent with
- * that in pipeconf reg.
- */
- temp &= ~PIPE_BPC_MASK;
- temp |= pipe_bpc;
- I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
- I915_READ(transconf_reg);
+ intel_crtc->active = true;
+ intel_update_watermarks(dev);
- if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1))
- DRM_ERROR("failed to enable transcoder\n");
- }
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ temp = I915_READ(PCH_LVDS);
+ if ((temp & LVDS_PORT_EN) == 0)
+ I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
+ }
- intel_crtc_load_lut(crtc);
+ ironlake_fdi_enable(crtc);
- intel_update_fbc(crtc, &crtc->mode);
- break;
+ /* Enable panel fitting for LVDS */
+ if (dev_priv->pch_pf_size &&
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+ || HAS_eDP || intel_pch_has_edp(crtc))) {
+ /* Force use of hard-coded filter coefficients
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+ I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
+ PF_ENABLE | PF_FILTER_MED_3x3);
+ I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
+ dev_priv->pch_pf_pos);
+ I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
+ dev_priv->pch_pf_size);
+ }
+
+ /* Enable CPU pipe */
+ reg = PIPECONF(pipe);
+ temp = I915_READ(reg);
+ if ((temp & PIPECONF_ENABLE) == 0) {
+ I915_WRITE(reg, temp | PIPECONF_ENABLE);
+ POSTING_READ(reg);
+ udelay(100);
+ }
- case DRM_MODE_DPMS_OFF:
- DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
+ /* configure and enable CPU plane */
+ reg = DSPCNTR(plane);
+ temp = I915_READ(reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev, plane);
+ }
+
+ /* For PCH output, training FDI link */
+ if (IS_GEN6(dev))
+ gen6_fdi_link_train(crtc);
+ else
+ ironlake_fdi_link_train(crtc);
+
+ /* enable PCH DPLL */
+ reg = PCH_DPLL(pipe);
+ temp = I915_READ(reg);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
+ POSTING_READ(reg);
+ udelay(200);
+ }
+
+ if (HAS_PCH_CPT(dev)) {
+ /* Be sure PCH DPLL SEL is set */
+ temp = I915_READ(PCH_DPLL_SEL);
+ if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
+ temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+ else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
+ temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ }
+
+ /* set transcoder timing */
+ I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
+ I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
+ I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
+
+ I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
+ I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
+ I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
+
+ /* enable normal train */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+ I915_WRITE(reg, temp);
- drm_vblank_off(dev, pipe);
- /* Disable display plane */
- temp = I915_READ(dspcntr_reg);
- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- I915_READ(dspbase_reg);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE;
+ }
+ I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+ /* wait one idle pattern time */
+ POSTING_READ(reg);
+ udelay(100);
+
+ /* For PCH DP, enable TRANS_DP_CTL */
+ if (HAS_PCH_CPT(dev) &&
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ reg = TRANS_DP_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(TRANS_DP_PORT_SEL_MASK |
+ TRANS_DP_SYNC_MASK);
+ temp |= (TRANS_DP_OUTPUT_ENABLE |
+ TRANS_DP_ENH_FRAMING);
+
+ if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+ temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
+ if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+ temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
+
+ switch (intel_trans_dp_port_sel(crtc)) {
+ case PCH_DP_B:
+ temp |= TRANS_DP_PORT_SEL_B;
+ break;
+ case PCH_DP_C:
+ temp |= TRANS_DP_PORT_SEL_C;
+ break;
+ case PCH_DP_D:
+ temp |= TRANS_DP_PORT_SEL_D;
+ break;
+ default:
+ DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
+ temp |= TRANS_DP_PORT_SEL_B;
+ break;
}
- if (dev_priv->cfb_plane == plane &&
- dev_priv->display.disable_fbc)
- dev_priv->display.disable_fbc(dev);
+ I915_WRITE(reg, temp);
+ }
- /* disable cpu pipe, disable after all planes disabled */
- temp = I915_READ(pipeconf_reg);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+ /* enable PCH transcoder */
+ reg = TRANSCONF(pipe);
+ temp = I915_READ(reg);
+ /*
+ * make the BPC in transcoder be consistent with
+ * that in pipeconf reg.
+ */
+ temp &= ~PIPE_BPC_MASK;
+ temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
+ I915_WRITE(reg, temp | TRANS_ENABLE);
+ if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
+ DRM_ERROR("failed to enable transcoder\n");
- /* wait for cpu pipe off, pipe state */
- if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50, 1))
- DRM_ERROR("failed to turn off cpu pipe\n");
- } else
- DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+ intel_crtc_load_lut(crtc);
+ intel_update_fbc(dev);
+ intel_crtc_update_cursor(crtc, true);
+}
- udelay(100);
+static void ironlake_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 reg, temp;
- /* Disable PF */
- I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
- I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
+ if (!intel_crtc->active)
+ return;
- /* disable CPU FDI tx and PCH FDI rx */
- temp = I915_READ(fdi_tx_reg);
- I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE);
- I915_READ(fdi_tx_reg);
+ intel_crtc_wait_for_pending_flips(crtc);
+ drm_vblank_off(dev, pipe);
+ intel_crtc_update_cursor(crtc, false);
- temp = I915_READ(fdi_rx_reg);
- /* BPC in FDI rx is consistent with that in pipeconf */
- temp &= ~(0x07 << 16);
- temp |= (pipe_bpc << 11);
- I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
- I915_READ(fdi_rx_reg);
+ /* Disable display plane */
+ reg = DSPCNTR(plane);
+ temp = I915_READ(reg);
+ if (temp & DISPLAY_PLANE_ENABLE) {
+ I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev, plane);
+ }
- udelay(100);
+ if (dev_priv->cfb_plane == plane &&
+ dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
- /* still set train pattern 1 */
- temp = I915_READ(fdi_tx_reg);
+ /* disable cpu pipe, disable after all planes disabled */
+ reg = PIPECONF(pipe);
+ temp = I915_READ(reg);
+ if (temp & PIPECONF_ENABLE) {
+ I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
+ /* wait for cpu pipe off, pipe state */
+ if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 50))
+ DRM_ERROR("failed to turn off cpu pipe\n");
+ }
+
+ /* Disable PF */
+ I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
+ I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
+
+ /* disable CPU FDI tx and PCH FDI rx */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+ POSTING_READ(reg);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(0x7 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(100);
+
+ /* still set train pattern 1 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_tx_reg, temp);
- POSTING_READ(fdi_tx_reg);
-
- temp = I915_READ(fdi_rx_reg);
- if (HAS_PCH_CPT(dev)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- }
- I915_WRITE(fdi_rx_reg, temp);
- POSTING_READ(fdi_rx_reg);
+ }
+ /* BPC in FDI rx is consistent with that in PIPECONF */
+ temp &= ~(0x07 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp);
- udelay(100);
+ POSTING_READ(reg);
+ udelay(100);
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- temp = I915_READ(PCH_LVDS);
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ temp = I915_READ(PCH_LVDS);
+ if (temp & LVDS_PORT_EN) {
I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
- I915_READ(PCH_LVDS);
+ POSTING_READ(PCH_LVDS);
udelay(100);
}
+ }
- /* disable PCH transcoder */
- temp = I915_READ(transconf_reg);
- if ((temp & TRANS_ENABLE) != 0) {
- I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE);
+ /* disable PCH transcoder */
+ reg = TRANSCONF(pipe);
+ temp = I915_READ(reg);
+ if (temp & TRANS_ENABLE) {
+ I915_WRITE(reg, temp & ~TRANS_ENABLE);
+ /* wait for PCH transcoder off, transcoder state */
+ if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
+ DRM_ERROR("failed to disable transcoder\n");
+ }
- /* wait for PCH transcoder off, transcoder state */
- if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50, 1))
- DRM_ERROR("failed to disable transcoder\n");
- }
+ if (HAS_PCH_CPT(dev)) {
+ /* disable TRANS_DP_CTL */
+ reg = TRANS_DP_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
+ I915_WRITE(reg, temp);
- temp = I915_READ(transconf_reg);
- /* BPC in transcoder is consistent with that in pipeconf */
- temp &= ~PIPE_BPC_MASK;
- temp |= pipe_bpc;
- I915_WRITE(transconf_reg, temp);
- I915_READ(transconf_reg);
- udelay(100);
+ /* disable DPLL_SEL */
+ temp = I915_READ(PCH_DPLL_SEL);
+ if (pipe == 0)
+ temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
+ else
+ temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ }
- if (HAS_PCH_CPT(dev)) {
- /* disable TRANS_DP_CTL */
- int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
- int reg;
+ /* disable PCH DPLL */
+ reg = PCH_DPLL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
- reg = I915_READ(trans_dp_ctl);
- reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
- I915_WRITE(trans_dp_ctl, reg);
- POSTING_READ(trans_dp_ctl);
+ /* Switch from PCDclk to Rawclk */
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_PCDCLK);
- /* disable DPLL_SEL */
- temp = I915_READ(PCH_DPLL_SEL);
- if (trans_dpll_sel == 0)
- temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
- else
- temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
- I915_WRITE(PCH_DPLL_SEL, temp);
- I915_READ(PCH_DPLL_SEL);
+ /* Disable CPU FDI TX PLL */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
- }
+ POSTING_READ(reg);
+ udelay(100);
- /* disable PCH DPLL */
- temp = I915_READ(pch_dpll_reg);
- I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
- I915_READ(pch_dpll_reg);
-
- /* Switch from PCDclk to Rawclk */
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_SEL_PCDCLK;
- I915_WRITE(fdi_rx_reg, temp);
- I915_READ(fdi_rx_reg);
-
- /* Disable CPU FDI TX PLL */
- temp = I915_READ(fdi_tx_reg);
- I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
- I915_READ(fdi_tx_reg);
- udelay(100);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_RX_PLL_ENABLE;
- I915_WRITE(fdi_rx_reg, temp);
- I915_READ(fdi_rx_reg);
+ /* Wait for the clocks to turn off. */
+ POSTING_READ(reg);
+ udelay(100);
- /* Wait for the clocks to turn off. */
- udelay(100);
+ intel_crtc->active = false;
+ intel_update_watermarks(dev);
+ intel_update_fbc(dev);
+ intel_clear_scanline_wait(dev);
+}
+
+static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
+ ironlake_crtc_enable(crtc);
+ break;
+
+ case DRM_MODE_DPMS_OFF:
+ DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
+ ironlake_crtc_disable(crtc);
break;
}
}
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
- struct intel_overlay *overlay;
- int ret;
-
if (!enable && intel_crtc->overlay) {
- overlay = intel_crtc->overlay;
- mutex_lock(&overlay->dev->struct_mutex);
- for (;;) {
- ret = intel_overlay_switch_off(overlay);
- if (ret == 0)
- break;
+ struct drm_device *dev = intel_crtc->base.dev;
- ret = intel_overlay_recover_from_interrupt(overlay, 0);
- if (ret != 0) {
- /* overlay doesn't react anymore. Usually
- * results in a black screen and an unkillable
- * X server. */
- BUG();
- overlay->hw_wedged = HW_WEDGED;
- break;
- }
- }
- mutex_unlock(&overlay->dev->struct_mutex);
+ mutex_lock(&dev->struct_mutex);
+ (void) intel_overlay_switch_off(intel_crtc->overlay, false);
+ mutex_unlock(&dev->struct_mutex);
}
- /* Let userspace switch the overlay on again. In most cases userspace
- * has to recompute where to put it anyway. */
- return;
+ /* Let userspace switch the overlay on again. In most cases userspace
+ * has to recompute where to put it anyway.
+ */
}
-static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- u32 temp;
+ u32 reg, temp;
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
- */
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- /* Enable the DPLL */
- temp = I915_READ(dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- I915_WRITE(dpll_reg, temp);
- I915_READ(dpll_reg);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- I915_READ(dpll_reg);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- I915_READ(dpll_reg);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- }
+ if (intel_crtc->active)
+ return;
- /* Enable the pipe */
- temp = I915_READ(pipeconf_reg);
- if ((temp & PIPEACONF_ENABLE) == 0)
- I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
-
- /* Enable the plane */
- temp = I915_READ(dspcntr_reg);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- }
+ intel_crtc->active = true;
+ intel_update_watermarks(dev);
- intel_crtc_load_lut(crtc);
+ /* Enable the DPLL */
+ reg = DPLL(pipe);
+ temp = I915_READ(reg);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ I915_WRITE(reg, temp);
- if ((IS_I965G(dev) || plane == 0))
- intel_update_fbc(crtc, &crtc->mode);
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(reg);
+ udelay(150);
- /* Give the overlay scaler a chance to enable if it's on this pipe */
- intel_crtc_dpms_overlay(intel_crtc, true);
- break;
- case DRM_MODE_DPMS_OFF:
- /* Give the overlay scaler a chance to disable if it's on this pipe */
- intel_crtc_dpms_overlay(intel_crtc, false);
- drm_vblank_off(dev, pipe);
-
- if (dev_priv->cfb_plane == plane &&
- dev_priv->display.disable_fbc)
- dev_priv->display.disable_fbc(dev);
-
- /* Disable display plane */
- temp = I915_READ(dspcntr_reg);
- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- I915_READ(dspbase_reg);
- }
+ I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(reg);
+ udelay(150);
+
+ I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(reg);
+ udelay(150);
+ }
+
+ /* Enable the pipe */
+ reg = PIPECONF(pipe);
+ temp = I915_READ(reg);
+ if ((temp & PIPECONF_ENABLE) == 0)
+ I915_WRITE(reg, temp | PIPECONF_ENABLE);
+
+ /* Enable the plane */
+ reg = DSPCNTR(plane);
+ temp = I915_READ(reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev, plane);
+ }
+
+ intel_crtc_load_lut(crtc);
+ intel_update_fbc(dev);
+
+ /* Give the overlay scaler a chance to enable if it's on this pipe */
+ intel_crtc_dpms_overlay(intel_crtc, true);
+ intel_crtc_update_cursor(crtc, true);
+}
+
+static void i9xx_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 reg, temp;
+
+ if (!intel_crtc->active)
+ return;
+
+ /* Give the overlay scaler a chance to disable if it's on this pipe */
+ intel_crtc_wait_for_pending_flips(crtc);
+ drm_vblank_off(dev, pipe);
+ intel_crtc_dpms_overlay(intel_crtc, false);
+ intel_crtc_update_cursor(crtc, false);
+
+ if (dev_priv->cfb_plane == plane &&
+ dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
+
+ /* Disable display plane */
+ reg = DSPCNTR(plane);
+ temp = I915_READ(reg);
+ if (temp & DISPLAY_PLANE_ENABLE) {
+ I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ intel_flush_display_plane(dev, plane);
/* Wait for vblank for the disable to take effect */
- intel_wait_for_vblank_off(dev, pipe);
-
- /* Don't disable pipe A or pipe A PLLs if needed */
- if (pipeconf_reg == PIPEACONF &&
- (dev_priv->quirks & QUIRK_PIPEA_FORCE))
- goto skip_pipe_off;
-
- /* Next, disable display pipes */
- temp = I915_READ(pipeconf_reg);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
- I915_READ(pipeconf_reg);
- }
+ if (IS_GEN2(dev))
+ intel_wait_for_vblank(dev, pipe);
+ }
- /* Wait for vblank for the disable to take effect. */
- intel_wait_for_vblank_off(dev, pipe);
+ /* Don't disable pipe A or pipe A PLLs if needed */
+ if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ goto done;
+
+ /* Next, disable display pipes */
+ reg = PIPECONF(pipe);
+ temp = I915_READ(reg);
+ if (temp & PIPECONF_ENABLE) {
+ I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
+
+ /* Wait for the pipe to turn off */
+ POSTING_READ(reg);
+ intel_wait_for_pipe_off(dev, pipe);
+ }
+
+ reg = DPLL(pipe);
+ temp = I915_READ(reg);
+ if (temp & DPLL_VCO_ENABLE) {
+ I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
- temp = I915_READ(dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) != 0) {
- I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
- I915_READ(dpll_reg);
- }
- skip_pipe_off:
/* Wait for the clocks to turn off. */
+ POSTING_READ(reg);
udelay(150);
+ }
+
+done:
+ intel_crtc->active = false;
+ intel_update_fbc(dev);
+ intel_update_watermarks(dev);
+ intel_clear_scanline_wait(dev);
+}
+
+static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ i9xx_crtc_enable(crtc);
+ break;
+ case DRM_MODE_DPMS_OFF:
+ i9xx_crtc_disable(crtc);
break;
}
}
@@ -2374,26 +2487,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
return;
intel_crtc->dpms_mode = mode;
- intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON;
-
- /* When switching on the display, ensure that SR is disabled
- * with multiple pipes prior to enabling to new pipe.
- *
- * When switching off the display, make sure the cursor is
- * properly hidden prior to disabling the pipe.
- */
- if (mode == DRM_MODE_DPMS_ON)
- intel_update_watermarks(dev);
- else
- intel_crtc_update_cursor(crtc);
dev_priv->display.dpms(crtc, mode);
- if (mode == DRM_MODE_DPMS_ON)
- intel_crtc_update_cursor(crtc);
- else
- intel_update_watermarks(dev);
-
if (!dev->primary->master)
return;
@@ -2418,16 +2514,46 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
-static void intel_crtc_prepare (struct drm_crtc *crtc)
+static void intel_crtc_disable(struct drm_crtc *crtc)
{
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ struct drm_device *dev = crtc->dev;
+
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ if (crtc->fb) {
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+ mutex_unlock(&dev->struct_mutex);
+ }
}
-static void intel_crtc_commit (struct drm_crtc *crtc)
+/* Prepare for a mode set.
+ *
+ * Note we could be a lot smarter here. We need to figure out which outputs
+ * will be enabled, which disabled (in short, how the config will change)
+ * and perform the minimum necessary steps to accomplish that, e.g. updating
+ * watermarks, FBC configuration, making sure PLLs are programmed correctly,
+ * panel fitting is in the proper state, etc.
+ */
+static void i9xx_crtc_prepare(struct drm_crtc *crtc)
{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+ i9xx_crtc_disable(crtc);
+}
+
+static void i9xx_crtc_commit(struct drm_crtc *crtc)
+{
+ i9xx_crtc_enable(crtc);
+}
+
+static void ironlake_crtc_prepare(struct drm_crtc *crtc)
+{
+ ironlake_crtc_disable(crtc);
+}
+
+static void ironlake_crtc_commit(struct drm_crtc *crtc)
+{
+ ironlake_crtc_enable(crtc);
}
void intel_encoder_prepare (struct drm_encoder *encoder)
@@ -2446,13 +2572,7 @@ void intel_encoder_commit (struct drm_encoder *encoder)
void intel_encoder_destroy(struct drm_encoder *encoder)
{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
-
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
@@ -2543,33 +2663,6 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
return 133000;
}
-/**
- * Return the pipe currently connected to the panel fitter,
- * or -1 if the panel fitter is not present or not in use
- */
-int intel_panel_fitter_pipe (struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pfit_control;
-
- /* i830 doesn't have a panel fitter */
- if (IS_I830(dev))
- return -1;
-
- pfit_control = I915_READ(PFIT_CONTROL);
-
- /* See if the panel fitter is in use */
- if ((pfit_control & PFIT_ENABLE) == 0)
- return -1;
-
- /* 965 can place panel fitter on either pipe */
- if (IS_I965G(dev))
- return (pfit_control >> 29) & 0x3;
-
- /* older chips can only use pipe 1 */
- return 1;
-}
-
struct fdi_m_n {
u32 tu;
u32 gmch_m;
@@ -2888,7 +2981,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
+ plane ? "B" : "A", size);
return size;
}
@@ -2905,7 +2998,7 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
size >>= 1; /* Convert to cachelines */
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
+ plane ? "B" : "A", size);
return size;
}
@@ -2920,8 +3013,8 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
size >>= 2; /* Convert to cachelines */
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A",
- size);
+ plane ? "B" : "A",
+ size);
return size;
}
@@ -2936,14 +3029,14 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
size >>= 1; /* Convert to cachelines */
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
+ plane ? "B" : "A", size);
return size;
}
static void pineview_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int unused,
- int pixel_size)
+ int planeb_clock, int sr_hdisplay, int unused,
+ int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
const struct cxsr_latency *latency;
@@ -3055,13 +3148,13 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
/* Use ns/us then divide to preserve precision */
sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * sr_hdisplay;
+ pixel_size * sr_hdisplay;
sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
entries_required = (((sr_latency_ns / line_time_us) +
1000) / 1000) * pixel_size * 64;
entries_required = DIV_ROUND_UP(entries_required,
- g4x_cursor_wm_info.cacheline_size);
+ g4x_cursor_wm_info.cacheline_size);
cursor_sr = entries_required + g4x_cursor_wm_info.guard_size;
if (cursor_sr > g4x_cursor_wm_info.max_wm)
@@ -3073,7 +3166,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
} else {
/* Turn off self refresh if both pipes are enabled */
I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
+ & ~FW_BLC_SELF_EN);
}
DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -3111,7 +3204,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
/* Use ns/us then divide to preserve precision */
sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * sr_hdisplay;
+ pixel_size * sr_hdisplay;
sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE);
DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
srwm = I965_FIFO_SIZE - sr_entries;
@@ -3120,11 +3213,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
srwm &= 0x1ff;
sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * 64;
+ pixel_size * 64;
sr_entries = DIV_ROUND_UP(sr_entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
- (sr_entries + i965_cursor_wm_info.guard_size);
+ (sr_entries + i965_cursor_wm_info.guard_size);
if (cursor_sr > i965_cursor_wm_info.max_wm)
cursor_sr = i965_cursor_wm_info.max_wm;
@@ -3132,11 +3225,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
"cursor %d\n", srwm, cursor_sr);
- if (IS_I965GM(dev))
+ if (IS_CRESTLINE(dev))
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
} else {
/* Turn off self refresh if both pipes are enabled */
- if (IS_I965GM(dev))
+ if (IS_CRESTLINE(dev))
I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
& ~FW_BLC_SELF_EN);
}
@@ -3166,9 +3259,9 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
int sr_clock, sr_entries = 0;
/* Create copies of the base settings for each pipe */
- if (IS_I965GM(dev) || IS_I945GM(dev))
+ if (IS_CRESTLINE(dev) || IS_I945GM(dev))
planea_params = planeb_params = i945_wm_info;
- else if (IS_I9XX(dev))
+ else if (!IS_GEN2(dev))
planea_params = planeb_params = i915_wm_info;
else
planea_params = planeb_params = i855_wm_info;
@@ -3203,7 +3296,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
/* Use ns/us then divide to preserve precision */
sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * sr_hdisplay;
+ pixel_size * sr_hdisplay;
sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
srwm = total_size - sr_entries;
@@ -3228,7 +3321,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
- planea_wm, planeb_wm, cwm, srwm);
+ planea_wm, planeb_wm, cwm, srwm);
fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
fwater_hi = (cwm & 0x1f);
@@ -3262,146 +3355,130 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
#define ILK_LP0_PLANE_LATENCY 700
#define ILK_LP0_CURSOR_LATENCY 1300
-static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int sr_htotal,
- int pixel_size)
+static bool ironlake_compute_wm0(struct drm_device *dev,
+ int pipe,
+ int *plane_wm,
+ int *cursor_wm)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
- int sr_wm, cursor_wm;
- unsigned long line_time_us;
- int sr_clock, entries_required;
- u32 reg_value;
- int line_count;
- int planea_htotal = 0, planeb_htotal = 0;
struct drm_crtc *crtc;
+ int htotal, hdisplay, clock, pixel_size = 0;
+ int line_time_us, line_count, entries;
- /* Need htotal for all active display plane */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
- if (intel_crtc->plane == 0)
- planea_htotal = crtc->mode.htotal;
- else
- planeb_htotal = crtc->mode.htotal;
- }
- }
-
- /* Calculate and update the watermark for plane A */
- if (planea_clock) {
- entries_required = ((planea_clock / 1000) * pixel_size *
- ILK_LP0_PLANE_LATENCY) / 1000;
- entries_required = DIV_ROUND_UP(entries_required,
- ironlake_display_wm_info.cacheline_size);
- planea_wm = entries_required +
- ironlake_display_wm_info.guard_size;
-
- if (planea_wm > (int)ironlake_display_wm_info.max_wm)
- planea_wm = ironlake_display_wm_info.max_wm;
-
- /* Use the large buffer method to calculate cursor watermark */
- line_time_us = (planea_htotal * 1000) / planea_clock;
-
- /* Use ns/us then divide to preserve precision */
- line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
-
- /* calculate the cursor watermark for cursor A */
- entries_required = line_count * 64 * pixel_size;
- entries_required = DIV_ROUND_UP(entries_required,
- ironlake_cursor_wm_info.cacheline_size);
- cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size;
- if (cursora_wm > ironlake_cursor_wm_info.max_wm)
- cursora_wm = ironlake_cursor_wm_info.max_wm;
-
- reg_value = I915_READ(WM0_PIPEA_ILK);
- reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
- (cursora_wm & WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEA_ILK, reg_value);
- DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, "
- "cursor: %d\n", planea_wm, cursora_wm);
- }
- /* Calculate and update the watermark for plane B */
- if (planeb_clock) {
- entries_required = ((planeb_clock / 1000) * pixel_size *
- ILK_LP0_PLANE_LATENCY) / 1000;
- entries_required = DIV_ROUND_UP(entries_required,
- ironlake_display_wm_info.cacheline_size);
- planeb_wm = entries_required +
- ironlake_display_wm_info.guard_size;
-
- if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
- planeb_wm = ironlake_display_wm_info.max_wm;
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
+ if (crtc->fb == NULL || !crtc->enabled)
+ return false;
- /* Use the large buffer method to calculate cursor watermark */
- line_time_us = (planeb_htotal * 1000) / planeb_clock;
+ htotal = crtc->mode.htotal;
+ hdisplay = crtc->mode.hdisplay;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ /* Use the small buffer method to calculate plane watermark */
+ entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000;
+ entries = DIV_ROUND_UP(entries,
+ ironlake_display_wm_info.cacheline_size);
+ *plane_wm = entries + ironlake_display_wm_info.guard_size;
+ if (*plane_wm > (int)ironlake_display_wm_info.max_wm)
+ *plane_wm = ironlake_display_wm_info.max_wm;
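+	/* Illustrative numbers for the computation above: a 148500kHz mode at
+	 * 4 bytes/pixel gives clock * pixel_size / 1000 = 594 bytes per us,
+	 * so the 700ns LP0 latency needs 594 * 700 / 1000 = 415 bytes, which
+	 * is then rounded up to cachelines and padded by the guard size.
+	 */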
+
+ /* Use the large buffer method to calculate cursor watermark */
+ line_time_us = ((htotal * 1000) / clock);
+ line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+ entries = line_count * 64 * pixel_size;
+ entries = DIV_ROUND_UP(entries,
+ ironlake_cursor_wm_info.cacheline_size);
+ *cursor_wm = entries + ironlake_cursor_wm_info.guard_size;
+ if (*cursor_wm > ironlake_cursor_wm_info.max_wm)
+ *cursor_wm = ironlake_cursor_wm_info.max_wm;
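+	/* Illustrative, same 148500kHz mode with htotal 2200: line_time_us =
+	 * 2200 * 1000 / 148500 = 14, line_count = (1300 / 14 + 1000) / 1000 = 1,
+	 * so entries = 1 * 64 * 4 = 256 bytes for a 64-wide cursor at
+	 * 4 bytes/pixel.
+	 */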
- /* Use ns/us then divide to preserve precision */
- line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+ return true;
+}
- /* calculate the cursor watermark for cursor B */
- entries_required = line_count * 64 * pixel_size;
- entries_required = DIV_ROUND_UP(entries_required,
- ironlake_cursor_wm_info.cacheline_size);
- cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size;
- if (cursorb_wm > ironlake_cursor_wm_info.max_wm)
- cursorb_wm = ironlake_cursor_wm_info.max_wm;
+static void ironlake_update_wm(struct drm_device *dev,
+ int planea_clock, int planeb_clock,
+ int sr_hdisplay, int sr_htotal,
+ int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int plane_wm, cursor_wm, enabled;
+ int tmp;
+
+ enabled = 0;
+ if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEA_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+			      " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled++;
+ }
- reg_value = I915_READ(WM0_PIPEB_ILK);
- reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
- (cursorb_wm & WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEB_ILK, reg_value);
- DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, "
- "cursor: %d\n", planeb_wm, cursorb_wm);
+ if (ironlake_compute_wm0(dev, 1, &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEB_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled++;
}
/*
* Calculate and update the self-refresh watermark only when one
* display plane is used.
*/
- if (!planea_clock || !planeb_clock) {
-
+ tmp = 0;
+	if (enabled == 1 && /* XXX disabled due to buggy implementation? */ 0) {
+ unsigned long line_time_us;
+ int small, large, plane_fbc;
+ int sr_clock, entries;
+ int line_count, line_size;
/* Read the self-refresh latency. The unit is 0.5us */
int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_htotal * 1000) / sr_clock);
+ line_time_us = (sr_htotal * 1000) / sr_clock;
/* Use ns/us then divide to preserve precision */
line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
- / 1000;
+ / 1000;
+ line_size = sr_hdisplay * pixel_size;
- /* calculate the self-refresh watermark for display plane */
- entries_required = line_count * sr_hdisplay * pixel_size;
- entries_required = DIV_ROUND_UP(entries_required,
- ironlake_display_srwm_info.cacheline_size);
- sr_wm = entries_required +
- ironlake_display_srwm_info.guard_size;
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((sr_clock * pixel_size / 1000) * (ilk_sr_latency * 500)) / 1000;
+ large = line_count * line_size;
- /* calculate the self-refresh watermark for display cursor */
- entries_required = line_count * pixel_size * 64;
- entries_required = DIV_ROUND_UP(entries_required,
- ironlake_cursor_srwm_info.cacheline_size);
- cursor_wm = entries_required +
- ironlake_cursor_srwm_info.guard_size;
+ entries = DIV_ROUND_UP(min(small, large),
+ ironlake_display_srwm_info.cacheline_size);
- /* configure watermark and enable self-refresh */
- reg_value = I915_READ(WM1_LP_ILK);
- reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
- WM1_LP_CURSOR_MASK);
- reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
- (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
+ plane_fbc = entries * 64;
+ plane_fbc = DIV_ROUND_UP(plane_fbc, line_size);
- I915_WRITE(WM1_LP_ILK, reg_value);
- DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
- "cursor %d\n", sr_wm, cursor_wm);
+ plane_wm = entries + ironlake_display_srwm_info.guard_size;
+ if (plane_wm > (int)ironlake_display_srwm_info.max_wm)
+ plane_wm = ironlake_display_srwm_info.max_wm;
- } else {
- /* Turn off self refresh if both pipes are enabled */
- I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
- }
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = DIV_ROUND_UP(entries,
+ ironlake_cursor_srwm_info.cacheline_size);
+
+ cursor_wm = entries + ironlake_cursor_srwm_info.guard_size;
+ if (cursor_wm > (int)ironlake_cursor_srwm_info.max_wm)
+ cursor_wm = ironlake_cursor_srwm_info.max_wm;
+
+ /* configure watermark and enable self-refresh */
+ tmp = (WM1_LP_SR_EN |
+ (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+ (plane_fbc << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+ DRM_DEBUG_KMS("self-refresh watermark: display plane %d, fbc lines %d,"
+ " cursor %d\n", plane_wm, plane_fbc, cursor_wm);
+ }
+ I915_WRITE(WM1_LP_ILK, tmp);
+ /* XXX setup WM2 and WM3 */
}
+
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
@@ -3433,7 +3510,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
*
* We don't use the sprite, so we can ignore that. And on Crestline we have
* to set the non-SR watermarks to 8.
- */
+ */
static void intel_update_watermarks(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3449,15 +3526,15 @@ static void intel_update_watermarks(struct drm_device *dev)
/* Get the clock config from both planes */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
+ if (intel_crtc->active) {
enabled++;
if (intel_crtc->plane == 0) {
DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
- intel_crtc->pipe, crtc->mode.clock);
+ intel_crtc->pipe, crtc->mode.clock);
planea_clock = crtc->mode.clock;
} else {
DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
- intel_crtc->pipe, crtc->mode.clock);
+ intel_crtc->pipe, crtc->mode.clock);
planeb_clock = crtc->mode.clock;
}
sr_hdisplay = crtc->mode.hdisplay;
@@ -3488,62 +3565,35 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- int fp_reg = (pipe == 0) ? FPA0 : FPB0;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
- int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
- int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
- int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
- int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
- int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
- int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
- int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
- int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ u32 fp_reg, dpll_reg;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
- u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
+ u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
struct intel_encoder *has_edp_encoder = NULL;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_encoder *encoder;
+ struct intel_encoder *encoder;
const intel_limit_t *limit;
int ret;
struct fdi_m_n m_n = {0};
- int data_m1_reg = (pipe == 0) ? PIPEA_DATA_M1 : PIPEB_DATA_M1;
- int data_n1_reg = (pipe == 0) ? PIPEA_DATA_N1 : PIPEB_DATA_N1;
- int link_m1_reg = (pipe == 0) ? PIPEA_LINK_M1 : PIPEB_LINK_M1;
- int link_n1_reg = (pipe == 0) ? PIPEA_LINK_N1 : PIPEB_LINK_N1;
- int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
- int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
- int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
- int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
- int trans_dpll_sel = (pipe == 0) ? 0 : 1;
- int lvds_reg = LVDS;
- u32 temp;
- int sdvo_pixel_multiply;
+ u32 reg, temp;
int target_clock;
drm_vblank_pre_modeset(dev, pipe);
- list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_encoder *intel_encoder;
-
- if (encoder->crtc != crtc)
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ if (encoder->base.crtc != crtc)
continue;
- intel_encoder = enc_to_intel_encoder(encoder);
- switch (intel_encoder->type) {
+ switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_SDVO:
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
- if (intel_encoder->needs_tv_clock)
+ if (encoder->needs_tv_clock)
is_tv = true;
break;
case INTEL_OUTPUT_DVO:
@@ -3559,7 +3609,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
is_dp = true;
break;
case INTEL_OUTPUT_EDP:
- has_edp_encoder = intel_encoder;
+ has_edp_encoder = encoder;
break;
}
@@ -3569,15 +3619,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) {
refclk = dev_priv->lvds_ssc_freq * 1000;
DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
- refclk / 1000);
- } else if (IS_I9XX(dev)) {
+ refclk / 1000);
+ } else if (!IS_GEN2(dev)) {
refclk = 96000;
if (HAS_PCH_SPLIT(dev))
refclk = 120000; /* 120Mhz refclk */
} else {
refclk = 48000;
}
-
/*
* Returns a set of divisors for the desired target clock with the given
@@ -3593,13 +3642,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
/* Ensure that the cursor is valid for the new mode before changing... */
- intel_crtc_update_cursor(crtc);
+ intel_crtc_update_cursor(crtc, true);
if (is_lvds && dev_priv->lvds_downclock_avail) {
has_reduced_clock = limit->find_pll(limit, crtc,
- dev_priv->lvds_downclock,
- refclk,
- &reduced_clock);
+ dev_priv->lvds_downclock,
+ refclk,
+ &reduced_clock);
if (has_reduced_clock && (clock.p != reduced_clock.p)) {
/*
* If the different P is found, it means that we can't
@@ -3608,7 +3657,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* feature.
*/
DRM_DEBUG_KMS("Different P is found for "
- "LVDS clock/downclock\n");
+ "LVDS clock/downclock\n");
has_reduced_clock = 0;
}
}
@@ -3616,14 +3665,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
this mirrors vbios setting. */
if (is_sdvo && is_tv) {
if (adjusted_mode->clock >= 100000
- && adjusted_mode->clock < 140500) {
+ && adjusted_mode->clock < 140500) {
clock.p1 = 2;
clock.p2 = 10;
clock.n = 3;
clock.m1 = 16;
clock.m2 = 8;
} else if (adjusted_mode->clock >= 140500
- && adjusted_mode->clock <= 200000) {
+ && adjusted_mode->clock <= 200000) {
clock.p1 = 1;
clock.p2 = 10;
clock.n = 6;
@@ -3648,21 +3697,28 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
target_clock = mode->clock;
else
target_clock = adjusted_mode->clock;
- link_bw = 270000;
+
+ /* FDI is a binary signal running at ~2.7GHz, encoding
+ * each output octet as 10 bits. The actual frequency
+ * is stored as a divider into a 100MHz clock, and the
+ * mode pixel clock is stored in units of 1KHz.
+ * Hence the bw of each lane in terms of the mode signal
+ * is:
+ */
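+	/* For example, assuming intel_fdi_link_freq() reads back 27
+	 * (i.e. 2.7GHz in 100MHz units), the expression below gives
+	 * 27 * 100000000 / 1000 / 10 = 270000, the 270MHz-per-lane
+	 * value that used to be hard-coded here.
+	 */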
+ link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
}
/* determine panel color depth */
- temp = I915_READ(pipeconf_reg);
+ temp = I915_READ(PIPECONF(pipe));
temp &= ~PIPE_BPC_MASK;
if (is_lvds) {
- int lvds_reg = I915_READ(PCH_LVDS);
/* the BPC will be 6 if it is 18-bit LVDS panel */
- if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
+ if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
temp |= PIPE_8BPC;
else
temp |= PIPE_6BPC;
} else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) {
- switch (dev_priv->edp_bpp/3) {
+ switch (dev_priv->edp.bpp/3) {
case 8:
temp |= PIPE_8BPC;
break;
@@ -3678,8 +3734,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
} else
temp |= PIPE_8BPC;
- I915_WRITE(pipeconf_reg, temp);
- I915_READ(pipeconf_reg);
+ I915_WRITE(PIPECONF(pipe), temp);
switch (temp & PIPE_BPC_MASK) {
case PIPE_8BPC:
@@ -3724,33 +3779,27 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Always enable nonspread source */
temp &= ~DREF_NONSPREAD_SOURCE_MASK;
temp |= DREF_NONSPREAD_SOURCE_ENABLE;
- I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
-
temp &= ~DREF_SSC_SOURCE_MASK;
temp |= DREF_SSC_SOURCE_ENABLE;
I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
+ POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
if (has_edp_encoder) {
if (dev_priv->lvds_use_ssc) {
temp |= DREF_SSC1_ENABLE;
I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
+ POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
- I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
} else {
temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
- I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
}
+ I915_WRITE(PCH_DREF_CONTROL, temp);
}
}
@@ -3766,21 +3815,24 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
reduced_clock.m2;
}
+ dpll = 0;
if (!HAS_PCH_SPLIT(dev))
dpll = DPLL_VGA_MODE_DIS;
- if (IS_I9XX(dev)) {
+ if (!IS_GEN2(dev)) {
if (is_lvds)
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
if (is_sdvo) {
+ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
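+			/* For example, an SDVO mode whose dot clock is doubled
+			 * by the encoder reports a pixel multiplier of 2, and
+			 * multiplier - 1 = 1 is written into the DPLL field
+			 * below (illustrative).
+			 */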
+ if (pixel_multiplier > 1) {
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+ else if (HAS_PCH_SPLIT(dev))
+ dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+ }
dpll |= DPLL_DVO_HIGH_SPEED;
- sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
- else if (HAS_PCH_SPLIT(dev))
- dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
if (is_dp)
dpll |= DPLL_DVO_HIGH_SPEED;
@@ -3810,7 +3862,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
break;
}
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
} else {
if (is_lvds) {
@@ -3837,7 +3889,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dpll |= PLL_REF_INPUT_DREFCLK;
/* setup pipeconf */
- pipeconf = I915_READ(pipeconf_reg);
+ pipeconf = I915_READ(PIPECONF(pipe));
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -3851,7 +3903,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dspcntr |= DISPPLANE_SEL_PIPE_B;
}
- if (pipe == 0 && !IS_I965G(dev)) {
+ if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
/* Enable pixel doubling when the dot clock is > 90% of the (display)
* core speed.
*
@@ -3860,51 +3912,46 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
*/
if (mode->clock >
dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
- pipeconf |= PIPEACONF_DOUBLE_WIDE;
+ pipeconf |= PIPECONF_DOUBLE_WIDE;
else
- pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
+ pipeconf &= ~PIPECONF_DOUBLE_WIDE;
}
dspcntr |= DISPLAY_PLANE_ENABLE;
- pipeconf |= PIPEACONF_ENABLE;
+ pipeconf |= PIPECONF_ENABLE;
dpll |= DPLL_VCO_ENABLE;
-
- /* Disable the panel fitter if it was on our pipe */
- if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
- I915_WRITE(PFIT_CONTROL, 0);
-
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
/* assign to Ironlake registers */
if (HAS_PCH_SPLIT(dev)) {
- fp_reg = pch_fp_reg;
- dpll_reg = pch_dpll_reg;
+ fp_reg = PCH_FP0(pipe);
+ dpll_reg = PCH_DPLL(pipe);
+ } else {
+ fp_reg = FP0(pipe);
+ dpll_reg = DPLL(pipe);
}
if (!has_edp_encoder) {
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
- I915_READ(dpll_reg);
+
+ POSTING_READ(dpll_reg);
udelay(150);
}
/* enable transcoder DPLL */
if (HAS_PCH_CPT(dev)) {
temp = I915_READ(PCH_DPLL_SEL);
- if (trans_dpll_sel == 0)
- temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+ if (pipe == 0)
+ temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
else
- temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
I915_WRITE(PCH_DPLL_SEL, temp);
- I915_READ(PCH_DPLL_SEL);
- udelay(150);
- }
- if (HAS_PCH_SPLIT(dev)) {
- pipeconf &= ~PIPE_ENABLE_DITHER;
- pipeconf &= ~PIPE_DITHER_TYPE_MASK;
+ POSTING_READ(PCH_DPLL_SEL);
+ udelay(150);
}
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -3912,55 +3959,57 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* things on.
*/
if (is_lvds) {
- u32 lvds;
-
+ reg = LVDS;
if (HAS_PCH_SPLIT(dev))
- lvds_reg = PCH_LVDS;
+ reg = PCH_LVDS;
- lvds = I915_READ(lvds_reg);
- lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ temp = I915_READ(reg);
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
if (pipe == 1) {
if (HAS_PCH_CPT(dev))
- lvds |= PORT_TRANS_B_SEL_CPT;
+ temp |= PORT_TRANS_B_SEL_CPT;
else
- lvds |= LVDS_PIPEB_SELECT;
+ temp |= LVDS_PIPEB_SELECT;
} else {
if (HAS_PCH_CPT(dev))
- lvds &= ~PORT_TRANS_SEL_MASK;
+ temp &= ~PORT_TRANS_SEL_MASK;
else
- lvds &= ~LVDS_PIPEB_SELECT;
+ temp &= ~LVDS_PIPEB_SELECT;
}
/* set the corresponsding LVDS_BORDER bit */
- lvds |= dev_priv->lvds_border_bits;
+ temp |= dev_priv->lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
if (clock.p2 == 7)
- lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
else
- lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+ temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes.
*/
- /* set the dithering flag */
- if (IS_I965G(dev)) {
- if (dev_priv->lvds_dither) {
- if (HAS_PCH_SPLIT(dev)) {
- pipeconf |= PIPE_ENABLE_DITHER;
- pipeconf |= PIPE_DITHER_TYPE_ST01;
- } else
- lvds |= LVDS_ENABLE_DITHER;
- } else {
- if (!HAS_PCH_SPLIT(dev)) {
- lvds &= ~LVDS_ENABLE_DITHER;
- }
- }
+ /* set the dithering flag on non-PCH LVDS as needed */
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ if (dev_priv->lvds_dither)
+ temp |= LVDS_ENABLE_DITHER;
+ else
+ temp &= ~LVDS_ENABLE_DITHER;
}
- I915_WRITE(lvds_reg, lvds);
- I915_READ(lvds_reg);
+ I915_WRITE(reg, temp);
}
+
+	/* Set the dithering flag on LVDS/eDP as needed; clear it for anything other than a panel. */
+ if (HAS_PCH_SPLIT(dev)) {
+ pipeconf &= ~PIPECONF_DITHER_EN;
+ pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
+ if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) {
+ pipeconf |= PIPECONF_DITHER_EN;
+ pipeconf |= PIPECONF_DITHER_TYPE_ST1;
+ }
+ }
+
if (is_dp)
intel_dp_set_m_n(crtc, mode, adjusted_mode);
else if (HAS_PCH_SPLIT(dev)) {
@@ -3981,26 +4030,32 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
if (!has_edp_encoder) {
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll);
- I915_READ(dpll_reg);
+
/* Wait for the clocks to stabilize. */
+ POSTING_READ(dpll_reg);
udelay(150);
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ temp = 0;
if (is_sdvo) {
- sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
- I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
- ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
- } else
- I915_WRITE(dpll_md_reg, 0);
+ temp = intel_mode_get_pixel_multiplier(adjusted_mode);
+ if (temp > 1)
+ temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ else
+ temp = 0;
+ }
+ I915_WRITE(DPLL_MD(pipe), temp);
} else {
/* write it again -- the BIOS does, after all */
I915_WRITE(dpll_reg, dpll);
}
- I915_READ(dpll_reg);
+
/* Wait for the clocks to stabilize. */
+ POSTING_READ(dpll_reg);
udelay(150);
}
+ intel_crtc->lowfreq_avail = false;
if (is_lvds && has_reduced_clock && i915_powersave) {
I915_WRITE(fp_reg + 4, fp2);
intel_crtc->lowfreq_avail = true;
@@ -4010,7 +4065,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
} else {
I915_WRITE(fp_reg + 4, fp);
- intel_crtc->lowfreq_avail = false;
if (HAS_PIPE_CXSR(dev)) {
DRM_DEBUG_KMS("disabling CxSR downclocking\n");
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
@@ -4029,58 +4083,72 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
} else
pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
- I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ I915_WRITE(HTOTAL(pipe),
+ (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
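+	/* e.g. a 1920-wide mode with htotal 2200 packs as 0x0897077f,
+	 * active-1 in the low half and total-1 in the high half
+	 * (illustrative values).
+	 */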
- I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ I915_WRITE(HBLANK(pipe),
+ (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
- I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ I915_WRITE(HSYNC(pipe),
+ (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
- I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+
+ I915_WRITE(VTOTAL(pipe),
+ (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ I915_WRITE(VBLANK(pipe),
+ (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
- I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ I915_WRITE(VSYNC(pipe),
+ (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
- /* pipesrc and dspsize control the size that is scaled from, which should
- * always be the user's requested size.
+
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
*/
if (!HAS_PCH_SPLIT(dev)) {
- I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
- (mode->hdisplay - 1));
- I915_WRITE(dsppos_reg, 0);
+ I915_WRITE(DSPSIZE(plane),
+ ((mode->vdisplay - 1) << 16) |
+ (mode->hdisplay - 1));
+ I915_WRITE(DSPPOS(plane), 0);
}
- I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+ I915_WRITE(PIPESRC(pipe),
+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
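+	/* Note the packing differs from the timing registers: e.g. a 1920x1080
+	 * source becomes 0x077f0437, width-1 in the high half and height-1 in
+	 * the low half (illustrative values).
+	 */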
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
- I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
- I915_WRITE(link_m1_reg, m_n.link_m);
- I915_WRITE(link_n1_reg, m_n.link_n);
+ I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
+ I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
+ I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
if (has_edp_encoder) {
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
} else {
/* enable FDI RX PLL too */
- temp = I915_READ(fdi_rx_reg);
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
- I915_READ(fdi_rx_reg);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
+
+ POSTING_READ(reg);
udelay(200);
/* enable FDI TX PLL too */
- temp = I915_READ(fdi_tx_reg);
- I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
- I915_READ(fdi_tx_reg);
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
/* enable FDI RX PCDCLK */
- temp = I915_READ(fdi_rx_reg);
- I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
- I915_READ(fdi_rx_reg);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp | FDI_PCDCLK);
+
+ POSTING_READ(reg);
udelay(200);
}
}
- I915_WRITE(pipeconf_reg, pipeconf);
- I915_READ(pipeconf_reg);
+ I915_WRITE(PIPECONF(pipe), pipeconf);
+ POSTING_READ(PIPECONF(pipe));
intel_wait_for_vblank(dev, pipe);
@@ -4090,9 +4158,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
}
- I915_WRITE(dspcntr_reg, dspcntr);
+ I915_WRITE(DSPCNTR(plane), dspcntr);
- /* Flush the plane changes */
ret = intel_pipe_set_base(crtc, x, y, old_fb);
intel_update_watermarks(dev);
@@ -4185,7 +4252,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
}
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
-static void intel_crtc_update_cursor(struct drm_crtc *crtc)
+static void intel_crtc_update_cursor(struct drm_crtc *crtc,
+ bool on)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4198,7 +4266,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc)
pos = 0;
- if (intel_crtc->cursor_on && crtc->fb) {
+ if (on && crtc->enabled && crtc->fb) {
base = intel_crtc->cursor_addr;
if (x > (int) crtc->fb->width)
base = 0;
@@ -4310,7 +4378,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
addr = obj_priv->phys_obj->handle->busaddr;
}
- if (!IS_I9XX(dev))
+ if (IS_GEN2(dev))
I915_WRITE(CURSIZE, (height << 12) | width);
finish:
@@ -4330,7 +4398,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
intel_crtc->cursor_width = width;
intel_crtc->cursor_height = height;
- intel_crtc_update_cursor(crtc);
+ intel_crtc_update_cursor(crtc, true);
return 0;
fail_unpin:
@@ -4349,7 +4417,7 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
intel_crtc->cursor_x = x;
intel_crtc->cursor_y = y;
- intel_crtc_update_cursor(crtc);
+ intel_crtc_update_cursor(crtc, true);
return 0;
}
@@ -4418,7 +4486,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
struct intel_crtc *intel_crtc;
struct drm_crtc *possible_crtc;
struct drm_crtc *supported_crtc =NULL;
- struct drm_encoder *encoder = &intel_encoder->enc;
+ struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -4499,7 +4567,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector, int dpms_mode)
{
- struct drm_encoder *encoder = &intel_encoder->enc;
+ struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -4545,7 +4613,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
}
- if (IS_I9XX(dev)) {
+ if (!IS_GEN2(dev)) {
if (IS_PINEVIEW(dev))
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
@@ -4649,8 +4717,6 @@ static void intel_gpu_idle_timer(unsigned long arg)
struct drm_device *dev = (struct drm_device *)arg;
drm_i915_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
-
dev_priv->busy = false;
queue_work(dev_priv->wq, &dev_priv->idle_work);
@@ -4664,14 +4730,12 @@ static void intel_crtc_idle_timer(unsigned long arg)
struct drm_crtc *crtc = &intel_crtc->base;
drm_i915_private_t *dev_priv = crtc->dev->dev_private;
- DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
-
intel_crtc->busy = false;
queue_work(dev_priv->wq, &dev_priv->idle_work);
}
-static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
+static void intel_increase_pllclock(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -4706,9 +4770,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
}
/* Schedule downclock */
- if (schedule)
- mod_timer(&intel_crtc->idle_timer, jiffies +
- msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
+ mod_timer(&intel_crtc->idle_timer, jiffies +
+ msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
}
static void intel_decrease_pllclock(struct drm_crtc *crtc)
@@ -4844,7 +4907,7 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
}
/* Non-busy -> busy, upclock */
- intel_increase_pllclock(crtc, true);
+ intel_increase_pllclock(crtc);
intel_crtc->busy = true;
} else {
/* Busy -> busy, put off timer */
@@ -4858,8 +4921,22 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct intel_unpin_work *work;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = intel_crtc->unpin_work;
+ intel_crtc->unpin_work = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (work) {
+ cancel_work_sync(&work->work);
+ kfree(work);
+ }
drm_crtc_cleanup(crtc);
+
kfree(intel_crtc);
}
@@ -4919,7 +4996,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
/* Initial scanout buffer will have a 0 pending flip count */
if ((atomic_read(&obj_priv->pending_flip) == 0) ||
atomic_dec_and_test(&obj_priv->pending_flip))
- DRM_WAKEUP(&dev_priv->pending_flip_queue);
+ wake_up(&dev_priv->pending_flip_queue);
schedule_work(&work->work);
trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
@@ -5000,7 +5077,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
obj = intel_fb->obj;
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, obj);
+ ret = intel_pin_and_fence_fb_obj(dev, obj, true);
if (ret)
goto cleanup_work;
@@ -5009,9 +5086,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
drm_gem_object_reference(obj);
crtc->fb = fb;
- ret = i915_gem_object_flush_write_domain(obj);
- if (ret)
- goto cleanup_objs;
ret = drm_vblank_get(dev, intel_crtc->pipe);
if (ret)
@@ -5024,14 +5098,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (IS_GEN3(dev) || IS_GEN2(dev)) {
u32 flip_mask;
+ /* Can't queue multiple flips, so wait for the previous
+ * one to finish before executing the next.
+ */
+ BEGIN_LP_RING(2);
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
- BEGIN_LP_RING(2);
OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
- OUT_RING(0);
+ OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
}
@@ -5112,15 +5188,14 @@ cleanup_work:
return ret;
}
-static const struct drm_crtc_helper_funcs intel_helper_funcs = {
+static struct drm_crtc_helper_funcs intel_helper_funcs = {
.dpms = intel_crtc_dpms,
.mode_fixup = intel_crtc_mode_fixup,
.mode_set = intel_crtc_mode_set,
.mode_set_base = intel_pipe_set_base,
.mode_set_base_atomic = intel_pipe_set_base_atomic,
- .prepare = intel_crtc_prepare,
- .commit = intel_crtc_commit,
.load_lut = intel_crtc_load_lut,
+ .disable = intel_crtc_disable,
};
static const struct drm_crtc_funcs intel_crtc_funcs = {
@@ -5146,8 +5221,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
- intel_crtc->pipe = pipe;
- intel_crtc->plane = pipe;
for (i = 0; i < 256; i++) {
intel_crtc->lut_r[i] = i;
intel_crtc->lut_g[i] = i;
@@ -5157,9 +5230,9 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
/* Swap pipes & planes for FBC on pre-965 */
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
- if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) {
+ if (IS_MOBILE(dev) && IS_GEN3(dev)) {
DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
- intel_crtc->plane = ((pipe == 0) ? 1 : 0);
+ intel_crtc->plane = !pipe;
}
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
@@ -5169,6 +5242,16 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc->cursor_addr = 0;
intel_crtc->dpms_mode = -1;
+ intel_crtc->active = true; /* force the pipe off on setup_init_config */
+
+ if (HAS_PCH_SPLIT(dev)) {
+ intel_helper_funcs.prepare = ironlake_crtc_prepare;
+ intel_helper_funcs.commit = ironlake_crtc_commit;
+ } else {
+ intel_helper_funcs.prepare = i9xx_crtc_prepare;
+ intel_helper_funcs.commit = i9xx_crtc_commit;
+ }
+
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
intel_crtc->busy = false;
@@ -5204,38 +5287,25 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
return 0;
}
-struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
-{
- struct drm_crtc *crtc = NULL;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (intel_crtc->pipe == pipe)
- break;
- }
- return crtc;
-}
-
static int intel_encoder_clones(struct drm_device *dev, int type_mask)
{
+ struct intel_encoder *encoder;
int index_mask = 0;
- struct drm_encoder *encoder;
int entry = 0;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- if (type_mask & intel_encoder->clone_mask)
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ if (type_mask & encoder->clone_mask)
index_mask |= (1 << entry);
entry++;
}
+
return index_mask;
}
-
static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_encoder *encoder;
+ struct intel_encoder *encoder;
bool dpd_is_edp = false;
if (IS_MOBILE(dev) && !IS_I830(dev))
@@ -5324,12 +5394,10 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
- encoder->possible_crtcs = intel_encoder->crtc_mask;
- encoder->possible_clones = intel_encoder_clones(dev,
- intel_encoder->clone_mask);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ encoder->base.possible_crtcs = encoder->crtc_mask;
+ encoder->base.possible_clones =
+ intel_encoder_clones(dev, encoder->clone_mask);
}
}
@@ -5363,8 +5431,25 @@ int intel_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_gem_object *obj)
{
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
+ if (obj_priv->tiling_mode == I915_TILING_Y)
+ return -EINVAL;
+
+ if (mode_cmd->pitch & 63)
+ return -EINVAL;
+
+ switch (mode_cmd->bpp) {
+ case 8:
+ case 16:
+ case 24:
+ case 32:
+ break;
+ default:
+ return -EINVAL;
+ }
+
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -5473,6 +5558,10 @@ void ironlake_enable_drps(struct drm_device *dev)
u32 rgvmodectl = I915_READ(MEMMODECTL);
u8 fmax, fmin, fstart, vstart;
+ /* Enable temp reporting */
+ I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
+ I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
+
/* 100ms RC evaluation intervals */
I915_WRITE(RCUPEI, 100000);
I915_WRITE(RCDNEI, 100000);
@@ -5515,7 +5604,7 @@ void ironlake_enable_drps(struct drm_device *dev)
rgvmodectl |= MEMMODE_SWMODE_EN;
I915_WRITE(MEMMODECTL, rgvmodectl);
- if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 1, 0))
+ if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
DRM_ERROR("stuck trying to change perf mode\n");
msleep(1);
@@ -5714,20 +5803,20 @@ void intel_init_clock_gating(struct drm_device *dev)
if (IS_GM45(dev))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
- } else if (IS_I965GM(dev)) {
+ } else if (IS_CRESTLINE(dev)) {
I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
I915_WRITE(DSPCLK_GATE_D, 0);
I915_WRITE(RAMCLK_GATE_D, 0);
I915_WRITE16(DEUC, 0);
- } else if (IS_I965G(dev)) {
+ } else if (IS_BROADWATER(dev)) {
I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
I965_RCC_CLOCK_GATE_DISABLE |
I965_RCPB_CLOCK_GATE_DISABLE |
I965_ISC_CLOCK_GATE_DISABLE |
I965_FBC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
- } else if (IS_I9XX(dev)) {
+ } else if (IS_GEN3(dev)) {
u32 dstate = I915_READ(D_STATE);
dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
@@ -5809,7 +5898,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
dev_priv->display.enable_fbc = g4x_enable_fbc;
dev_priv->display.disable_fbc = g4x_disable_fbc;
- } else if (IS_I965GM(dev)) {
+ } else if (IS_CRESTLINE(dev)) {
dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
dev_priv->display.enable_fbc = i8xx_enable_fbc;
dev_priv->display.disable_fbc = i8xx_disable_fbc;
@@ -5869,9 +5958,9 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_wm = pineview_update_wm;
} else if (IS_G4X(dev))
dev_priv->display.update_wm = g4x_update_wm;
- else if (IS_I965G(dev))
+ else if (IS_GEN4(dev))
dev_priv->display.update_wm = i965_update_wm;
- else if (IS_I9XX(dev)) {
+ else if (IS_GEN3(dev)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
} else if (IS_I85X(dev)) {
@@ -5985,24 +6074,24 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_display(dev);
- if (IS_I965G(dev)) {
- dev->mode_config.max_width = 8192;
- dev->mode_config.max_height = 8192;
- } else if (IS_I9XX(dev)) {
+ if (IS_GEN2(dev)) {
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+ } else if (IS_GEN3(dev)) {
dev->mode_config.max_width = 4096;
dev->mode_config.max_height = 4096;
} else {
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
}
/* set memory base */
- if (IS_I9XX(dev))
- dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
- else
+ if (IS_GEN2(dev))
dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
+ else
+ dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
- if (IS_MOBILE(dev) || IS_I9XX(dev))
+ if (IS_MOBILE(dev) || !IS_GEN2(dev))
dev_priv->num_pipe = 2;
else
dev_priv->num_pipe = 1;
@@ -6038,10 +6127,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
- mutex_lock(&dev->struct_mutex);
-
drm_kms_helper_poll_fini(dev);
- intel_fbdev_fini(dev);
+ mutex_lock(&dev->struct_mutex);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
@@ -6049,12 +6136,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
continue;
intel_crtc = to_intel_crtc(crtc);
- intel_increase_pllclock(crtc, false);
- del_timer_sync(&intel_crtc->idle_timer);
+ intel_increase_pllclock(crtc);
}
- del_timer_sync(&dev_priv->idle_timer);
-
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
@@ -6083,33 +6167,36 @@ void intel_modeset_cleanup(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
+ /* Disable the irq before mode object teardown, for the irq might
+ * enqueue unpin/hotplug work. */
+ drm_irq_uninstall(dev);
+ cancel_work_sync(&dev_priv->hotplug_work);
+
+ /* Shut off idle work before the crtcs get freed. */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ intel_crtc = to_intel_crtc(crtc);
+ del_timer_sync(&intel_crtc->idle_timer);
+ }
+ del_timer_sync(&dev_priv->idle_timer);
+ cancel_work_sync(&dev_priv->idle_work);
+
drm_mode_config_cleanup(dev);
}
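The reordered cleanup above follows one rule, spelled out in its comments: every source of deferred work (the irq handler, hotplug work, the per-CRTC idle timers) is quiesced before the objects that work would touch are freed. A tiny sketch of the rule, with hypothetical names:

/* Sketch: quiesce work sources first, free state last. */
struct kms_state { int freed; };

static void shutdown(struct kms_state *s,
                     void (*disable_irq)(void),   /* stops new work being queued */
                     void (*drain_work)(void))    /* flushes work already queued */
{
        disable_irq();
        drain_work();
        s->freed = 1;   /* only now is it safe to tear the state down */
}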
-
/*
* Return which encoder is currently attached for connector.
*/
-struct drm_encoder *intel_attached_encoder (struct drm_connector *connector)
+struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
{
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- obj = drm_mode_object_find(connector->dev,
- connector->encoder_ids[i],
- DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- continue;
+ return &intel_attached_encoder(connector)->base;
+}
- encoder = obj_to_encoder(obj);
- return encoder;
- }
- return NULL;
+void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder)
+{
+ connector->encoder = encoder;
+ drm_mode_connector_attach_encoder(&connector->base,
+ &encoder->base);
}
/*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1a51ee07de3e..152d94507b79 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -58,14 +58,23 @@ struct intel_dp {
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
+ uint8_t train_set[4];
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
};
static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
- return container_of(enc_to_intel_encoder(encoder), struct intel_dp, base);
+ return container_of(encoder, struct intel_dp, base.base);
+}
+
+static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_dp, base);
}
-static void intel_dp_link_train(struct intel_dp *intel_dp);
+static void intel_dp_start_link_train(struct intel_dp *intel_dp);
+static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
static void intel_dp_link_down(struct intel_dp *intel_dp);
void
@@ -130,7 +139,7 @@ intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pi
struct drm_i915_private *dev_priv = dev->dev_private;
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
- return (pixel_clock * dev_priv->edp_bpp) / 8;
+ return (pixel_clock * dev_priv->edp.bpp + 7) / 8;
else
return pixel_clock * 3;
}
@@ -145,8 +154,7 @@ static int
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
@@ -233,7 +241,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *recv, int recv_size)
{
uint32_t output_reg = intel_dp->output_reg;
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
@@ -246,8 +254,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
+ *
+ * Note that PCH attached eDP panels should use a 125MHz input
+ * clock divider.
*/
- if (IS_eDP(intel_dp)) {
+ if (IS_eDP(intel_dp) && !IS_PCH_eDP(intel_dp)) {
if (IS_GEN6(dev))
aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
else
@@ -642,7 +653,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
lane_count = intel_dp->lane_count;
if (IS_PCH_eDP(intel_dp))
- bpp = dev_priv->edp_bpp;
+ bpp = dev_priv->edp.bpp;
break;
}
}
@@ -698,7 +709,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
{
struct drm_device *dev = encoder->dev;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_crtc *crtc = intel_dp->base.enc.crtc;
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
intel_dp->DP = (DP_VOLTAGE_0_4 |
@@ -754,13 +765,14 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
-static void ironlake_edp_panel_on (struct drm_device *dev)
+/* Returns true if the panel was already on when called */
+static bool ironlake_edp_panel_on (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
if (I915_READ(PCH_PP_STATUS) & PP_ON)
- return;
+ return true;
pp = I915_READ(PCH_PP_CONTROL);
@@ -769,17 +781,24 @@ static void ironlake_edp_panel_on (struct drm_device *dev)
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
- pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
+ pp |= POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
- if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10))
+ /* Ouch. We need to wait here for some panels, like Dell e6510
+ * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
+ */
+ msleep(300);
+
+ if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000))
DRM_ERROR("panel on wait timed out: 0x%08x\n",
I915_READ(PCH_PP_STATUS));
- pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
+ pp &= ~(PANEL_UNLOCK_REGS);
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
+
+ return false;
}
static void ironlake_edp_panel_off (struct drm_device *dev)
@@ -797,14 +816,43 @@ static void ironlake_edp_panel_off (struct drm_device *dev)
pp &= ~POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
- if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000, 10))
+ if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000))
DRM_ERROR("panel off wait timed out: 0x%08x\n",
I915_READ(PCH_PP_STATUS));
/* Make sure VDD is enabled so DP AUX will work */
- pp |= EDP_FORCE_VDD | PANEL_POWER_RESET; /* restore panel reset bit */
+ pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
+
+ /* Ouch. We need to wait here for some panels, like Dell e6510
+ * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
+ */
+ msleep(300);
+}
+
+static void ironlake_edp_panel_vdd_on(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp;
+
+ pp = I915_READ(PCH_PP_CONTROL);
+ pp |= EDP_FORCE_VDD;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+ msleep(300);
+}
+
+static void ironlake_edp_panel_vdd_off(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp;
+
+ pp = I915_READ(PCH_PP_CONTROL);
+ pp &= ~EDP_FORCE_VDD;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+ msleep(300);
}
static void ironlake_edp_backlight_on (struct drm_device *dev)
@@ -850,6 +898,7 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder)
dpa_ctl = I915_READ(DP_A);
dpa_ctl |= DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
+ POSTING_READ(DP_A);
udelay(200);
}
@@ -860,9 +909,10 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
- if (IS_eDP(intel_dp)) {
+ if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
+ ironlake_edp_panel_off(dev);
ironlake_edp_backlight_off(dev);
- ironlake_edp_panel_on(dev);
+ ironlake_edp_panel_vdd_on(dev);
ironlake_edp_pll_on(encoder);
}
if (dp_reg & DP_PORT_EN)
@@ -873,14 +923,17 @@ static void intel_dp_commit(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dp_reg = I915_READ(intel_dp->output_reg);
- if (!(dp_reg & DP_PORT_EN)) {
- intel_dp_link_train(intel_dp);
- }
+ intel_dp_start_link_train(intel_dp);
+
+ if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+ ironlake_edp_panel_on(dev);
+
+ intel_dp_complete_link_train(intel_dp);
+
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
ironlake_edp_backlight_on(dev);
+ intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
}
static void
@@ -902,9 +955,10 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
ironlake_edp_pll_off(encoder);
} else {
if (!(dp_reg & DP_PORT_EN)) {
+ intel_dp_start_link_train(intel_dp);
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
ironlake_edp_panel_on(dev);
- intel_dp_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
ironlake_edp_backlight_on(dev);
}
@@ -917,14 +971,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
* link status information
*/
static bool
-intel_dp_get_link_status(struct intel_dp *intel_dp,
- uint8_t link_status[DP_LINK_STATUS_SIZE])
+intel_dp_get_link_status(struct intel_dp *intel_dp)
{
int ret;
ret = intel_dp_aux_native_read(intel_dp,
DP_LANE0_1_STATUS,
- link_status, DP_LINK_STATUS_SIZE);
+ intel_dp->link_status, DP_LINK_STATUS_SIZE);
if (ret != DP_LINK_STATUS_SIZE)
return false;
return true;
@@ -999,18 +1052,15 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
}
static void
-intel_get_adjust_train(struct intel_dp *intel_dp,
- uint8_t link_status[DP_LINK_STATUS_SIZE],
- int lane_count,
- uint8_t train_set[4])
+intel_get_adjust_train(struct intel_dp *intel_dp)
{
uint8_t v = 0;
uint8_t p = 0;
int lane;
- for (lane = 0; lane < lane_count; lane++) {
- uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane);
- uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane);
+ for (lane = 0; lane < intel_dp->lane_count; lane++) {
+ uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane);
+ uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
if (this_v > v)
v = this_v;
@@ -1025,7 +1075,7 @@ intel_get_adjust_train(struct intel_dp *intel_dp,
p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
for (lane = 0; lane < 4; lane++)
- train_set[lane] = v | p;
+ intel_dp->train_set[lane] = v | p;
}
static uint32_t
@@ -1116,18 +1166,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count
DP_LANE_CHANNEL_EQ_DONE|\
DP_LANE_SYMBOL_LOCKED)
static bool
-intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+intel_channel_eq_ok(struct intel_dp *intel_dp)
{
uint8_t lane_align;
uint8_t lane_status;
int lane;
- lane_align = intel_dp_link_status(link_status,
+ lane_align = intel_dp_link_status(intel_dp->link_status,
DP_LANE_ALIGN_STATUS_UPDATED);
if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
return false;
- for (lane = 0; lane < lane_count; lane++) {
- lane_status = intel_get_lane_status(link_status, lane);
+ for (lane = 0; lane < intel_dp->lane_count; lane++) {
+ lane_status = intel_get_lane_status(intel_dp->link_status, lane);
if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
return false;
}
@@ -1137,48 +1187,47 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
- uint8_t dp_train_pat,
- uint8_t train_set[4],
- bool first)
+ uint8_t dp_train_pat)
{
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
int ret;
I915_WRITE(intel_dp->output_reg, dp_reg_value);
POSTING_READ(intel_dp->output_reg);
- if (first)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_dp_aux_native_write_1(intel_dp,
DP_TRAINING_PATTERN_SET,
dp_train_pat);
ret = intel_dp_aux_native_write(intel_dp,
- DP_TRAINING_LANE0_SET, train_set, 4);
+ DP_TRAINING_LANE0_SET,
+ intel_dp->train_set, 4);
if (ret != 4)
return false;
return true;
}
+/* Enable corresponding port and start training pattern 1 */
static void
-intel_dp_link_train(struct intel_dp *intel_dp)
+intel_dp_start_link_train(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint8_t train_set[4];
- uint8_t link_status[DP_LINK_STATUS_SIZE];
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
int i;
uint8_t voltage;
bool clock_recovery = false;
- bool channel_eq = false;
- bool first = true;
int tries;
u32 reg;
uint32_t DP = intel_dp->DP;
+ /* Enable output, wait for it to become active */
+ I915_WRITE(intel_dp->output_reg, intel_dp->DP);
+ POSTING_READ(intel_dp->output_reg);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
/* Write the link configuration data */
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
intel_dp->link_configuration,
@@ -1189,18 +1238,18 @@ intel_dp_link_train(struct intel_dp *intel_dp)
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
- memset(train_set, 0, 4);
+ memset(intel_dp->train_set, 0, 4);
voltage = 0xff;
tries = 0;
clock_recovery = false;
for (;;) {
- /* Use train_set[0] to set the voltage and pre emphasis values */
+ /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
- signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
+ signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
+ signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
@@ -1210,52 +1259,64 @@ intel_dp_link_train(struct intel_dp *intel_dp)
reg = DP | DP_LINK_TRAIN_PAT_1;
if (!intel_dp_set_link_train(intel_dp, reg,
- DP_TRAINING_PATTERN_1, train_set, first))
+ DP_TRAINING_PATTERN_1))
break;
- first = false;
/* Set training pattern 1 */
udelay(100);
- if (!intel_dp_get_link_status(intel_dp, link_status))
+ if (!intel_dp_get_link_status(intel_dp))
break;
- if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
clock_recovery = true;
break;
}
/* Check to see if we've tried the max voltage */
for (i = 0; i < intel_dp->lane_count; i++)
- if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
if (i == intel_dp->lane_count)
break;
/* Check to see if we've tried the same voltage 5 times */
- if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+ if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++tries;
if (tries == 5)
break;
} else
tries = 0;
- voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+ voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
- /* Compute new train_set as requested by target */
- intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
+ /* Compute new intel_dp->train_set as requested by target */
+ intel_get_adjust_train(intel_dp);
}
+ intel_dp->DP = DP;
+}
+
+static void
+intel_dp_complete_link_train(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool channel_eq = false;
+ int tries;
+ u32 reg;
+ uint32_t DP = intel_dp->DP;
+
/* channel equalization */
tries = 0;
channel_eq = false;
for (;;) {
- /* Use train_set[0] to set the voltage and pre emphasis values */
+ /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
- signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
+ signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
+ signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
@@ -1266,15 +1327,14 @@ intel_dp_link_train(struct intel_dp *intel_dp)
/* channel eq pattern */
if (!intel_dp_set_link_train(intel_dp, reg,
- DP_TRAINING_PATTERN_2, train_set,
- false))
+ DP_TRAINING_PATTERN_2))
break;
udelay(400);
- if (!intel_dp_get_link_status(intel_dp, link_status))
+ if (!intel_dp_get_link_status(intel_dp))
break;
- if (intel_channel_eq_ok(link_status, intel_dp->lane_count)) {
+ if (intel_channel_eq_ok(intel_dp)) {
channel_eq = true;
break;
}
@@ -1283,8 +1343,8 @@ intel_dp_link_train(struct intel_dp *intel_dp)
if (tries > 5)
break;
- /* Compute new train_set as requested by target */
- intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
+ /* Compute new intel_dp->train_set as requested by target */
+ intel_get_adjust_train(intel_dp);
++tries;
}
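The old single intel_dp_link_train() is split above into a clock-recovery phase (training pattern 1) and a channel-equalisation phase (training pattern 2), with train_set and link_status moved into struct intel_dp so both phases can share them; intel_dp_commit() then powers the eDP panel on between the two phases. A condensed sketch of that ordering (stub functions, hypothetical names):

/* Sketch of the two-phase enable sequence used by intel_dp_commit(). */
static void start_link_train(void)    { /* pattern 1: clock recovery  */ }
static void complete_link_train(void) { /* pattern 2: channel eq      */ }
static void edp_panel_on(void)        { /* PCH panel power sequencing */ }
static void edp_backlight_on(void)    { }

static void dp_commit(int is_edp)
{
        start_link_train();
        if (is_edp)
                edp_panel_on();          /* panel comes up mid-training */
        complete_link_train();
        if (is_edp)
                edp_backlight_on();
}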
@@ -1302,7 +1362,7 @@ intel_dp_link_train(struct intel_dp *intel_dp)
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t DP = intel_dp->DP;
@@ -1318,14 +1378,13 @@ intel_dp_link_down(struct intel_dp *intel_dp)
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
- POSTING_READ(intel_dp->output_reg);
} else {
DP &= ~DP_LINK_TRAIN_MASK;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
- POSTING_READ(intel_dp->output_reg);
}
+ POSTING_READ(intel_dp->output_reg);
- udelay(17000);
+ msleep(17);
if (IS_eDP(intel_dp))
DP |= DP_LINK_TRAIN_OFF;
@@ -1345,27 +1404,29 @@ intel_dp_link_down(struct intel_dp *intel_dp)
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
- uint8_t link_status[DP_LINK_STATUS_SIZE];
-
- if (!intel_dp->base.enc.crtc)
+ if (!intel_dp->base.base.crtc)
return;
- if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ if (!intel_dp_get_link_status(intel_dp)) {
intel_dp_link_down(intel_dp);
return;
}
- if (!intel_channel_eq_ok(link_status, intel_dp->lane_count))
- intel_dp_link_train(intel_dp);
+ if (!intel_channel_eq_ok(intel_dp)) {
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
+ }
}
static enum drm_connector_status
ironlake_dp_detect(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
enum drm_connector_status status;
+ /* Panel needs power for AUX to work */
+ if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+ ironlake_edp_panel_vdd_on(connector->dev);
status = connector_status_disconnected;
if (intel_dp_aux_native_read(intel_dp,
0x000, intel_dp->dpcd,
@@ -1376,6 +1437,8 @@ ironlake_dp_detect(struct drm_connector *connector)
}
DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0],
intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]);
+ if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+ ironlake_edp_panel_vdd_off(connector->dev);
return status;
}
@@ -1388,9 +1451,8 @@ ironlake_dp_detect(struct drm_connector *connector)
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t temp, bit;
enum drm_connector_status status;
@@ -1432,16 +1494,15 @@ intel_dp_detect(struct drm_connector *connector, bool force)
static int intel_dp_get_modes(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
- ret = intel_ddc_get_modes(connector, intel_dp->base.ddc_bus);
+ ret = intel_ddc_get_modes(connector, &intel_dp->adapter);
if (ret) {
if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
!dev_priv->panel_fixed_mode) {
@@ -1479,6 +1540,15 @@ intel_dp_destroy (struct drm_connector *connector)
kfree(connector);
}
+static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ i2c_del_adapter(&intel_dp->adapter);
+ drm_encoder_cleanup(encoder);
+ kfree(intel_dp);
+}
+
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
.dpms = intel_dp_dpms,
.mode_fixup = intel_dp_mode_fixup,
@@ -1497,14 +1567,14 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
.get_modes = intel_dp_get_modes,
.mode_valid = intel_dp_mode_valid,
- .best_encoder = intel_attached_encoder,
+ .best_encoder = intel_best_encoder,
};
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
- .destroy = intel_encoder_destroy,
+ .destroy = intel_dp_encoder_destroy,
};
-void
+static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
@@ -1613,12 +1683,11 @@ intel_dp_init(struct drm_device *dev, int output_reg)
intel_dp->has_audio = false;
intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
- drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_connector->base,
- &intel_encoder->enc);
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
/* Set up the DDC bus. */
@@ -1648,7 +1717,6 @@ intel_dp_init(struct drm_device *dev, int output_reg)
intel_dp_i2c_init(intel_dp, intel_connector, name);
- intel_encoder->ddc_bus = &intel_dp->adapter;
intel_encoder->hot_plug = intel_dp_hot_plug;
if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ad312ca6b3e5..40e99bf27ff7 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -26,14 +26,12 @@
#define __INTEL_DRV_H__
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
-#include <linux/i2c-algo-bit.h>
#include "i915_drv.h"
#include "drm_crtc.h"
-
#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
-#define wait_for(COND, MS, W) ({ \
+#define _wait_for(COND, MS, W) ({ \
unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
int ret__ = 0; \
while (! (COND)) { \
@@ -41,11 +39,24 @@
ret__ = -ETIMEDOUT; \
break; \
} \
- if (W) msleep(W); \
+ if (W && !in_dbg_master()) msleep(W); \
} \
ret__; \
})
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
+
+#define MSLEEP(x) do { \
+ if (in_dbg_master()) \
+ mdelay(x); \
+ else \
+ msleep(x); \
+} while(0)
+
+#define KHz(x) (1000*x)
+#define MHz(x) KHz(1000*x)
+
/*
* Display related stuff
*/
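With the change above, wait_for(COND, MS) always polls with a 1 ms sleep (skipped under the kernel debugger) and wait_for_atomic(COND, MS) busy-waits; callers such as the PCH_PP_STATUS waits in intel_dp.c drop their third argument accordingly. A rough, self-contained illustration of the loop the macro expands to (jiffies and msleep() approximated by a millisecond counter):

/* Approximation of _wait_for(COND, MS, W): returns 0 on success,
 * -1 (-ETIMEDOUT in the kernel) if COND stays false past the timeout. */
static int poll_until(int (*cond)(void), int timeout_ms, int sleep_ms)
{
        int elapsed_ms = 0;

        while (!cond()) {
                if (elapsed_ms > timeout_ms)
                        return -1;
                /* wait_for() sleeps 1 ms per iteration; wait_for_atomic()
                 * spins, modelled here as 1 ms of elapsed time as well. */
                elapsed_ms += sleep_ms ? sleep_ms : 1;
        }
        return 0;
}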
@@ -96,24 +107,39 @@
#define INTEL_DVO_CHIP_TMDS 2
#define INTEL_DVO_CHIP_TVOUT 4
-struct intel_i2c_chan {
- struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */
- u32 reg; /* GPIO reg */
- struct i2c_adapter adapter;
- struct i2c_algo_bit_data algo;
-};
+/* drm_display_mode->private_flags */
+#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
+#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+
+static inline void
+intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
+ int multiplier)
+{
+ mode->clock *= multiplier;
+ mode->private_flags |= multiplier;
+}
+
+static inline int
+intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
+{
+ return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
+}
struct intel_framebuffer {
struct drm_framebuffer base;
struct drm_gem_object *obj;
};
+struct intel_fbdev {
+ struct drm_fb_helper helper;
+ struct intel_framebuffer ifb;
+ struct list_head fbdev_list;
+ struct drm_display_mode *our_mode;
+};
struct intel_encoder {
- struct drm_encoder enc;
+ struct drm_encoder base;
int type;
- struct i2c_adapter *i2c_bus;
- struct i2c_adapter *ddc_bus;
bool load_detect_temp;
bool needs_tv_clock;
void (*hot_plug)(struct intel_encoder *);
@@ -123,32 +149,7 @@ struct intel_encoder {
struct intel_connector {
struct drm_connector base;
-};
-
-struct intel_crtc;
-struct intel_overlay {
- struct drm_device *dev;
- struct intel_crtc *crtc;
- struct drm_i915_gem_object *vid_bo;
- struct drm_i915_gem_object *old_vid_bo;
- int active;
- int pfit_active;
- u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
- u32 color_key;
- u32 brightness, contrast, saturation;
- u32 old_xscale, old_yscale;
- /* register access */
- u32 flip_addr;
- struct drm_i915_gem_object *reg_bo;
- void *virt_addr;
- /* flip handling */
- uint32_t last_flip_req;
- int hw_wedged;
-#define HW_WEDGED 1
-#define NEEDS_WAIT_FOR_FLIP 2
-#define RELEASE_OLD_VID 3
-#define SWITCH_OFF_STAGE_1 4
-#define SWITCH_OFF_STAGE_2 5
+ struct intel_encoder *encoder;
};
struct intel_crtc {
@@ -157,6 +158,7 @@ struct intel_crtc {
enum plane plane;
u8 lut_r[256], lut_g[256], lut_b[256];
int dpms_mode;
+ bool active; /* is the crtc on? independent of the dpms mode */
bool busy; /* is scanout buffer being updated frequently? */
struct timer_list idle_timer;
bool lowfreq_avail;
@@ -168,14 +170,21 @@ struct intel_crtc {
uint32_t cursor_addr;
int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
- bool cursor_visible, cursor_on;
+ bool cursor_visible;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
-#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
+#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+static inline struct drm_crtc *
+intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return dev_priv->pipe_to_crtc_mapping[pipe];
+}
+
struct intel_unpin_work {
struct work_struct work;
struct drm_device *dev;
@@ -186,13 +195,8 @@ struct intel_unpin_work {
bool enable_stall_check;
};
-struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
- const char *name);
-void intel_i2c_destroy(struct i2c_adapter *adapter);
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
-void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
-void intel_i2c_reset_gmbus(struct drm_device *dev);
+extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
@@ -209,29 +213,37 @@ extern bool intel_pch_has_edp(struct drm_crtc *crtc);
extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
-
+/* intel_panel.c */
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct drm_device *dev,
int fitting_mode,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
+extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
+extern u32 intel_panel_get_backlight(struct drm_device *dev);
+extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
-extern int intel_panel_fitter_pipe (struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
extern void intel_encoder_commit (struct drm_encoder *encoder);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
-extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
+static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
+{
+ return to_intel_connector(connector)->encoder;
+}
+
+extern void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder);
+extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe);
extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
-extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
+extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
struct drm_display_mode *mode,
@@ -253,7 +265,8 @@ extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
- struct drm_gem_object *obj);
+ struct drm_gem_object *obj,
+ bool pipelined);
extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
@@ -268,9 +281,8 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
extern void intel_setup_overlay(struct drm_device *dev);
extern void intel_cleanup_overlay(struct drm_device *dev);
-extern int intel_overlay_switch_off(struct intel_overlay *overlay);
-extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
- int interruptible);
+extern int intel_overlay_switch_off(struct intel_overlay *overlay,
+ bool interruptible);
extern int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_overlay_attrs(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 7c9ec1472d46..ea373283c93b 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -72,7 +72,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.name = "ch7017",
.dvo_reg = DVOC,
.slave_addr = 0x75,
- .gpio = GPIOE,
+ .gpio = GMBUS_PORT_DPB,
.dev_ops = &ch7017_ops,
}
};
@@ -88,7 +88,13 @@ struct intel_dvo {
static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
{
- return container_of(enc_to_intel_encoder(encoder), struct intel_dvo, base);
+ return container_of(encoder, struct intel_dvo, base.base);
+}
+
+static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_dvo, base);
}
static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
@@ -112,8 +118,7 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
static int intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@@ -224,23 +229,22 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
static enum drm_connector_status
intel_dvo_detect(struct drm_connector *connector, bool force)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
-
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
}
static int intel_dvo_get_modes(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
/* We should probably have an i2c driver get_modes function for those
* devices which will have a fixed set of modes determined by the chip
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
*/
- intel_ddc_get_modes(connector, intel_dvo->base.ddc_bus);
+ intel_ddc_get_modes(connector,
+ &dev_priv->gmbus[GMBUS_PORT_DPC].adapter);
if (!list_empty(&connector->probed_modes))
return 1;
@@ -281,7 +285,7 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
.mode_valid = intel_dvo_mode_valid,
.get_modes = intel_dvo_get_modes,
- .best_encoder = intel_attached_encoder,
+ .best_encoder = intel_best_encoder,
};
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
@@ -311,8 +315,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
struct drm_display_mode *mode = NULL;
@@ -323,7 +326,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
struct drm_crtc *crtc;
int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
- crtc = intel_get_crtc_from_pipe(dev, pipe);
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
if (crtc) {
mode = intel_crtc_mode_get(dev, crtc);
if (mode) {
@@ -341,11 +344,10 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
void intel_dvo_init(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_dvo *intel_dvo;
struct intel_connector *intel_connector;
- struct i2c_adapter *i2cbus = NULL;
- int ret = 0;
int i;
int encoder_type = DRM_MODE_ENCODER_NONE;
@@ -360,16 +362,14 @@ void intel_dvo_init(struct drm_device *dev)
}
intel_encoder = &intel_dvo->base;
-
- /* Set up the DDC bus */
- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
- if (!intel_encoder->ddc_bus)
- goto free_intel;
+ drm_encoder_init(dev, &intel_encoder->base,
+ &intel_dvo_enc_funcs, encoder_type);
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
struct drm_connector *connector = &intel_connector->base;
const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
+ struct i2c_adapter *i2c;
int gpio;
/* Allow the I2C driver info to specify the GPIO to be used in
@@ -379,24 +379,18 @@ void intel_dvo_init(struct drm_device *dev)
if (dvo->gpio != 0)
gpio = dvo->gpio;
else if (dvo->type == INTEL_DVO_CHIP_LVDS)
- gpio = GPIOB;
+ gpio = GMBUS_PORT_SSC;
else
- gpio = GPIOE;
+ gpio = GMBUS_PORT_DPB;
/* Set up the I2C bus necessary for the chip we're probing.
* It appears that everything is on GPIOE except for panels
* on i830 laptops, which are on GPIOB (DVOA).
*/
- if (i2cbus != NULL)
- intel_i2c_destroy(i2cbus);
- if (!(i2cbus = intel_i2c_create(dev, gpio,
- gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
- continue;
- }
+ i2c = &dev_priv->gmbus[gpio].adapter;
intel_dvo->dev = *dvo;
- ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus);
- if (!ret)
+ if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
continue;
intel_encoder->type = INTEL_OUTPUT_DVO;
@@ -427,13 +421,10 @@ void intel_dvo_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_encoder_init(dev, &intel_encoder->enc,
- &intel_dvo_enc_funcs, encoder_type);
- drm_encoder_helper_add(&intel_encoder->enc,
+ drm_encoder_helper_add(&intel_encoder->base,
&intel_dvo_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_connector->base,
- &intel_encoder->enc);
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
if (dvo->type == INTEL_DVO_CHIP_LVDS) {
/* For our LVDS chipsets, we should hopefully be able
* to dig the fixed panel mode out of the BIOS data.
@@ -451,11 +442,7 @@ void intel_dvo_init(struct drm_device *dev)
return;
}
- intel_i2c_destroy(intel_encoder->ddc_bus);
- /* Didn't find a chip, so tear down. */
- if (i2cbus != NULL)
- intel_i2c_destroy(i2cbus);
-free_intel:
+ drm_encoder_cleanup(&intel_encoder->base);
kfree(intel_dvo);
kfree(intel_connector);
}
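The DVO init path above stops creating and destroying a private bit-banging bus per probe attempt and instead borrows the shared GMBUS adapter for whichever pin the candidate device lists. A sketch of that table-driven probe (hypothetical types, not the driver's):

/* Sketch: walk a table of candidate encoders, handing each the shared
 * i2c adapter for its gmbus port; stop at the first one that answers. */
struct dvo_candidate {
        const char *name;
        int gmbus_port;                  /* e.g. GMBUS_PORT_DPB / _SSC */
        int (*init)(int gmbus_port);     /* non-zero on success */
};

static int probe_dvo(const struct dvo_candidate *tbl, int n)
{
        int i;

        for (i = 0; i < n; i++)
                /* driver: i2c = &dev_priv->gmbus[gpio].adapter */
                if (tbl[i].init(tbl[i].gmbus_port))
                        return i;        /* found a controller */

        return -1;                       /* nothing found: caller cleans up */
}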
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 56ad9df2ccb5..b937ccfa7bec 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -44,13 +44,6 @@
#include "i915_drm.h"
#include "i915_drv.h"
-struct intel_fbdev {
- struct drm_fb_helper helper;
- struct intel_framebuffer ifb;
- struct list_head fbdev_list;
- struct drm_display_mode *our_mode;
-};
-
static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
@@ -75,7 +68,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
struct drm_gem_object *fbo = NULL;
struct drm_i915_gem_object *obj_priv;
struct device *device = &dev->pdev->dev;
- int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+ int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0;
/* we don't do packed 24bpp */
if (sizes->surface_bpp == 24)
@@ -100,19 +93,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, fbo);
+ /* Flush everything out, we'll be doing GTT only from now on */
+ ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref;
}
- /* Flush everything out, we'll be doing GTT only from now on */
- ret = i915_gem_object_set_to_gtt_domain(fbo, 1);
- if (ret) {
- DRM_ERROR("failed to bind fb: %d.\n", ret);
- goto out_unpin;
- }
-
info = framebuffer_alloc(0, device);
if (!info) {
ret = -ENOMEM;
@@ -142,7 +129,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
goto out_unpin;
}
info->apertures->ranges[0].base = dev->mode_config.fb_base;
- if (IS_I9XX(dev))
+ if (!IS_GEN2(dev))
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
else
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
@@ -219,8 +206,8 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.fb_probe = intel_fb_find_or_create_single,
};
-int intel_fbdev_destroy(struct drm_device *dev,
- struct intel_fbdev *ifbdev)
+static void intel_fbdev_destroy(struct drm_device *dev,
+ struct intel_fbdev *ifbdev)
{
struct fb_info *info;
struct intel_framebuffer *ifb = &ifbdev->ifb;
@@ -238,11 +225,9 @@ int intel_fbdev_destroy(struct drm_device *dev,
drm_framebuffer_cleanup(&ifb->base);
if (ifb->obj) {
- drm_gem_object_handle_unreference(ifb->obj);
- drm_gem_object_unreference(ifb->obj);
+ drm_gem_object_handle_unreference_unlocked(ifb->obj);
+ drm_gem_object_unreference_unlocked(ifb->obj);
}
-
- return 0;
}
int intel_fbdev_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 926934a482ec..9fb9501f2d07 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -40,12 +40,19 @@
struct intel_hdmi {
struct intel_encoder base;
u32 sdvox_reg;
+ int ddc_bus;
bool has_hdmi_sink;
};
static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
- return container_of(enc_to_intel_encoder(encoder), struct intel_hdmi, base);
+ return container_of(encoder, struct intel_hdmi, base.base);
+}
+
+static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_hdmi, base);
}
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
@@ -141,13 +148,14 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- struct edid *edid = NULL;
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
intel_hdmi->has_hdmi_sink = false;
- edid = drm_get_edid(connector, intel_hdmi->base.ddc_bus);
+ edid = drm_get_edid(connector,
+ &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -163,14 +171,15 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
/* We should parse the EDID data and find out if it's an HDMI sink so
* we can send audio to it.
*/
- return intel_ddc_get_modes(connector, intel_hdmi->base.ddc_bus);
+ return intel_ddc_get_modes(connector,
+ &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
}
static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -198,7 +207,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
.get_modes = intel_hdmi_get_modes,
.mode_valid = intel_hdmi_mode_valid,
- .best_encoder = intel_attached_encoder,
+ .best_encoder = intel_best_encoder,
};
static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
@@ -224,6 +233,9 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
}
intel_encoder = &intel_hdmi->base;
+ drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
@@ -239,39 +251,31 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
/* Set up the DDC bus. */
if (sdvox_reg == SDVOB) {
intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == SDVOC) {
intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIB) {
intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
- intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
- "HDMIB");
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIC) {
intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
- intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
- "HDMIC");
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMID) {
intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
- intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
- "HDMID");
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
}
- if (!intel_encoder->ddc_bus)
- goto err_connector;
intel_hdmi->sdvox_reg = sdvox_reg;
- drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs,
- DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_connector->base,
- &intel_encoder->enc);
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -282,13 +286,4 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
-
- return;
-
-err_connector:
- drm_connector_cleanup(connector);
- kfree(intel_hdmi);
- kfree(intel_connector);
-
- return;
}
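As with DVO above, the HDMI connector no longer owns a bit-banging adapter; it records which GMBUS port carries its DDC lines and the detect/get_modes paths index into dev_priv->gmbus[] on demand, which also removes the old err_connector unwind. A small sketch of the lookup (hypothetical stand-in types):

/* Sketch: store a port index, resolve it to the shared adapter lazily. */
struct gmbus_adapter { int port; /* plus the real i2c_adapter state */ };

struct hdmi_port {
        int ddc_bus;                     /* GMBUS_PORT_DPB / DPC / DPD */
};

static struct gmbus_adapter *
hdmi_ddc_adapter(struct gmbus_adapter *gmbus, const struct hdmi_port *hdmi)
{
        /* driver: &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter */
        return &gmbus[hdmi->ddc_bus];
}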
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index c2649c7df14c..2449a74d4d80 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
- * Copyright © 2006-2008 Intel Corporation
+ * Copyright © 2006-2008,2010 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -24,10 +24,9 @@
*
* Authors:
* Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
*/
#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/i2c-id.h>
#include <linux/i2c-algo-bit.h>
#include "drmP.h"
#include "drm.h"
@@ -35,79 +34,106 @@
#include "i915_drm.h"
#include "i915_drv.h"
-void intel_i2c_quirk_set(struct drm_device *dev, bool enable)
+/* Intel GPIO access functions */
+
+#define I2C_RISEFALL_TIME 20
+
+static inline struct intel_gmbus *
+to_intel_gmbus(struct i2c_adapter *i2c)
+{
+ return container_of(i2c, struct intel_gmbus, adapter);
+}
+
+struct intel_gpio {
+ struct i2c_adapter adapter;
+ struct i2c_algo_bit_data algo;
+ struct drm_i915_private *dev_priv;
+ u32 reg;
+};
+
+void
+intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(PCH_GMBUS0, 0);
+ else
+ I915_WRITE(GMBUS0, 0);
+}
+
+static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 val;
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- if (!IS_PINEVIEW(dev))
+ if (!IS_PINEVIEW(dev_priv->dev))
return;
+
+ val = I915_READ(DSPCLK_GATE_D);
if (enable)
- I915_WRITE(DSPCLK_GATE_D,
- I915_READ(DSPCLK_GATE_D) | DPCUNIT_CLOCK_GATE_DISABLE);
+ val |= DPCUNIT_CLOCK_GATE_DISABLE;
else
- I915_WRITE(DSPCLK_GATE_D,
- I915_READ(DSPCLK_GATE_D) & (~DPCUNIT_CLOCK_GATE_DISABLE));
+ val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, val);
}
-/*
- * Intel GPIO access functions
- */
+static u32 get_reserved(struct intel_gpio *gpio)
+{
+ struct drm_i915_private *dev_priv = gpio->dev_priv;
+ struct drm_device *dev = dev_priv->dev;
+ u32 reserved = 0;
-#define I2C_RISEFALL_TIME 20
+ /* On most chips, these bits must be preserved in software. */
+ if (!IS_I830(dev) && !IS_845G(dev))
+ reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
+
+ return reserved;
+}
static int get_clock(void *data)
{
- struct intel_i2c_chan *chan = data;
- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
- u32 val;
-
- val = I915_READ(chan->reg);
- return ((val & GPIO_CLOCK_VAL_IN) != 0);
+ struct intel_gpio *gpio = data;
+ struct drm_i915_private *dev_priv = gpio->dev_priv;
+ u32 reserved = get_reserved(gpio);
+ I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+ I915_WRITE(gpio->reg, reserved);
+ return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
}
static int get_data(void *data)
{
- struct intel_i2c_chan *chan = data;
- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
- u32 val;
-
- val = I915_READ(chan->reg);
- return ((val & GPIO_DATA_VAL_IN) != 0);
+ struct intel_gpio *gpio = data;
+ struct drm_i915_private *dev_priv = gpio->dev_priv;
+ u32 reserved = get_reserved(gpio);
+ I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+ I915_WRITE(gpio->reg, reserved);
+ return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
}
static void set_clock(void *data, int state_high)
{
- struct intel_i2c_chan *chan = data;
- struct drm_device *dev = chan->drm_dev;
- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
- u32 reserved = 0, clock_bits;
-
- /* On most chips, these bits must be preserved in software. */
- if (!IS_I830(dev) && !IS_845G(dev))
- reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
- GPIO_CLOCK_PULLUP_DISABLE);
+ struct intel_gpio *gpio = data;
+ struct drm_i915_private *dev_priv = gpio->dev_priv;
+ u32 reserved = get_reserved(gpio);
+ u32 clock_bits;
if (state_high)
clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
else
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
GPIO_CLOCK_VAL_MASK;
- I915_WRITE(chan->reg, reserved | clock_bits);
- udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+
+ I915_WRITE(gpio->reg, reserved | clock_bits);
+ POSTING_READ(gpio->reg);
}
static void set_data(void *data, int state_high)
{
- struct intel_i2c_chan *chan = data;
- struct drm_device *dev = chan->drm_dev;
- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
- u32 reserved = 0, data_bits;
-
- /* On most chips, these bits must be preserved in software. */
- if (!IS_I830(dev) && !IS_845G(dev))
- reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
- GPIO_CLOCK_PULLUP_DISABLE);
+ struct intel_gpio *gpio = data;
+ struct drm_i915_private *dev_priv = gpio->dev_priv;
+ u32 reserved = get_reserved(gpio);
+ u32 data_bits;
if (state_high)
data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
@@ -115,109 +141,312 @@ static void set_data(void *data, int state_high)
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
- I915_WRITE(chan->reg, reserved | data_bits);
- udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+ I915_WRITE(gpio->reg, reserved | data_bits);
+ POSTING_READ(gpio->reg);
}
-/* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C
- * engine, but if the BIOS leaves it enabled, then that can break our use
- * of the bit-banging I2C interfaces. This is notably the case with the
- * Mac Mini in EFI mode.
- */
-void
-intel_i2c_reset_gmbus(struct drm_device *dev)
+static struct i2c_adapter *
+intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ static const int map_pin_to_reg[] = {
+ 0,
+ GPIOB,
+ GPIOA,
+ GPIOC,
+ GPIOD,
+ GPIOE,
+ GPIOF,
+ };
+ struct intel_gpio *gpio;
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_GMBUS0, 0);
- } else {
- I915_WRITE(GMBUS0, 0);
+ if (pin < 1 || pin > 7)
+ return NULL;
+
+ gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
+ if (gpio == NULL)
+ return NULL;
+
+ gpio->reg = map_pin_to_reg[pin];
+ if (HAS_PCH_SPLIT(dev_priv->dev))
+ gpio->reg += PCH_GPIOA - GPIOA;
+ gpio->dev_priv = dev_priv;
+
+ snprintf(gpio->adapter.name, I2C_NAME_SIZE, "GPIO%c", "?BACDEF?"[pin]);
+ gpio->adapter.owner = THIS_MODULE;
+ gpio->adapter.algo_data = &gpio->algo;
+ gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
+ gpio->algo.setsda = set_data;
+ gpio->algo.setscl = set_clock;
+ gpio->algo.getsda = get_data;
+ gpio->algo.getscl = get_clock;
+ gpio->algo.udelay = I2C_RISEFALL_TIME;
+ gpio->algo.timeout = usecs_to_jiffies(2200);
+ gpio->algo.data = gpio;
+
+ if (i2c_bit_add_bus(&gpio->adapter))
+ goto out_free;
+
+ return &gpio->adapter;
+
+out_free:
+ kfree(gpio);
+ return NULL;
+}
+
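+/* Perform a bit-banged transfer over a GPIO pair: reset the GMBUS
+ * controller, apply the chipset-specific quirk, idle SCL/SDA high
+ * around the transfer, then restore the quirk state.
+ */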
+static int
+intel_i2c_quirk_xfer(struct drm_i915_private *dev_priv,
+ struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct intel_gpio *gpio = container_of(adapter,
+ struct intel_gpio,
+ adapter);
+ int ret;
+
+ intel_i2c_reset(dev_priv->dev);
+
+ intel_i2c_quirk_set(dev_priv, true);
+ set_data(gpio, 1);
+ set_clock(gpio, 1);
+ udelay(I2C_RISEFALL_TIME);
+
+ ret = adapter->algo->master_xfer(adapter, msgs, num);
+
+ set_data(gpio, 1);
+ set_clock(gpio, 1);
+ intel_i2c_quirk_set(dev_priv, false);
+
+ return ret;
+}
+
+static int
+gmbus_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = adapter->algo_data;
+ int i, reg_offset;
+
+ if (bus->force_bit)
+ return intel_i2c_quirk_xfer(dev_priv,
+ bus->force_bit, msgs, num);
+
+ reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
+
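+ /* GMBUS0 selects the pin pair and bus rate encoded in reg0. */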
+ I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
+
+ for (i = 0; i < num; i++) {
+ u16 len = msgs[i].len;
+ u8 *buf = msgs[i].buf;
+
+ if (msgs[i].flags & I2C_M_RD) {
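+ /* GMBUS1 programs the slave address, byte count and read
+ * direction; setting SW_RDY starts the cycle.
+ */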
+ I915_WRITE(GMBUS1 + reg_offset,
+ GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
+ (len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ POSTING_READ(GMBUS2+reg_offset);
+ do {
+ u32 val, loop = 0;
+
+ if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+ goto timeout;
+ if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ return 0;
+
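+ /* Each GMBUS3 read returns up to four bytes; unpack them LSB first. */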
+ val = I915_READ(GMBUS3 + reg_offset);
+ do {
+ *buf++ = val & 0xff;
+ val >>= 8;
+ } while (--len && ++loop < 4);
+ } while (len);
+ } else {
+ u32 val, loop;
+
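+ /* Pack up to four bytes into GMBUS3, least-significant byte first. */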
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len && ++loop < 4);
+
+ I915_WRITE(GMBUS3 + reg_offset, val);
+ I915_WRITE(GMBUS1 + reg_offset,
+ (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
+ (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ POSTING_READ(GMBUS2+reg_offset);
+
+ while (len) {
+ if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+ goto timeout;
+ if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ return 0;
+
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len && ++loop < 4);
+
+ I915_WRITE(GMBUS3 + reg_offset, val);
+ POSTING_READ(GMBUS2+reg_offset);
+ }
+ }
+
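+ /* Between messages, wait for the engine to reach its WAIT
+ * phase (or report an error) before starting the next cycle.
+ */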
+ if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
+ goto timeout;
+ if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ return 0;
}
+
+ return num;
+
+timeout:
+ DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
+ bus->reg0 & 0xff, bus->adapter.name);
+ /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
+ bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
+ if (!bus->force_bit)
+ return -ENOMEM;
+
+ return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
}
+static u32 gmbus_func(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+
+ if (bus->force_bit)
+ bus->force_bit->algo->functionality(bus->force_bit);
+
+ return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ /* I2C_FUNC_10BIT_ADDR | */
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
+}
+
+static const struct i2c_algorithm gmbus_algorithm = {
+ .master_xfer = gmbus_xfer,
+ .functionality = gmbus_func
+};
+
/**
- * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
+ * intel_gmbus_setup - instantiate all Intel i2c GMBuses
* @dev: DRM device
- * @output: driver specific output device
- * @reg: GPIO reg to use
- * @name: name for this bus
- * @slave_addr: slave address (if fixed)
- *
- * Creates and registers a new i2c bus with the Linux i2c layer, for use
- * in output probing and control (e.g. DDC or SDVO control functions).
- *
- * Possible values for @reg include:
- * %GPIOA
- * %GPIOB
- * %GPIOC
- * %GPIOD
- * %GPIOE
- * %GPIOF
- * %GPIOG
- * %GPIOH
- * see PRM for details on how these different busses are used.
*/
-struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
- const char *name)
+int intel_setup_gmbus(struct drm_device *dev)
{
- struct intel_i2c_chan *chan;
+ static const char *names[GMBUS_NUM_PORTS] = {
+ "disabled",
+ "ssc",
+ "vga",
+ "panel",
+ "dpc",
+ "dpb",
+ "reserved"
+ "dpd",
+ };
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret, i;
- chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL);
- if (!chan)
- goto out_free;
+ dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS,
+ GFP_KERNEL);
+ if (dev_priv->gmbus == NULL)
+ return -ENOMEM;
- chan->drm_dev = dev;
- chan->reg = reg;
- snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
- chan->adapter.owner = THIS_MODULE;
- chan->adapter.algo_data = &chan->algo;
- chan->adapter.dev.parent = &dev->pdev->dev;
- chan->algo.setsda = set_data;
- chan->algo.setscl = set_clock;
- chan->algo.getsda = get_data;
- chan->algo.getscl = get_clock;
- chan->algo.udelay = 20;
- chan->algo.timeout = usecs_to_jiffies(2200);
- chan->algo.data = chan;
-
- i2c_set_adapdata(&chan->adapter, chan);
-
- if(i2c_bit_add_bus(&chan->adapter))
- goto out_free;
+ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
- intel_i2c_reset_gmbus(dev);
+ bus->adapter.owner = THIS_MODULE;
+ bus->adapter.class = I2C_CLASS_DDC;
+ snprintf(bus->adapter.name,
+ I2C_NAME_SIZE,
+ "gmbus %s",
+ names[i]);
- /* JJJ: raise SCL and SDA? */
- intel_i2c_quirk_set(dev, true);
- set_data(chan, 1);
- set_clock(chan, 1);
- intel_i2c_quirk_set(dev, false);
- udelay(20);
+ bus->adapter.dev.parent = &dev->pdev->dev;
+ bus->adapter.algo_data = dev_priv;
- return &chan->adapter;
+ bus->adapter.algo = &gmbus_algorithm;
+ ret = i2c_add_adapter(&bus->adapter);
+ if (ret)
+ goto err;
-out_free:
- kfree(chan);
- return NULL;
+ /* By default use a conservative clock rate */
+ bus->reg0 = i | GMBUS_RATE_100KHZ;
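+ /* The low byte of reg0 selects the GMBUS0 pin pair; bits 8-9
+ * set the bus rate (see intel_gmbus_set_speed()).
+ */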
+
+ /* XXX force bit banging until GMBUS is fully debugged */
+ bus->force_bit = intel_gpio_create(dev_priv, i);
+ }
+
+ intel_i2c_reset(dev_priv->dev);
+
+ return 0;
+
+err:
+ while (i--) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ i2c_del_adapter(&bus->adapter);
+ }
+ kfree(dev_priv->gmbus);
+ dev_priv->gmbus = NULL;
+ return ret;
}
-/**
- * intel_i2c_destroy - unregister and free i2c bus resources
- * @output: channel to free
- *
- * Unregister the adapter from the i2c layer, then free the structure.
- */
-void intel_i2c_destroy(struct i2c_adapter *adapter)
+void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+ /* speed:
+ * 0x0 = 100 KHz
+ * 0x1 = 50 KHz
+ * 0x2 = 400 KHz
+ * 0x3 = 1000 KHz
+ */
+ bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
+}
+
+void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+ if (force_bit) {
+ if (bus->force_bit == NULL) {
+ struct drm_i915_private *dev_priv = adapter->algo_data;
+ bus->force_bit = intel_gpio_create(dev_priv,
+ bus->reg0 & 0xff);
+ }
+ } else {
+ if (bus->force_bit) {
+ i2c_del_adapter(bus->force_bit);
+ kfree(bus->force_bit);
+ bus->force_bit = NULL;
+ }
+ }
+}
+
+void intel_teardown_gmbus(struct drm_device *dev)
{
- struct intel_i2c_chan *chan;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
- if (!adapter)
+ if (dev_priv->gmbus == NULL)
return;
- chan = container_of(adapter,
- struct intel_i2c_chan,
- adapter);
- i2c_del_adapter(&chan->adapter);
- kfree(chan);
+ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ if (bus->force_bit) {
+ i2c_del_adapter(bus->force_bit);
+ kfree(bus->force_bit);
+ }
+ i2c_del_adapter(&bus->adapter);
+ }
+
+ kfree(dev_priv->gmbus);
+ dev_priv->gmbus = NULL;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 6ec39a86ed06..f1a649990ea9 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -43,102 +43,76 @@
/* Private structure for the integrated LVDS support */
struct intel_lvds {
struct intel_encoder base;
+
+ struct edid *edid;
+
int fitting_mode;
u32 pfit_control;
u32 pfit_pgm_ratios;
+ bool pfit_dirty;
+
+ struct drm_display_mode *fixed_mode;
};
-static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder)
+static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
{
- return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base);
-}
-
-/**
- * Sets the backlight level.
- *
- * \param level backlight level, from 0 to intel_lvds_get_max_backlight().
- */
-static void intel_lvds_set_backlight(struct drm_device *dev, int level)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 blc_pwm_ctl, reg;
-
- if (HAS_PCH_SPLIT(dev))
- reg = BLC_PWM_CPU_CTL;
- else
- reg = BLC_PWM_CTL;
-
- blc_pwm_ctl = I915_READ(reg) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(reg, (blc_pwm_ctl |
- (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+ return container_of(encoder, struct intel_lvds, base.base);
}
-/**
- * Returns the maximum level of the backlight duty cycle field.
- */
-static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
+static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg;
-
- if (HAS_PCH_SPLIT(dev))
- reg = BLC_PWM_PCH_CTL2;
- else
- reg = BLC_PWM_CTL;
-
- return ((I915_READ(reg) & BACKLIGHT_MODULATION_FREQ_MASK) >>
- BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+ return container_of(intel_attached_encoder(connector),
+ struct intel_lvds, base);
}
/**
* Sets the power state for the panel.
*/
-static void intel_lvds_set_power(struct drm_device *dev, bool on)
+static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on)
{
+ struct drm_device *dev = intel_lvds->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 ctl_reg, status_reg, lvds_reg;
+ u32 ctl_reg, lvds_reg;
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
- status_reg = PCH_PP_STATUS;
lvds_reg = PCH_LVDS;
} else {
ctl_reg = PP_CONTROL;
- status_reg = PP_STATUS;
lvds_reg = LVDS;
}
if (on) {
I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
- POSTING_READ(lvds_reg);
-
- I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
- POWER_TARGET_ON);
- if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 0))
- DRM_ERROR("timed out waiting to enable LVDS pipe");
-
- intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
+ intel_panel_set_backlight(dev, dev_priv->backlight_level);
} else {
- intel_lvds_set_backlight(dev, 0);
+ dev_priv->backlight_level = intel_panel_get_backlight(dev);
+
+ intel_panel_set_backlight(dev, 0);
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
- I915_WRITE(ctl_reg, I915_READ(ctl_reg) &
- ~POWER_TARGET_ON);
- if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000, 0))
- DRM_ERROR("timed out waiting for LVDS pipe to turn off");
+ if (intel_lvds->pfit_control) {
+ if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
+ DRM_ERROR("timed out waiting for panel to power off\n");
+ I915_WRITE(PFIT_CONTROL, 0);
+ intel_lvds->pfit_control = 0;
+ intel_lvds->pfit_dirty = false;
+ }
I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
- POSTING_READ(lvds_reg);
}
+ POSTING_READ(lvds_reg);
}
static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
{
- struct drm_device *dev = encoder->dev;
+ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
if (mode == DRM_MODE_DPMS_ON)
- intel_lvds_set_power(dev, true);
+ intel_lvds_set_power(intel_lvds, true);
else
- intel_lvds_set_power(dev, false);
+ intel_lvds_set_power(intel_lvds, false);
/* XXX: We never power down the LVDS pairs. */
}
@@ -146,16 +120,13 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
static int intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
+ struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+ struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
- if (fixed_mode) {
- if (mode->hdisplay > fixed_mode->hdisplay)
- return MODE_PANEL;
- if (mode->vdisplay > fixed_mode->vdisplay)
- return MODE_PANEL;
- }
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
return MODE_OK;
}
@@ -223,12 +194,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
+ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
struct drm_encoder *tmp_encoder;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
/* Should never happen!! */
- if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
+ if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
DRM_ERROR("Can't support LVDS on pipe A\n");
return false;
}
@@ -241,9 +212,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
return false;
}
}
- /* If we don't have a panel mode, there is nothing we can do */
- if (dev_priv->panel_fixed_mode == NULL)
- return true;
/*
* We have timings from the BIOS for the panel, put them in
@@ -251,7 +219,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
+ intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
if (HAS_PCH_SPLIT(dev)) {
intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
@@ -260,8 +228,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
}
/* Make sure pre-965s set dither correctly */
- if (!IS_I965G(dev)) {
- if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
+ if (INTEL_INFO(dev)->gen < 4) {
+ if (dev_priv->lvds_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
}
@@ -271,7 +239,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
goto out;
/* 965+ wants fuzzy fitting */
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
PFIT_FILTER_FUZZY);
@@ -297,7 +265,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
@@ -356,7 +324,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* Fortunately this is all done for us in hw.
*/
pfit_control |= PFIT_ENABLE;
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
pfit_control |= PFIT_SCALING_AUTO;
else
pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
@@ -369,8 +337,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
}
out:
- intel_lvds->pfit_control = pfit_control;
- intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
+ if (pfit_control != intel_lvds->pfit_control ||
+ pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
+ intel_lvds->pfit_control = pfit_control;
+ intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
+ intel_lvds->pfit_dirty = true;
+ }
dev_priv->lvds_border_bits = border;
/*
@@ -386,30 +358,60 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg;
-
- if (HAS_PCH_SPLIT(dev))
- reg = BLC_PWM_CPU_CTL;
- else
- reg = BLC_PWM_CTL;
-
- dev_priv->saveBLC_PWM_CTL = I915_READ(reg);
- dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
- BACKLIGHT_DUTY_CYCLE_MASK);
+ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
+
+ dev_priv->backlight_level = intel_panel_get_backlight(dev);
+
+ /* We try to do the minimum that is necessary in order to unlock
+ * the registers for mode setting.
+ *
+ * On Ironlake, this is quite simple as we just set the unlock key
+ * and ignore all subtleties. (This may cause some issues...)
+ *
+ * Prior to Ironlake, we must disable the pipe if we want to adjust
+ * the panel fitter. However, at all other times we can just reset
+ * the registers regardless.
+ */
- intel_lvds_set_power(dev, false);
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(PCH_PP_CONTROL,
+ I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
+ } else if (intel_lvds->pfit_dirty) {
+ I915_WRITE(PP_CONTROL,
+ (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS)
+ & ~POWER_TARGET_ON);
+ } else {
+ I915_WRITE(PP_CONTROL,
+ I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
+ }
}
-static void intel_lvds_commit( struct drm_encoder *encoder)
+static void intel_lvds_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- if (dev_priv->backlight_duty_cycle == 0)
- dev_priv->backlight_duty_cycle =
- intel_lvds_get_max_backlight(dev);
+ if (dev_priv->backlight_level == 0)
+ dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+
+ /* Undo any unlocking done in prepare to prevent accidental
+ * adjustment of the registers.
+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ u32 val = I915_READ(PCH_PP_CONTROL);
+ if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
+ I915_WRITE(PCH_PP_CONTROL, val & 0x3);
+ } else {
+ u32 val = I915_READ(PP_CONTROL);
+ if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
+ I915_WRITE(PP_CONTROL, val & 0x3);
+ }
- intel_lvds_set_power(dev, true);
+ /* Always do a full power on as we do not know what state
+ * we were left in.
+ */
+ intel_lvds_set_power(intel_lvds, true);
}
static void intel_lvds_mode_set(struct drm_encoder *encoder,
@@ -418,7 +420,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
+ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
/*
* The LVDS pin pair will already have been turned on in the
@@ -429,13 +431,23 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
if (HAS_PCH_SPLIT(dev))
return;
+ if (!intel_lvds->pfit_dirty)
+ return;
+
/*
* Enable automatic panel scaling so that non-native modes fill the
* screen. Should be enabled before the pipe is enabled, according to
* register description and PRM.
*/
+ DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
+ intel_lvds->pfit_control,
+ intel_lvds->pfit_pgm_ratios);
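+ /* Wait for the panel to power off before updating the
+ * panel-fitter registers.
+ */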
+ if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
+ DRM_ERROR("timed out waiting for panel to power off\n");
+
I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
+ intel_lvds->pfit_dirty = false;
}
/**
@@ -465,38 +477,22 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
*/
static int intel_lvds_get_modes(struct drm_connector *connector)
{
+ struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
struct drm_device *dev = connector->dev;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret = 0;
-
- if (dev_priv->lvds_edid_good) {
- ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+ struct drm_display_mode *mode;
- if (ret)
- return ret;
+ if (intel_lvds->edid) {
+ drm_mode_connector_update_edid_property(connector,
+ intel_lvds->edid);
+ return drm_add_edid_modes(connector, intel_lvds->edid);
}
- /* Didn't get an EDID, so
- * Set wide sync ranges so we get all modes
- * handed to valid_mode for checking
- */
- connector->display_info.min_vfreq = 0;
- connector->display_info.max_vfreq = 200;
- connector->display_info.min_hfreq = 0;
- connector->display_info.max_hfreq = 200;
-
- if (dev_priv->panel_fixed_mode != NULL) {
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
- drm_mode_probed_add(connector, mode);
-
- return 1;
- }
+ mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
+ if (mode == NULL)
+ return 0;
- return 0;
+ drm_mode_probed_add(connector, mode);
+ return 1;
}
static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
@@ -587,18 +583,17 @@ static int intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
+ struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
struct drm_device *dev = connector->dev;
- if (property == dev->mode_config.scaling_mode_property &&
- connector->encoder) {
- struct drm_crtc *crtc = connector->encoder->crtc;
- struct drm_encoder *encoder = connector->encoder;
- struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
+ if (property == dev->mode_config.scaling_mode_property) {
+ struct drm_crtc *crtc = intel_lvds->base.base.crtc;
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
- return 0;
+ return -EINVAL;
}
+
if (intel_lvds->fitting_mode == value) {
/* the LVDS scaling property is not changed */
return 0;
@@ -628,7 +623,7 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
.get_modes = intel_lvds_get_modes,
.mode_valid = intel_lvds_mode_valid,
- .best_encoder = intel_attached_encoder,
+ .best_encoder = intel_best_encoder,
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
@@ -726,16 +721,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
* Find the reduced downclock for LVDS in EDID.
*/
static void intel_find_lvds_downclock(struct drm_device *dev,
- struct drm_connector *connector)
+ struct drm_display_mode *fixed_mode,
+ struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_display_mode *scan, *panel_fixed_mode;
+ struct drm_display_mode *scan;
int temp_downclock;
- panel_fixed_mode = dev_priv->panel_fixed_mode;
- temp_downclock = panel_fixed_mode->clock;
-
- mutex_lock(&dev->mode_config.mutex);
+ temp_downclock = fixed_mode->clock;
list_for_each_entry(scan, &connector->probed_modes, head) {
/*
* If one mode has the same resolution with the fixed_panel
@@ -744,14 +737,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
* case we can set the different FPx0/1 to dynamically select
* between low and high frequency.
*/
- if (scan->hdisplay == panel_fixed_mode->hdisplay &&
- scan->hsync_start == panel_fixed_mode->hsync_start &&
- scan->hsync_end == panel_fixed_mode->hsync_end &&
- scan->htotal == panel_fixed_mode->htotal &&
- scan->vdisplay == panel_fixed_mode->vdisplay &&
- scan->vsync_start == panel_fixed_mode->vsync_start &&
- scan->vsync_end == panel_fixed_mode->vsync_end &&
- scan->vtotal == panel_fixed_mode->vtotal) {
+ if (scan->hdisplay == fixed_mode->hdisplay &&
+ scan->hsync_start == fixed_mode->hsync_start &&
+ scan->hsync_end == fixed_mode->hsync_end &&
+ scan->htotal == fixed_mode->htotal &&
+ scan->vdisplay == fixed_mode->vdisplay &&
+ scan->vsync_start == fixed_mode->vsync_start &&
+ scan->vsync_end == fixed_mode->vsync_end &&
+ scan->vtotal == fixed_mode->vtotal) {
if (scan->clock < temp_downclock) {
/*
* The downclock is already found. But we
@@ -761,17 +754,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
}
}
}
- mutex_unlock(&dev->mode_config.mutex);
- if (temp_downclock < panel_fixed_mode->clock &&
- i915_lvds_downclock) {
+ if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) {
/* We found the downclock for LVDS. */
dev_priv->lvds_downclock_avail = 1;
dev_priv->lvds_downclock = temp_downclock;
DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
- "Normal clock %dKhz, downclock %dKhz\n",
- panel_fixed_mode->clock, temp_downclock);
+ "Normal clock %dKhz, downclock %dKhz\n",
+ fixed_mode->clock, temp_downclock);
}
- return;
}
/*
@@ -780,38 +770,67 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
* If it is present, return true.
* If it is not present, return false.
* If no child dev is parsed from VBT, it assumes that the LVDS is present.
- * Note: The addin_offset should also be checked for LVDS panel.
- * Only when it is non-zero, it is assumed that it is present.
*/
-static int lvds_is_present_in_vbt(struct drm_device *dev)
+static bool lvds_is_present_in_vbt(struct drm_device *dev,
+ u8 *i2c_pin)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct child_device_config *p_child;
- int i, ret;
+ int i;
if (!dev_priv->child_dev_num)
- return 1;
+ return true;
- ret = 0;
for (i = 0; i < dev_priv->child_dev_num; i++) {
- p_child = dev_priv->child_dev + i;
- /*
- * If the device type is not LFP, continue.
- * If the device type is 0x22, it is also regarded as LFP.
+ struct child_device_config *child = dev_priv->child_dev + i;
+
+ /* If the device type is not LFP, continue.
+ * We have to check both the new identifiers as well as the
+ * old for compatibility with some BIOSes.
*/
- if (p_child->device_type != DEVICE_TYPE_INT_LFP &&
- p_child->device_type != DEVICE_TYPE_LFP)
+ if (child->device_type != DEVICE_TYPE_INT_LFP &&
+ child->device_type != DEVICE_TYPE_LFP)
continue;
- /* The addin_offset should be checked. Only when it is
- * non-zero, it is regarded as present.
+ if (child->i2c_pin)
+ *i2c_pin = child->i2c_pin;
+
+ /* However, we cannot trust the BIOS writers to populate
+ * the VBT correctly. Since LVDS requires additional
+ * information from AIM blocks, a non-zero addin offset is
+ * a good indicator that the LVDS is actually present.
*/
- if (p_child->addin_offset) {
- ret = 1;
- break;
- }
+ if (child->addin_offset)
+ return true;
+
+ /* But even then some BIOS writers perform some black magic
+ * and instantiate the device without reference to any
+ * additional data. Trust that if the VBT was written into
+ * the OpRegion then they have validated the LVDS's existence.
+ */
+ if (dev_priv->opregion.vbt)
+ return true;
}
- return ret;
+
+ return false;
+}
+
+static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u8 buf = 0;
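+ /* Probe for a DDC EEPROM on this pin with a single-byte write;
+ * if nothing ACKs, assume no panel is attached.
+ */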
+ struct i2c_msg msgs[] = {
+ {
+ .addr = 0xA0,
+ .flags = 0,
+ .len = 1,
+ .buf = &buf,
+ },
+ };
+ struct i2c_adapter *i2c = &dev_priv->gmbus[pin].adapter;
+ /* XXX this only appears to work when using GMBUS */
+ if (intel_gmbus_is_forced_bit(i2c))
+ return true;
+ return i2c_transfer(i2c, msgs, 1) == 1;
}
/**
@@ -832,13 +851,15 @@ void intel_lvds_init(struct drm_device *dev)
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_crtc *crtc;
u32 lvds;
- int pipe, gpio = GPIOC;
+ int pipe;
+ u8 pin;
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds))
return;
- if (!lvds_is_present_in_vbt(dev)) {
+ pin = GMBUS_PORT_PANEL;
+ if (!lvds_is_present_in_vbt(dev, &pin)) {
DRM_DEBUG_KMS("LVDS is not present in VBT\n");
return;
}
@@ -846,11 +867,15 @@ void intel_lvds_init(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
return;
- if (dev_priv->edp_support) {
+ if (dev_priv->edp.support) {
DRM_DEBUG_KMS("disable LVDS for eDP support\n");
return;
}
- gpio = PCH_GPIOC;
+ }
+
+ if (!intel_lvds_ddc_probe(dev, pin)) {
+ DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n");
+ return;
}
intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
@@ -864,16 +889,20 @@ void intel_lvds_init(struct drm_device *dev)
return;
}
+ if (!HAS_PCH_SPLIT(dev)) {
+ intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
+ }
+
intel_encoder = &intel_lvds->base;
- encoder = &intel_encoder->enc;
+ encoder = &intel_encoder->base;
connector = &intel_connector->base;
drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
- drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_LVDS;
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
@@ -904,43 +933,41 @@ void intel_lvds_init(struct drm_device *dev)
* if closed, act like it's not there for now
*/
- /* Set up the DDC bus. */
- intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
- if (!intel_encoder->ddc_bus) {
- dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
- "failed.\n");
- goto failed;
- }
-
/*
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- dev_priv->lvds_edid_good = true;
+ intel_lvds->edid = drm_get_edid(connector,
+ &dev_priv->gmbus[pin].adapter);
- if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus))
- dev_priv->lvds_edid_good = false;
+ if (!intel_lvds->edid) {
+ /* Didn't get an EDID, so set wide sync ranges to ensure all
+ * modes are handed to mode_valid for checking.
+ */
+ connector->display_info.min_vfreq = 0;
+ connector->display_info.max_vfreq = 200;
+ connector->display_info.min_hfreq = 0;
+ connector->display_info.max_hfreq = 200;
+ }
list_for_each_entry(scan, &connector->probed_modes, head) {
- mutex_lock(&dev->mode_config.mutex);
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
- dev_priv->panel_fixed_mode =
+ intel_lvds->fixed_mode =
drm_mode_duplicate(dev, scan);
- mutex_unlock(&dev->mode_config.mutex);
- intel_find_lvds_downclock(dev, connector);
+ intel_find_lvds_downclock(dev,
+ intel_lvds->fixed_mode,
+ connector);
goto out;
}
- mutex_unlock(&dev->mode_config.mutex);
}
/* Failed to get EDID, what about VBT? */
if (dev_priv->lfp_lvds_vbt_mode) {
- mutex_lock(&dev->mode_config.mutex);
- dev_priv->panel_fixed_mode =
+ intel_lvds->fixed_mode =
drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
- mutex_unlock(&dev->mode_config.mutex);
- if (dev_priv->panel_fixed_mode) {
- dev_priv->panel_fixed_mode->type |=
+ if (intel_lvds->fixed_mode) {
+ intel_lvds->fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
goto out;
}
@@ -958,19 +985,19 @@ void intel_lvds_init(struct drm_device *dev)
lvds = I915_READ(LVDS);
pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
- crtc = intel_get_crtc_from_pipe(dev, pipe);
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
if (crtc && (lvds & LVDS_PORT_EN)) {
- dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc);
- if (dev_priv->panel_fixed_mode) {
- dev_priv->panel_fixed_mode->type |=
+ intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
+ if (intel_lvds->fixed_mode) {
+ intel_lvds->fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
goto out;
}
}
/* If we still don't have a mode after all that, give up. */
- if (!dev_priv->panel_fixed_mode)
+ if (!intel_lvds->fixed_mode)
goto failed;
out:
@@ -997,8 +1024,6 @@ out:
failed:
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
kfree(intel_lvds);
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 4b1fd3d9c73c..f70b7cf32bff 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
- * Copyright (c) 2007 Intel Corporation
+ * Copyright (c) 2007, 2010 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -34,11 +34,11 @@
* intel_ddc_probe
*
*/
-bool intel_ddc_probe(struct intel_encoder *intel_encoder)
+bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
{
+ struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
u8 out_buf[] = { 0x0, 0x0};
u8 buf[2];
- int ret;
struct i2c_msg msgs[] = {
{
.addr = 0x50,
@@ -54,13 +54,7 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
}
};
- intel_i2c_quirk_set(intel_encoder->enc.dev, true);
- ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
- intel_i2c_quirk_set(intel_encoder->enc.dev, false);
- if (ret == 2)
- return true;
-
- return false;
+ return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 2) == 2;
}
/**
@@ -76,9 +70,7 @@ int intel_ddc_get_modes(struct drm_connector *connector,
struct edid *edid;
int ret = 0;
- intel_i2c_quirk_set(connector->dev, true);
edid = drm_get_edid(connector, adapter);
- intel_i2c_quirk_set(connector->dev, false);
if (edid) {
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index ea5d3fea4b61..917c7dc3cd6b 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -31,17 +31,16 @@
#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "intel_drv.h"
#define PCI_ASLE 0xe4
-#define PCI_LBPC 0xf4
#define PCI_ASLS 0xfc
-#define OPREGION_SZ (8*1024)
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
#define OPREGION_SWSCI_OFFSET 0x200
#define OPREGION_ASLE_OFFSET 0x300
-#define OPREGION_VBT_OFFSET 0x1000
+#define OPREGION_VBT_OFFSET 0x400
#define OPREGION_SIGNATURE "IntelGraphicsMem"
#define MBOX_ACPI (1<<0)
@@ -143,40 +142,22 @@ struct opregion_asle {
#define ACPI_DIGITAL_OUTPUT (3<<8)
#define ACPI_LVDS_OUTPUT (4<<8)
+#ifdef CONFIG_ACPI
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
- u32 blc_pwm_ctl, blc_pwm_ctl2;
- u32 max_backlight, level, shift;
+ u32 max;
if (!(bclp & ASLE_BCLP_VALID))
return ASLE_BACKLIGHT_FAILED;
bclp &= ASLE_BCLP_MSK;
- if (bclp < 0 || bclp > 255)
+ if (bclp > 255)
return ASLE_BACKLIGHT_FAILED;
- blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
- blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
-
- if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
- pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
- else {
- if (IS_PINEVIEW(dev)) {
- blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
- max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
- BACKLIGHT_MODULATION_FREQ_SHIFT;
- shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1;
- } else {
- blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
- max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
- BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
- shift = BACKLIGHT_DUTY_CYCLE_SHIFT;
- }
- level = (bclp * max_backlight) / 255;
- I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift));
- }
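+ /* Scale the 0-255 ASLE value onto the panel's backlight range. */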
+ max = intel_panel_get_max_backlight(dev);
+ intel_panel_set_backlight(dev, bclp * max / 255);
asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
return 0;
@@ -211,7 +192,7 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
return 0;
}
-void opregion_asle_intr(struct drm_device *dev)
+void intel_opregion_asle_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
@@ -243,37 +224,8 @@ void opregion_asle_intr(struct drm_device *dev)
asle->aslc = asle_stat;
}
-static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
- u32 cpu_pwm_ctl, pch_pwm_ctl2;
- u32 max_backlight, level;
-
- if (!(bclp & ASLE_BCLP_VALID))
- return ASLE_BACKLIGHT_FAILED;
-
- bclp &= ASLE_BCLP_MSK;
- if (bclp < 0 || bclp > 255)
- return ASLE_BACKLIGHT_FAILED;
-
- cpu_pwm_ctl = I915_READ(BLC_PWM_CPU_CTL);
- pch_pwm_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
- /* get the max PWM frequency */
- max_backlight = (pch_pwm_ctl2 >> 16) & BACKLIGHT_DUTY_CYCLE_MASK;
- /* calculate the expected PMW frequency */
- level = (bclp * max_backlight) / 255;
- /* reserve the high 16 bits */
- cpu_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK);
- /* write the updated PWM frequency */
- I915_WRITE(BLC_PWM_CPU_CTL, cpu_pwm_ctl | level);
-
- asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
-
- return 0;
-}
-
-void ironlake_opregion_gse_intr(struct drm_device *dev)
+/* Only present on Ironlake+ */
+void intel_opregion_gse_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
@@ -296,7 +248,7 @@ void ironlake_opregion_gse_intr(struct drm_device *dev)
}
if (asle_req & ASLE_SET_BACKLIGHT)
- asle_stat |= asle_set_backlight_ironlake(dev, asle->bclp);
+ asle_stat |= asle_set_backlight(dev, asle->bclp);
if (asle_req & ASLE_SET_PFIT) {
DRM_DEBUG_DRIVER("Pfit is not supported\n");
@@ -315,7 +267,7 @@ void ironlake_opregion_gse_intr(struct drm_device *dev)
#define ASLE_PFIT_EN (1<<2)
#define ASLE_PFMB_EN (1<<3)
-void opregion_enable_asle(struct drm_device *dev)
+void intel_opregion_enable_asle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
@@ -464,7 +416,58 @@ blind_set:
goto end;
}
-int intel_opregion_init(struct drm_device *dev, int resume)
+void intel_opregion_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ intel_didl_outputs(dev);
+
+ /* Notify BIOS we are ready to handle ACPI video ext notifs.
+ * Right now, all the events are handled by the ACPI video module.
+ * We don't actually need to do anything with them. */
+ opregion->acpi->csts = 0;
+ opregion->acpi->drdy = 1;
+
+ system_opregion = opregion;
+ register_acpi_notifier(&intel_opregion_notifier);
+ }
+
+ if (opregion->asle)
+ intel_opregion_enable_asle(dev);
+}
+
+void intel_opregion_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ opregion->acpi->drdy = 0;
+
+ system_opregion = NULL;
+ unregister_acpi_notifier(&intel_opregion_notifier);
+ }
+
+ /* just clear all opregion memory pointers now */
+ iounmap(opregion->header);
+ opregion->header = NULL;
+ opregion->acpi = NULL;
+ opregion->swsci = NULL;
+ opregion->asle = NULL;
+ opregion->vbt = NULL;
+}
+#endif
+
+int intel_opregion_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
@@ -479,29 +482,23 @@ int intel_opregion_init(struct drm_device *dev, int resume)
return -ENOTSUPP;
}
- base = ioremap(asls, OPREGION_SZ);
+ base = ioremap(asls, OPREGION_SIZE);
if (!base)
return -ENOMEM;
- opregion->header = base;
- if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
+ if (memcmp(base, OPREGION_SIGNATURE, 16)) {
DRM_DEBUG_DRIVER("opregion signature mismatch\n");
err = -EINVAL;
goto err_out;
}
+ opregion->header = base;
+ opregion->vbt = base + OPREGION_VBT_OFFSET;
mboxes = opregion->header->mboxes;
if (mboxes & MBOX_ACPI) {
DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- intel_didl_outputs(dev);
- } else {
- DRM_DEBUG_DRIVER("Public ACPI methods not supported\n");
- err = -ENOTSUPP;
- goto err_out;
}
- opregion->enabled = 1;
if (mboxes & MBOX_SWSCI) {
DRM_DEBUG_DRIVER("SWSCI supported\n");
@@ -510,53 +507,11 @@ int intel_opregion_init(struct drm_device *dev, int resume)
if (mboxes & MBOX_ASLE) {
DRM_DEBUG_DRIVER("ASLE supported\n");
opregion->asle = base + OPREGION_ASLE_OFFSET;
- opregion_enable_asle(dev);
}
- if (!resume)
- acpi_video_register();
-
-
- /* Notify BIOS we are ready to handle ACPI video ext notifs.
- * Right now, all the events are handled by the ACPI video module.
- * We don't actually need to do anything with them. */
- opregion->acpi->csts = 0;
- opregion->acpi->drdy = 1;
-
- system_opregion = opregion;
- register_acpi_notifier(&intel_opregion_notifier);
-
return 0;
err_out:
iounmap(opregion->header);
- opregion->header = NULL;
- acpi_video_register();
return err;
}
-
-void intel_opregion_free(struct drm_device *dev, int suspend)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_opregion *opregion = &dev_priv->opregion;
-
- if (!opregion->enabled)
- return;
-
- if (!suspend)
- acpi_video_unregister();
-
- opregion->acpi->drdy = 0;
-
- system_opregion = NULL;
- unregister_acpi_notifier(&intel_opregion_notifier);
-
- /* just clear all opregion memory pointers now */
- iounmap(opregion->header);
- opregion->header = NULL;
- opregion->acpi = NULL;
- opregion->swsci = NULL;
- opregion->asle = NULL;
-
- opregion->enabled = 0;
-}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 1d306a458be6..375316a8420e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -170,57 +170,143 @@ struct overlay_registers {
u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
};
-/* overlay flip addr flag */
-#define OFC_UPDATE 0x1
-
-#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
-#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev))
-
+struct intel_overlay {
+ struct drm_device *dev;
+ struct intel_crtc *crtc;
+ struct drm_i915_gem_object *vid_bo;
+ struct drm_i915_gem_object *old_vid_bo;
+ int active;
+ int pfit_active;
+ u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
+ u32 color_key;
+ u32 brightness, contrast, saturation;
+ u32 old_xscale, old_yscale;
+ /* register access */
+ u32 flip_addr;
+ struct drm_i915_gem_object *reg_bo;
+ /* flip handling */
+ uint32_t last_flip_req;
+ void (*flip_tail)(struct intel_overlay *);
+};
-static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
+static struct overlay_registers *
+intel_overlay_map_regs(struct intel_overlay *overlay)
{
drm_i915_private_t *dev_priv = overlay->dev->dev_private;
struct overlay_registers *regs;
- /* no recursive mappings */
- BUG_ON(overlay->virt_addr);
+ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ regs = overlay->reg_bo->phys_obj->handle->vaddr;
+ else
+ regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
+ overlay->reg_bo->gtt_offset);
- if (OVERLAY_NONPHYSICAL(overlay->dev)) {
- regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- overlay->reg_bo->gtt_offset,
- KM_USER0);
+ return regs;
+}
- if (!regs) {
- DRM_ERROR("failed to map overlay regs in GTT\n");
- return NULL;
- }
- } else
- regs = overlay->reg_bo->phys_obj->handle->vaddr;
+static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+{
+ if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ io_mapping_unmap(regs);
+}
+
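+/* Turn the commands already emitted into a request, record 'tail'
+ * as the deferred cleanup for this flip, and wait for the request
+ * to complete.
+ */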
+static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+ struct drm_i915_gem_request *request,
+ bool interruptible,
+ void (*tail)(struct intel_overlay *))
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ BUG_ON(overlay->last_flip_req);
+ overlay->last_flip_req =
+ i915_add_request(dev, NULL, request, &dev_priv->render_ring);
+ if (overlay->last_flip_req == 0)
+ return -ENOMEM;
- return overlay->virt_addr = regs;
+ overlay->flip_tail = tail;
+ ret = i915_do_wait_request(dev,
+ overlay->last_flip_req, true,
+ &dev_priv->render_ring);
+ if (ret)
+ return ret;
+
+ overlay->last_flip_req = 0;
+ return 0;
}
-static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
+/* Workaround for an i830 bug where pipe A must be enabled to change control regs */
+static int
+i830_activate_pipe_a(struct drm_device *dev)
{
- if (OVERLAY_NONPHYSICAL(overlay->dev))
- io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0);
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ struct drm_display_mode vesa_640x480 = {
+ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 489, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
+ }, *mode;
+
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]);
+ if (crtc->dpms_mode == DRM_MODE_DPMS_ON)
+ return 0;
- overlay->virt_addr = NULL;
+ /* most i8xx have pipe a forced on, so don't trust dpms mode */
+ if (I915_READ(PIPEACONF) & PIPECONF_ENABLE)
+ return 0;
- return;
+ crtc_funcs = crtc->base.helper_private;
+ if (crtc_funcs->dpms == NULL)
+ return 0;
+
+ DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
+
+ mode = drm_mode_duplicate(dev, &vesa_640x480);
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ if (!drm_crtc_helper_set_mode(&crtc->base, mode,
+ crtc->base.x, crtc->base.y,
+ crtc->base.fb))
+ return 0;
+
+ crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON);
+ return 1;
+}
+
+static void
+i830_deactivate_pipe_a(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
+ struct drm_i915_gem_request *request;
+ int pipe_a_quirk = 0;
int ret;
- drm_i915_private_t *dev_priv = dev->dev_private;
BUG_ON(overlay->active);
-
overlay->active = 1;
- overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
+
+ if (IS_I830(dev)) {
+ pipe_a_quirk = i830_activate_pipe_a(dev);
+ if (pipe_a_quirk < 0)
+ return pipe_a_quirk;
+ }
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
BEGIN_LP_RING(4);
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
@@ -229,32 +315,30 @@ static int intel_overlay_on(struct intel_overlay *overlay)
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- overlay->last_flip_req =
- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
-
- ret = i915_do_wait_request(dev,
- overlay->last_flip_req, 1, &dev_priv->render_ring);
- if (ret != 0)
- return ret;
+ ret = intel_overlay_do_wait_request(overlay, request, true, NULL);
+out:
+ if (pipe_a_quirk)
+ i830_deactivate_pipe_a(dev);
- overlay->hw_wedged = 0;
- overlay->last_flip_req = 0;
- return 0;
+ return ret;
}
/* overlay needs to be enabled in OCMD reg */
-static void intel_overlay_continue(struct intel_overlay *overlay,
- bool load_polyphase_filter)
+static int intel_overlay_continue(struct intel_overlay *overlay,
+ bool load_polyphase_filter)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *request;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
BUG_ON(!overlay->active);
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
+
if (load_polyphase_filter)
flip_addr |= OFC_UPDATE;
@@ -269,220 +353,132 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
ADVANCE_LP_RING();
overlay->last_flip_req =
- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
+ i915_add_request(dev, NULL, request, &dev_priv->render_ring);
+ return 0;
}
-static int intel_overlay_wait_flip(struct intel_overlay *overlay)
+static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
- u32 tmp;
-
- if (overlay->last_flip_req != 0) {
- ret = i915_do_wait_request(dev, overlay->last_flip_req,
- 1, &dev_priv->render_ring);
- if (ret == 0) {
- overlay->last_flip_req = 0;
+ struct drm_gem_object *obj = &overlay->old_vid_bo->base;
- tmp = I915_READ(ISR);
-
- if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT))
- return 0;
- }
- }
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
- /* synchronous slowpath */
- overlay->hw_wedged = RELEASE_OLD_VID;
+ overlay->old_vid_bo = NULL;
+}
- BEGIN_LP_RING(2);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
+static void intel_overlay_off_tail(struct intel_overlay *overlay)
+{
+ struct drm_gem_object *obj;
- overlay->last_flip_req =
- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
+ /* never have the overlay hw on without showing a frame */
+ BUG_ON(!overlay->vid_bo);
+ obj = &overlay->vid_bo->base;
- ret = i915_do_wait_request(dev, overlay->last_flip_req,
- 1, &dev_priv->render_ring);
- if (ret != 0)
- return ret;
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ overlay->vid_bo = NULL;
- overlay->hw_wedged = 0;
- overlay->last_flip_req = 0;
- return 0;
+ overlay->crtc->overlay = NULL;
+ overlay->crtc = NULL;
+ overlay->active = 0;
}
/* overlay needs to be disabled in OCMD reg */
-static int intel_overlay_off(struct intel_overlay *overlay)
+static int intel_overlay_off(struct intel_overlay *overlay,
+ bool interruptible)
{
- u32 flip_addr = overlay->flip_addr;
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ u32 flip_addr = overlay->flip_addr;
+ struct drm_i915_gem_request *request;
BUG_ON(!overlay->active);
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
+
/* According to intel docs the overlay hw may hang (when switching
* off) without loading the filter coeffs. It is however unclear whether
* this applies to the disabling of the overlay or to the switching off
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
+ BEGIN_LP_RING(6);
/* wait for overlay to go idle */
- overlay->hw_wedged = SWITCH_OFF_STAGE_1;
-
- BEGIN_LP_RING(4);
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
-
- overlay->last_flip_req =
- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
-
- ret = i915_do_wait_request(dev, overlay->last_flip_req,
- 1, &dev_priv->render_ring);
- if (ret != 0)
- return ret;
-
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
- overlay->hw_wedged = SWITCH_OFF_STAGE_2;
-
- BEGIN_LP_RING(4);
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
OUT_RING(flip_addr);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
ADVANCE_LP_RING();
- overlay->last_flip_req =
- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
-
- ret = i915_do_wait_request(dev, overlay->last_flip_req,
- 1, &dev_priv->render_ring);
- if (ret != 0)
- return ret;
-
- overlay->hw_wedged = 0;
- overlay->last_flip_req = 0;
- return ret;
-}
-
-static void intel_overlay_off_tail(struct intel_overlay *overlay)
-{
- struct drm_gem_object *obj;
-
- /* never have the overlay hw on without showing a frame */
- BUG_ON(!overlay->vid_bo);
- obj = &overlay->vid_bo->base;
-
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- overlay->vid_bo = NULL;
-
- overlay->crtc->overlay = NULL;
- overlay->crtc = NULL;
- overlay->active = 0;
+ return intel_overlay_do_wait_request(overlay, request, interruptible,
+ intel_overlay_off_tail);
}
/* recover from an interruption due to a signal
* We have to be careful not to repeat work forever and to make forward progress. */
-int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
- int interruptible)
+static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
+ bool interruptible)
{
struct drm_device *dev = overlay->dev;
- struct drm_gem_object *obj;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 flip_addr;
int ret;
- if (overlay->hw_wedged == HW_WEDGED)
- return -EIO;
-
- if (overlay->last_flip_req == 0) {
- overlay->last_flip_req =
- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
- }
+ if (overlay->last_flip_req == 0)
+ return 0;
ret = i915_do_wait_request(dev, overlay->last_flip_req,
- interruptible, &dev_priv->render_ring);
- if (ret != 0)
+ interruptible, &dev_priv->render_ring);
+ if (ret)
return ret;
- switch (overlay->hw_wedged) {
- case RELEASE_OLD_VID:
- obj = &overlay->old_vid_bo->base;
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- overlay->old_vid_bo = NULL;
- break;
- case SWITCH_OFF_STAGE_1:
- flip_addr = overlay->flip_addr;
- flip_addr |= OFC_UPDATE;
-
- overlay->hw_wedged = SWITCH_OFF_STAGE_2;
-
- BEGIN_LP_RING(4);
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
- OUT_RING(flip_addr);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
-
- overlay->last_flip_req = i915_add_request(dev, NULL,
- 0, &dev_priv->render_ring);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
-
- ret = i915_do_wait_request(dev, overlay->last_flip_req,
- interruptible, &dev_priv->render_ring);
- if (ret != 0)
- return ret;
-
- case SWITCH_OFF_STAGE_2:
- intel_overlay_off_tail(overlay);
- break;
- default:
- BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP);
- }
+ if (overlay->flip_tail)
+ overlay->flip_tail(overlay);
- overlay->hw_wedged = 0;
overlay->last_flip_req = 0;
return 0;
}
/* Wait for pending overlay flip and release old frame.
* Needs to be called before the overlay registers are changed
- * via intel_overlay_(un)map_regs_atomic */
+ * via intel_overlay_(un)map_regs
+ */
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- struct drm_gem_object *obj;
- /* only wait if there is actually an old frame to release to
- * guarantee forward progress */
+ /* Only wait if there is actually an old frame to release to
+ * guarantee forward progress.
+ */
if (!overlay->old_vid_bo)
return 0;
- ret = intel_overlay_wait_flip(overlay);
- if (ret != 0)
- return ret;
+ if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
+ struct drm_i915_gem_request *request;
- obj = &overlay->old_vid_bo->base;
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- overlay->old_vid_bo = NULL;
+ /* synchronous slowpath */
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
+
+ BEGIN_LP_RING(2);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+ ret = intel_overlay_do_wait_request(overlay, request, true,
+ intel_overlay_release_old_vid_tail);
+ if (ret)
+ return ret;
+ }
+
+ intel_overlay_release_old_vid_tail(overlay);
return 0;
}
@@ -506,65 +502,65 @@ struct put_image_params {
static int packed_depth_bytes(u32 format)
{
switch (format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV422:
- return 4;
- case I915_OVERLAY_YUV411:
- /* return 6; not implemented */
- default:
- return -EINVAL;
+ case I915_OVERLAY_YUV422:
+ return 4;
+ case I915_OVERLAY_YUV411:
+ /* return 6; not implemented */
+ default:
+ return -EINVAL;
}
}
static int packed_width_bytes(u32 format, short width)
{
switch (format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV422:
- return width << 1;
- default:
- return -EINVAL;
+ case I915_OVERLAY_YUV422:
+ return width << 1;
+ default:
+ return -EINVAL;
}
}
static int uv_hsubsampling(u32 format)
{
switch (format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV422:
- case I915_OVERLAY_YUV420:
- return 2;
- case I915_OVERLAY_YUV411:
- case I915_OVERLAY_YUV410:
- return 4;
- default:
- return -EINVAL;
+ case I915_OVERLAY_YUV422:
+ case I915_OVERLAY_YUV420:
+ return 2;
+ case I915_OVERLAY_YUV411:
+ case I915_OVERLAY_YUV410:
+ return 4;
+ default:
+ return -EINVAL;
}
}
static int uv_vsubsampling(u32 format)
{
switch (format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV420:
- case I915_OVERLAY_YUV410:
- return 2;
- case I915_OVERLAY_YUV422:
- case I915_OVERLAY_YUV411:
- return 1;
- default:
- return -EINVAL;
+ case I915_OVERLAY_YUV420:
+ case I915_OVERLAY_YUV410:
+ return 2;
+ case I915_OVERLAY_YUV422:
+ case I915_OVERLAY_YUV411:
+ return 1;
+ default:
+ return -EINVAL;
}
}
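
The four helpers above map the overlay pixel-format bits to bytes-per-group and to the horizontal/vertical chroma subsampling factors. A stand-alone C sketch of the same mapping follows; the FMT_* values are placeholders for illustration only, since the real I915_OVERLAY_* definitions live in i915_drm.h and are not part of this hunk.

	/* Stand-alone model of packed_depth_bytes()/uv_*subsampling() above.
	 * FMT_* are placeholder format codes, not the real ioctl values. */
	#include <stdio.h>

	#define FMT_YUV422 1	/* placeholder */
	#define FMT_YUV411 2	/* placeholder */
	#define FMT_YUV420 3	/* placeholder */
	#define FMT_YUV410 4	/* placeholder */

	/* bytes covering one group of samples in a packed format */
	static int packed_depth_bytes(int fmt)
	{
		return fmt == FMT_YUV422 ? 4 : -1; /* 4:1:1 packed not implemented */
	}

	/* horizontal subsampling: how many Y samples share one U/V pair */
	static int uv_hsub(int fmt)
	{
		switch (fmt) {
		case FMT_YUV422: case FMT_YUV420: return 2;
		case FMT_YUV411: case FMT_YUV410: return 4;
		default: return -1;
		}
	}

	/* vertical subsampling */
	static int uv_vsub(int fmt)
	{
		switch (fmt) {
		case FMT_YUV420: case FMT_YUV410: return 2;
		case FMT_YUV422: case FMT_YUV411: return 1;
		default: return -1;
		}
	}

	int main(void)
	{
		printf("YUV420: %dx%d subsampling, packed depth %d\n",
		       uv_hsub(FMT_YUV420), uv_vsub(FMT_YUV420),
		       packed_depth_bytes(FMT_YUV420));
		return 0;
	}
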
static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
{
u32 mask, shift, ret;
- if (IS_I9XX(dev)) {
- mask = 0x3f;
- shift = 6;
- } else {
+ if (IS_GEN2(dev)) {
mask = 0x1f;
shift = 5;
+ } else {
+ mask = 0x3f;
+ shift = 6;
}
ret = ((offset + width + mask) >> shift) - (offset >> shift);
- if (IS_I9XX(dev))
+ if (!IS_GEN2(dev))
ret <<= 1;
ret -=1;
return ret << 2;
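
calc_swidthsw() computes how many cache lines (32 bytes on gen2, 64 bytes on later parts) a scanline spans, given its byte offset and width, and encodes that for the SWIDTHSW register field. A minimal user-space sketch of the same arithmetic, so the rounding can be checked outside the driver:

	/* User-space model of calc_swidthsw() above. */
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t calc_swidthsw(int is_gen2, uint32_t offset, uint32_t width)
	{
		uint32_t mask  = is_gen2 ? 0x1f : 0x3f;
		uint32_t shift = is_gen2 ? 5 : 6;
		uint32_t ret;

		ret = ((offset + width + mask) >> shift) - (offset >> shift);
		if (!is_gen2)
			ret <<= 1;	/* doubled on gen3+, as in the driver */
		return (ret - 1) << 2;
	}

	int main(void)
	{
		/* a 1280-byte-wide Y scanline starting 64 bytes into its page */
		printf("0x%08x\n", calc_swidthsw(0, 64, 1280));
		return 0;
	}
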
@@ -587,7 +583,9 @@ static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
- 0xb000, 0x3000, 0x0800, 0x3000, 0xb000};
+ 0xb000, 0x3000, 0x0800, 0x3000, 0xb000
+};
+
static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
@@ -597,7 +595,8 @@ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
- 0x3000, 0x0800, 0x3000};
+ 0x3000, 0x0800, 0x3000
+};
static void update_polyphase_filter(struct overlay_registers *regs)
{
@@ -630,29 +629,31 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
yscale = 1 << FP_SHIFT;
/*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
- xscale_UV = xscale/uv_hscale;
- yscale_UV = yscale/uv_vscale;
- /* make the Y scale to UV scale ratio an exact multiply */
- xscale = xscale_UV * uv_hscale;
- yscale = yscale_UV * uv_vscale;
+ xscale_UV = xscale/uv_hscale;
+ yscale_UV = yscale/uv_vscale;
+ /* make the Y scale to UV scale ratio an exact multiply */
+ xscale = xscale_UV * uv_hscale;
+ yscale = yscale_UV * uv_vscale;
/*} else {
- xscale_UV = 0;
- yscale_UV = 0;
- }*/
+ xscale_UV = 0;
+ yscale_UV = 0;
+ }*/
if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
scale_changed = true;
overlay->old_xscale = xscale;
overlay->old_yscale = yscale;
- regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20)
- | ((xscale >> FP_SHIFT) << 16)
- | ((xscale & FRACT_MASK) << 3);
- regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20)
- | ((xscale_UV >> FP_SHIFT) << 16)
- | ((xscale_UV & FRACT_MASK) << 3);
- regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16)
- | ((yscale_UV >> FP_SHIFT) << 0);
+ regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) |
+ ((xscale >> FP_SHIFT) << 16) |
+ ((xscale & FRACT_MASK) << 3));
+
+ regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) |
+ ((xscale_UV >> FP_SHIFT) << 16) |
+ ((xscale_UV & FRACT_MASK) << 3));
+
+ regs->UVSCALEV = ((((yscale >> FP_SHIFT) << 16) |
+ ((yscale_UV >> FP_SHIFT) << 0)));
if (scale_changed)
update_polyphase_filter(regs);
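
update_scaling_factors() keeps the Y and UV scale factors in fixed point and derives the UV scale by integer division, then forces the Y scale to an exact multiple of it before packing the register fields. The sketch below shows that step in isolation; FP_SHIFT and FRACT_MASK are assumed values for illustration, since the real definitions sit earlier in intel_overlay.c and are not in this hunk.

	/* Sketch of the Y/UV scale relationship set up above (assumed constants). */
	#include <stdint.h>
	#include <stdio.h>

	#define FP_SHIFT   12				/* assumed for illustration */
	#define FRACT_MASK ((1u << FP_SHIFT) - 1)	/* assumed for illustration */

	int main(void)
	{
		int uv_hscale = 2;		/* 4:2:2 horizontal subsampling */
		uint32_t xscale = 6145;		/* ~1.5 in fixed point, deliberately
						 * not a multiple of uv_hscale */
		uint32_t xscale_UV;

		/* derive the UV scale, then force Y to an exact multiple of it */
		xscale_UV = xscale / uv_hscale;
		xscale = xscale_UV * uv_hscale;

		/* pack integer and fractional parts as YRGBSCALE expects */
		printf("xscale=%u xscale_UV=%u packed(x)=0x%08x\n",
		       xscale, xscale_UV,
		       ((xscale >> FP_SHIFT) << 16) | ((xscale & FRACT_MASK) << 3));
		return 0;
	}
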
@@ -664,22 +665,28 @@ static void update_colorkey(struct intel_overlay *overlay,
struct overlay_registers *regs)
{
u32 key = overlay->color_key;
+
switch (overlay->crtc->base.fb->bits_per_pixel) {
- case 8:
- regs->DCLRKV = 0;
- regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
- case 16:
- if (overlay->crtc->base.fb->depth == 15) {
- regs->DCLRKV = RGB15_TO_COLORKEY(key);
- regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
- } else {
- regs->DCLRKV = RGB16_TO_COLORKEY(key);
- regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
- }
- case 24:
- case 32:
- regs->DCLRKV = key;
- regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+ case 8:
+ regs->DCLRKV = 0;
+ regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
+ break;
+
+ case 16:
+ if (overlay->crtc->base.fb->depth == 15) {
+ regs->DCLRKV = RGB15_TO_COLORKEY(key);
+ regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
+ } else {
+ regs->DCLRKV = RGB16_TO_COLORKEY(key);
+ regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
+ }
+ break;
+
+ case 24:
+ case 32:
+ regs->DCLRKV = key;
+ regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+ break;
}
}
@@ -689,48 +696,48 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
if (params->format & I915_OVERLAY_YUV_PLANAR) {
switch (params->format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV422:
- cmd |= OCMD_YUV_422_PLANAR;
- break;
- case I915_OVERLAY_YUV420:
- cmd |= OCMD_YUV_420_PLANAR;
- break;
- case I915_OVERLAY_YUV411:
- case I915_OVERLAY_YUV410:
- cmd |= OCMD_YUV_410_PLANAR;
- break;
+ case I915_OVERLAY_YUV422:
+ cmd |= OCMD_YUV_422_PLANAR;
+ break;
+ case I915_OVERLAY_YUV420:
+ cmd |= OCMD_YUV_420_PLANAR;
+ break;
+ case I915_OVERLAY_YUV411:
+ case I915_OVERLAY_YUV410:
+ cmd |= OCMD_YUV_410_PLANAR;
+ break;
}
} else { /* YUV packed */
switch (params->format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV422:
- cmd |= OCMD_YUV_422_PACKED;
- break;
- case I915_OVERLAY_YUV411:
- cmd |= OCMD_YUV_411_PACKED;
- break;
+ case I915_OVERLAY_YUV422:
+ cmd |= OCMD_YUV_422_PACKED;
+ break;
+ case I915_OVERLAY_YUV411:
+ cmd |= OCMD_YUV_411_PACKED;
+ break;
}
switch (params->format & I915_OVERLAY_SWAP_MASK) {
- case I915_OVERLAY_NO_SWAP:
- break;
- case I915_OVERLAY_UV_SWAP:
- cmd |= OCMD_UV_SWAP;
- break;
- case I915_OVERLAY_Y_SWAP:
- cmd |= OCMD_Y_SWAP;
- break;
- case I915_OVERLAY_Y_AND_UV_SWAP:
- cmd |= OCMD_Y_AND_UV_SWAP;
- break;
+ case I915_OVERLAY_NO_SWAP:
+ break;
+ case I915_OVERLAY_UV_SWAP:
+ cmd |= OCMD_UV_SWAP;
+ break;
+ case I915_OVERLAY_Y_SWAP:
+ cmd |= OCMD_Y_SWAP;
+ break;
+ case I915_OVERLAY_Y_AND_UV_SWAP:
+ cmd |= OCMD_Y_AND_UV_SWAP;
+ break;
}
}
return cmd;
}
-int intel_overlay_do_put_image(struct intel_overlay *overlay,
- struct drm_gem_object *new_bo,
- struct put_image_params *params)
+static int intel_overlay_do_put_image(struct intel_overlay *overlay,
+ struct drm_gem_object *new_bo,
+ struct put_image_params *params)
{
int ret, tmp_width;
struct overlay_registers *regs;
@@ -755,24 +762,24 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
if (!overlay->active) {
- regs = intel_overlay_map_regs_atomic(overlay);
+ regs = intel_overlay_map_regs(overlay);
if (!regs) {
ret = -ENOMEM;
goto out_unpin;
}
regs->OCONFIG = OCONF_CC_OUT_8BIT;
- if (IS_I965GM(overlay->dev))
+ if (IS_GEN4(overlay->dev))
regs->OCONFIG |= OCONF_CSC_MODE_BT709;
regs->OCONFIG |= overlay->crtc->pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
- intel_overlay_unmap_regs_atomic(overlay);
+ intel_overlay_unmap_regs(overlay, regs);
ret = intel_overlay_on(overlay);
if (ret != 0)
goto out_unpin;
}
- regs = intel_overlay_map_regs_atomic(overlay);
+ regs = intel_overlay_map_regs(overlay);
if (!regs) {
ret = -ENOMEM;
goto out_unpin;
@@ -788,7 +795,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
regs->SWIDTH = params->src_w;
regs->SWIDTHSW = calc_swidthsw(overlay->dev,
- params->offset_Y, tmp_width);
+ params->offset_Y, tmp_width);
regs->SHEIGHT = params->src_h;
regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y;
regs->OSTRIDE = params->stride_Y;
@@ -799,9 +806,9 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
u32 tmp_U, tmp_V;
regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
- params->src_w/uv_hscale);
+ params->src_w/uv_hscale);
tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
- params->src_w/uv_hscale);
+ params->src_w/uv_hscale);
regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
@@ -815,9 +822,11 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
regs->OCMD = overlay_cmd_reg(params);
- intel_overlay_unmap_regs_atomic(overlay);
+ intel_overlay_unmap_regs(overlay, regs);
- intel_overlay_continue(overlay, scale_changed);
+ ret = intel_overlay_continue(overlay, scale_changed);
+ if (ret)
+ goto out_unpin;
overlay->old_vid_bo = overlay->vid_bo;
overlay->vid_bo = to_intel_bo(new_bo);
@@ -829,20 +838,19 @@ out_unpin:
return ret;
}
-int intel_overlay_switch_off(struct intel_overlay *overlay)
+int intel_overlay_switch_off(struct intel_overlay *overlay,
+ bool interruptible)
{
- int ret;
struct overlay_registers *regs;
struct drm_device *dev = overlay->dev;
+ int ret;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
- if (overlay->hw_wedged) {
- ret = intel_overlay_recover_from_interrupt(overlay, 1);
- if (ret != 0)
- return ret;
- }
+ ret = intel_overlay_recover_from_interrupt(overlay, interruptible);
+ if (ret != 0)
+ return ret;
if (!overlay->active)
return 0;
@@ -851,33 +859,29 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
if (ret != 0)
return ret;
- regs = intel_overlay_map_regs_atomic(overlay);
+ regs = intel_overlay_map_regs(overlay);
regs->OCMD = 0;
- intel_overlay_unmap_regs_atomic(overlay);
+ intel_overlay_unmap_regs(overlay, regs);
- ret = intel_overlay_off(overlay);
+ ret = intel_overlay_off(overlay, interruptible);
if (ret != 0)
return ret;
intel_overlay_off_tail(overlay);
-
return 0;
}
static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
struct intel_crtc *crtc)
{
- drm_i915_private_t *dev_priv = overlay->dev->dev_private;
- u32 pipeconf;
- int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF;
+ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
- if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON)
+ if (!crtc->active)
return -EINVAL;
- pipeconf = I915_READ(pipeconf_reg);
-
/* can't use the overlay with double wide pipe */
- if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE)
+ if (INTEL_INFO(overlay->dev)->gen < 4 &&
+ (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
return -EINVAL;
return 0;
@@ -886,20 +890,22 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 ratio;
+ drm_i915_private_t *dev_priv = dev->dev_private;
u32 pfit_control = I915_READ(PFIT_CONTROL);
+ u32 ratio;
/* XXX: This is not the same logic as in the xorg driver, but more in
- * line with the intel documentation for the i965 */
- if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) {
- ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT;
- } else { /* on i965 use the PGM reg to read out the autoscaler values */
- ratio = I915_READ(PFIT_PGM_RATIOS);
- if (IS_I965G(dev))
- ratio >>= PFIT_VERT_SCALE_SHIFT_965;
+ * line with the intel documentation for the i965
+ */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ /* on i965 use the PGM reg to read out the autoscaler values */
+ ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
+ } else {
+ if (pfit_control & VERT_AUTO_SCALE)
+ ratio = I915_READ(PFIT_AUTO_RATIOS);
else
- ratio >>= PFIT_VERT_SCALE_SHIFT;
+ ratio = I915_READ(PFIT_PGM_RATIOS);
+ ratio >>= PFIT_VERT_SCALE_SHIFT;
}
overlay->pfit_vscale_ratio = ratio;
@@ -910,12 +916,10 @@ static int check_overlay_dst(struct intel_overlay *overlay,
{
struct drm_display_mode *mode = &overlay->crtc->base.mode;
- if ((rec->dst_x < mode->crtc_hdisplay)
- && (rec->dst_x + rec->dst_width
- <= mode->crtc_hdisplay)
- && (rec->dst_y < mode->crtc_vdisplay)
- && (rec->dst_y + rec->dst_height
- <= mode->crtc_vdisplay))
+ if (rec->dst_x < mode->crtc_hdisplay &&
+ rec->dst_x + rec->dst_width <= mode->crtc_hdisplay &&
+ rec->dst_y < mode->crtc_vdisplay &&
+ rec->dst_y + rec->dst_height <= mode->crtc_vdisplay)
return 0;
else
return -EINVAL;
@@ -940,53 +944,57 @@ static int check_overlay_src(struct drm_device *dev,
struct drm_intel_overlay_put_image *rec,
struct drm_gem_object *new_bo)
{
- u32 stride_mask;
- int depth;
int uv_hscale = uv_hsubsampling(rec->flags);
int uv_vscale = uv_vsubsampling(rec->flags);
- size_t tmp;
+ u32 stride_mask, tmp;
+ int depth;
/* check src dimensions */
if (IS_845G(dev) || IS_I830(dev)) {
- if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY
- || rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
+ if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
+ rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
return -EINVAL;
} else {
- if (rec->src_height > IMAGE_MAX_HEIGHT
- || rec->src_width > IMAGE_MAX_WIDTH)
+ if (rec->src_height > IMAGE_MAX_HEIGHT ||
+ rec->src_width > IMAGE_MAX_WIDTH)
return -EINVAL;
}
+
/* better safe than sorry, use 4 as the maximal subsampling ratio */
- if (rec->src_height < N_VERT_Y_TAPS*4
- || rec->src_width < N_HORIZ_Y_TAPS*4)
+ if (rec->src_height < N_VERT_Y_TAPS*4 ||
+ rec->src_width < N_HORIZ_Y_TAPS*4)
return -EINVAL;
/* check alignment constraints */
switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
- case I915_OVERLAY_RGB:
- /* not implemented */
+ case I915_OVERLAY_RGB:
+ /* not implemented */
+ return -EINVAL;
+
+ case I915_OVERLAY_YUV_PACKED:
+ if (uv_vscale != 1)
return -EINVAL;
- case I915_OVERLAY_YUV_PACKED:
- depth = packed_depth_bytes(rec->flags);
- if (uv_vscale != 1)
- return -EINVAL;
- if (depth < 0)
- return depth;
- /* ignore UV planes */
- rec->stride_UV = 0;
- rec->offset_U = 0;
- rec->offset_V = 0;
- /* check pixel alignment */
- if (rec->offset_Y % depth)
- return -EINVAL;
- break;
- case I915_OVERLAY_YUV_PLANAR:
- if (uv_vscale < 0 || uv_hscale < 0)
- return -EINVAL;
- /* no offset restrictions for planar formats */
- break;
- default:
+
+ depth = packed_depth_bytes(rec->flags);
+ if (depth < 0)
+ return depth;
+
+ /* ignore UV planes */
+ rec->stride_UV = 0;
+ rec->offset_U = 0;
+ rec->offset_V = 0;
+ /* check pixel alignment */
+ if (rec->offset_Y % depth)
+ return -EINVAL;
+ break;
+
+ case I915_OVERLAY_YUV_PLANAR:
+ if (uv_vscale < 0 || uv_hscale < 0)
return -EINVAL;
+ /* no offset restrictions for planar formats */
+ break;
+
+ default:
+ return -EINVAL;
}
if (rec->src_width % uv_hscale)
@@ -1000,47 +1008,74 @@ static int check_overlay_src(struct drm_device *dev,
if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
return -EINVAL;
- if (IS_I965G(dev) && rec->stride_Y < 512)
+ if (IS_GEN4(dev) && rec->stride_Y < 512)
return -EINVAL;
tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
- 4 : 8;
- if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024)
+ 4096 : 8192;
+ if (rec->stride_Y > tmp || rec->stride_UV > 2*1024)
return -EINVAL;
/* check buffer dimensions */
switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
- case I915_OVERLAY_RGB:
- case I915_OVERLAY_YUV_PACKED:
- /* always 4 Y values per depth pixels */
- if (packed_width_bytes(rec->flags, rec->src_width)
- > rec->stride_Y)
- return -EINVAL;
-
- tmp = rec->stride_Y*rec->src_height;
- if (rec->offset_Y + tmp > new_bo->size)
- return -EINVAL;
- break;
- case I915_OVERLAY_YUV_PLANAR:
- if (rec->src_width > rec->stride_Y)
- return -EINVAL;
- if (rec->src_width/uv_hscale > rec->stride_UV)
- return -EINVAL;
-
- tmp = rec->stride_Y*rec->src_height;
- if (rec->offset_Y + tmp > new_bo->size)
- return -EINVAL;
- tmp = rec->stride_UV*rec->src_height;
- tmp /= uv_vscale;
- if (rec->offset_U + tmp > new_bo->size
- || rec->offset_V + tmp > new_bo->size)
- return -EINVAL;
- break;
+ case I915_OVERLAY_RGB:
+ case I915_OVERLAY_YUV_PACKED:
+ /* always 4 Y values per depth pixel */
+ if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y)
+ return -EINVAL;
+
+ tmp = rec->stride_Y*rec->src_height;
+ if (rec->offset_Y + tmp > new_bo->size)
+ return -EINVAL;
+ break;
+
+ case I915_OVERLAY_YUV_PLANAR:
+ if (rec->src_width > rec->stride_Y)
+ return -EINVAL;
+ if (rec->src_width/uv_hscale > rec->stride_UV)
+ return -EINVAL;
+
+ tmp = rec->stride_Y * rec->src_height;
+ if (rec->offset_Y + tmp > new_bo->size)
+ return -EINVAL;
+
+ tmp = rec->stride_UV * (rec->src_height / uv_vscale);
+ if (rec->offset_U + tmp > new_bo->size ||
+ rec->offset_V + tmp > new_bo->size)
+ return -EINVAL;
+ break;
}
return 0;
}
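
The reworked check_overlay_src() bounds the Y stride at 4096 bytes for planar and 8192 for packed sources, and sizes each chroma plane as stride_UV * (src_height / uv_vscale). A user-space model of the planar buffer-size check follows; the struct is a hypothetical stand-in for drm_intel_overlay_put_image, reduced to the fields the check uses.

	/* Model of the planar-YUV size check above. */
	#include <stdint.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct img {
		uint32_t stride_Y, stride_UV;
		uint32_t offset_Y, offset_U, offset_V;
		uint16_t src_width, src_height;
	};

	static bool planar_fits(const struct img *r, size_t bo_size,
				int uv_hscale, int uv_vscale)
	{
		uint32_t tmp;

		if (r->src_width > r->stride_Y)
			return false;
		if (r->src_width / uv_hscale > r->stride_UV)
			return false;

		tmp = r->stride_Y * r->src_height;		/* Y plane */
		if (r->offset_Y + tmp > bo_size)
			return false;

		tmp = r->stride_UV * (r->src_height / uv_vscale); /* each UV plane */
		return r->offset_U + tmp <= bo_size &&
		       r->offset_V + tmp <= bo_size;
	}

	int main(void)
	{
		struct img r = { 1024, 512, 0, 737280, 921600, 720, 720 };

		printf("fits: %d\n", planar_fits(&r, 2 * 1024 * 1024, 2, 2));
		return 0;
	}
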
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int intel_panel_fitter_pipe(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pfit_control;
+
+ /* i830 doesn't have a panel fitter */
+ if (IS_I830(dev))
+ return -1;
+
+ pfit_control = I915_READ(PFIT_CONTROL);
+
+ /* See if the panel fitter is in use */
+ if ((pfit_control & PFIT_ENABLE) == 0)
+ return -1;
+
+ /* 965 can place panel fitter on either pipe */
+ if (IS_GEN4(dev))
+ return (pfit_control >> 29) & 0x3;
+
+ /* older chips can only use pipe 1 */
+ return 1;
+}
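
intel_panel_fitter_pipe() reads PFIT_CONTROL once and decodes which pipe the fitter drives: on gen4 the pipe select sits in bits 30:29, older parts can only use pipe 1, and i830 has no fitter at all. The decode as a pure function, with the PFIT_ENABLE bit position assumed for illustration (the real definition is in i915_reg.h):

	/* Decode sketch for intel_panel_fitter_pipe(). */
	#include <stdint.h>
	#include <stdio.h>

	#define PFIT_ENABLE (1u << 31)	/* assumed bit position */

	static int panel_fitter_pipe(uint32_t pfit_control, int is_gen4, int is_i830)
	{
		if (is_i830)			/* i830 has no panel fitter */
			return -1;
		if ((pfit_control & PFIT_ENABLE) == 0)
			return -1;		/* fitter not in use */
		if (is_gen4)			/* gen4 can route it to any pipe */
			return (pfit_control >> 29) & 0x3;
		return 1;			/* older parts: pipe 1 only */
	}

	int main(void)
	{
		printf("%d\n", panel_fitter_pipe(PFIT_ENABLE | (1u << 29), 1, 0));
		return 0;
	}
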
+
int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1068,7 +1103,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
mutex_lock(&dev->mode_config.mutex);
mutex_lock(&dev->struct_mutex);
- ret = intel_overlay_switch_off(overlay);
+ ret = intel_overlay_switch_off(overlay, true);
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->mode_config.mutex);
@@ -1081,7 +1116,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
return -ENOMEM;
drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
- DRM_MODE_OBJECT_CRTC);
+ DRM_MODE_OBJECT_CRTC);
if (!drmmode_obj) {
ret = -ENOENT;
goto out_free;
@@ -1089,7 +1124,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
new_bo = drm_gem_object_lookup(dev, file_priv,
- put_image_rec->bo_handle);
+ put_image_rec->bo_handle);
if (!new_bo) {
ret = -ENOENT;
goto out_free;
@@ -1098,15 +1133,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
mutex_lock(&dev->mode_config.mutex);
mutex_lock(&dev->struct_mutex);
- if (overlay->hw_wedged) {
- ret = intel_overlay_recover_from_interrupt(overlay, 1);
- if (ret != 0)
- goto out_unlock;
- }
+ ret = intel_overlay_recover_from_interrupt(overlay, true);
+ if (ret != 0)
+ goto out_unlock;
if (overlay->crtc != crtc) {
struct drm_display_mode *mode = &crtc->base.mode;
- ret = intel_overlay_switch_off(overlay);
+ ret = intel_overlay_switch_off(overlay, true);
if (ret != 0)
goto out_unlock;
@@ -1117,9 +1150,9 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
overlay->crtc = crtc;
crtc->overlay = overlay;
- if (intel_panel_fitter_pipe(dev) == crtc->pipe
- /* and line to wide, i.e. one-line-mode */
- && mode->hdisplay > 1024) {
+ /* line too wide, i.e. one-line-mode */
+ if (mode->hdisplay > 1024 &&
+ intel_panel_fitter_pipe(dev) == crtc->pipe) {
overlay->pfit_active = 1;
update_pfit_vscale_ratio(overlay);
} else
@@ -1132,10 +1165,10 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
if (overlay->pfit_active) {
params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
- overlay->pfit_vscale_ratio);
+ overlay->pfit_vscale_ratio);
/* shifting right rounds downwards, so add 1 */
params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
- overlay->pfit_vscale_ratio) + 1;
+ overlay->pfit_vscale_ratio) + 1;
} else {
params->dst_y = put_image_rec->dst_y;
params->dst_h = put_image_rec->dst_height;
@@ -1147,8 +1180,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
params->src_h = put_image_rec->src_height;
params->src_scan_w = put_image_rec->src_scan_width;
params->src_scan_h = put_image_rec->src_scan_height;
- if (params->src_scan_h > params->src_h
- || params->src_scan_w > params->src_w) {
+ if (params->src_scan_h > params->src_h ||
+ params->src_scan_w > params->src_w) {
ret = -EINVAL;
goto out_unlock;
}
@@ -1204,7 +1237,7 @@ static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
return false;
for (i = 0; i < 3; i++) {
- if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
+ if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
return false;
}
@@ -1225,16 +1258,18 @@ static bool check_gamma5_errata(u32 gamma5)
static int check_gamma(struct drm_intel_overlay_attrs *attrs)
{
- if (!check_gamma_bounds(0, attrs->gamma0)
- || !check_gamma_bounds(attrs->gamma0, attrs->gamma1)
- || !check_gamma_bounds(attrs->gamma1, attrs->gamma2)
- || !check_gamma_bounds(attrs->gamma2, attrs->gamma3)
- || !check_gamma_bounds(attrs->gamma3, attrs->gamma4)
- || !check_gamma_bounds(attrs->gamma4, attrs->gamma5)
- || !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
+ if (!check_gamma_bounds(0, attrs->gamma0) ||
+ !check_gamma_bounds(attrs->gamma0, attrs->gamma1) ||
+ !check_gamma_bounds(attrs->gamma1, attrs->gamma2) ||
+ !check_gamma_bounds(attrs->gamma2, attrs->gamma3) ||
+ !check_gamma_bounds(attrs->gamma3, attrs->gamma4) ||
+ !check_gamma_bounds(attrs->gamma4, attrs->gamma5) ||
+ !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
return -EINVAL;
+
if (!check_gamma5_errata(attrs->gamma5))
return -EINVAL;
+
return 0;
}
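
check_gamma() requires the six gamma points to be strictly increasing in every 8-bit colour channel, bracketed by 0 below and 0x00ffffff above (the extra range check at the top of check_gamma_bounds() and the gamma5 erratum are outside this hunk). A small stand-alone version of the per-channel monotonicity test:

	/* Per-channel monotonicity check, mirroring check_gamma_bounds(). */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static bool gamma_bounds_ok(uint32_t lo, uint32_t hi)
	{
		int i;

		for (i = 0; i < 3; i++)
			if (((lo >> i*8) & 0xff) >= ((hi >> i*8) & 0xff))
				return false;
		return true;
	}

	int main(void)
	{
		uint32_t g[6] = { 0x00101010, 0x00202020, 0x00303030,
				  0x00404040, 0x00505050, 0x00606060 };
		bool ok = gamma_bounds_ok(0, g[0]) &&
			  gamma_bounds_ok(g[5], 0x00ffffff);
		int i;

		for (i = 0; i < 5; i++)
			ok = ok && gamma_bounds_ok(g[i], g[i + 1]);
		printf("gamma ramp %s\n", ok ? "valid" : "invalid");
		return 0;
	}
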
@@ -1261,13 +1296,14 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
mutex_lock(&dev->mode_config.mutex);
mutex_lock(&dev->struct_mutex);
+ ret = -EINVAL;
if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
- attrs->color_key = overlay->color_key;
+ attrs->color_key = overlay->color_key;
attrs->brightness = overlay->brightness;
- attrs->contrast = overlay->contrast;
+ attrs->contrast = overlay->contrast;
attrs->saturation = overlay->saturation;
- if (IS_I9XX(dev)) {
+ if (!IS_GEN2(dev)) {
attrs->gamma0 = I915_READ(OGAMC0);
attrs->gamma1 = I915_READ(OGAMC1);
attrs->gamma2 = I915_READ(OGAMC2);
@@ -1275,29 +1311,20 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
attrs->gamma4 = I915_READ(OGAMC4);
attrs->gamma5 = I915_READ(OGAMC5);
}
- ret = 0;
} else {
- overlay->color_key = attrs->color_key;
- if (attrs->brightness >= -128 && attrs->brightness <= 127) {
- overlay->brightness = attrs->brightness;
- } else {
- ret = -EINVAL;
+ if (attrs->brightness < -128 || attrs->brightness > 127)
goto out_unlock;
- }
- if (attrs->contrast <= 255) {
- overlay->contrast = attrs->contrast;
- } else {
- ret = -EINVAL;
+ if (attrs->contrast > 255)
goto out_unlock;
- }
- if (attrs->saturation <= 1023) {
- overlay->saturation = attrs->saturation;
- } else {
- ret = -EINVAL;
+ if (attrs->saturation > 1023)
goto out_unlock;
- }
- regs = intel_overlay_map_regs_atomic(overlay);
+ overlay->color_key = attrs->color_key;
+ overlay->brightness = attrs->brightness;
+ overlay->contrast = attrs->contrast;
+ overlay->saturation = attrs->saturation;
+
+ regs = intel_overlay_map_regs(overlay);
if (!regs) {
ret = -ENOMEM;
goto out_unlock;
@@ -1305,13 +1332,11 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
update_reg_attrs(overlay, regs);
- intel_overlay_unmap_regs_atomic(overlay);
+ intel_overlay_unmap_regs(overlay, regs);
if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
- if (!IS_I9XX(dev)) {
- ret = -EINVAL;
+ if (IS_GEN2(dev))
goto out_unlock;
- }
if (overlay->active) {
ret = -EBUSY;
@@ -1319,7 +1344,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
}
ret = check_gamma(attrs);
- if (ret != 0)
+ if (ret)
goto out_unlock;
I915_WRITE(OGAMC0, attrs->gamma0);
@@ -1329,9 +1354,9 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
I915_WRITE(OGAMC4, attrs->gamma4);
I915_WRITE(OGAMC5, attrs->gamma5);
}
- ret = 0;
}
+ ret = 0;
out_unlock:
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->mode_config.mutex);
@@ -1347,7 +1372,7 @@ void intel_setup_overlay(struct drm_device *dev)
struct overlay_registers *regs;
int ret;
- if (!OVERLAY_EXISTS(dev))
+ if (!HAS_OVERLAY(dev))
return;
overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
@@ -1360,22 +1385,28 @@ void intel_setup_overlay(struct drm_device *dev)
goto out_free;
overlay->reg_bo = to_intel_bo(reg_bo);
- if (OVERLAY_NONPHYSICAL(dev)) {
- ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
- if (ret) {
- DRM_ERROR("failed to pin overlay register bo\n");
- goto out_free_bo;
- }
- overlay->flip_addr = overlay->reg_bo->gtt_offset;
- } else {
+ if (OVERLAY_NEEDS_PHYSICAL(dev)) {
ret = i915_gem_attach_phys_object(dev, reg_bo,
I915_GEM_PHYS_OVERLAY_REGS,
- 0);
+ PAGE_SIZE);
if (ret) {
DRM_ERROR("failed to attach phys overlay regs\n");
goto out_free_bo;
}
overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
+ } else {
+ ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+ if (ret) {
+ DRM_ERROR("failed to pin overlay register bo\n");
+ goto out_free_bo;
+ }
+ overlay->flip_addr = overlay->reg_bo->gtt_offset;
+
+ ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
+ if (ret) {
+ DRM_ERROR("failed to move overlay register bo into the GTT\n");
+ goto out_unpin_bo;
+ }
}
/* init all values */
@@ -1384,21 +1415,22 @@ void intel_setup_overlay(struct drm_device *dev)
overlay->contrast = 75;
overlay->saturation = 146;
- regs = intel_overlay_map_regs_atomic(overlay);
+ regs = intel_overlay_map_regs(overlay);
if (!regs)
goto out_free_bo;
memset(regs, 0, sizeof(struct overlay_registers));
update_polyphase_filter(regs);
-
update_reg_attrs(overlay, regs);
- intel_overlay_unmap_regs_atomic(overlay);
+ intel_overlay_unmap_regs(overlay, regs);
dev_priv->overlay = overlay;
DRM_INFO("initialized overlay support\n");
return;
+out_unpin_bo:
+ i915_gem_object_unpin(reg_bo);
out_free_bo:
drm_gem_object_unreference(reg_bo);
out_free:
@@ -1408,18 +1440,23 @@ out_free:
void intel_cleanup_overlay(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
- if (dev_priv->overlay) {
- /* The bo's should be free'd by the generic code already.
- * Furthermore modesetting teardown happens beforehand so the
- * hardware should be off already */
- BUG_ON(dev_priv->overlay->active);
+ if (!dev_priv->overlay)
+ return;
- kfree(dev_priv->overlay);
- }
+ /* The bo's should be free'd by the generic code already.
+ * Furthermore modesetting teardown happens beforehand so the
+ * hardware should be off already */
+ BUG_ON(dev_priv->overlay->active);
+
+ drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
+ kfree(dev_priv->overlay);
}
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
struct intel_overlay_error_state {
struct overlay_registers regs;
unsigned long base;
@@ -1427,6 +1464,32 @@ struct intel_overlay_error_state {
u32 isr;
};
+static struct overlay_registers *
+intel_overlay_map_regs_atomic(struct intel_overlay *overlay,
+ int slot)
+{
+ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+ struct overlay_registers *regs;
+
+ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ regs = overlay->reg_bo->phys_obj->handle->vaddr;
+ else
+ regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ overlay->reg_bo->gtt_offset,
+ slot);
+
+ return regs;
+}
+
+static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
+ int slot,
+ struct overlay_registers *regs)
+{
+ if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ io_mapping_unmap_atomic(regs, slot);
+}
+
+
struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_device *dev)
{
@@ -1444,17 +1507,17 @@ intel_overlay_capture_error_state(struct drm_device *dev)
error->dovsta = I915_READ(DOVSTA);
error->isr = I915_READ(ISR);
- if (OVERLAY_NONPHYSICAL(overlay->dev))
- error->base = (long) overlay->reg_bo->gtt_offset;
- else
+ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
+ else
+ error->base = (long) overlay->reg_bo->gtt_offset;
- regs = intel_overlay_map_regs_atomic(overlay);
+ regs = intel_overlay_map_regs_atomic(overlay, KM_IRQ0);
if (!regs)
goto err;
memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
- intel_overlay_unmap_regs_atomic(overlay);
+ intel_overlay_unmap_regs_atomic(overlay, KM_IRQ0, regs);
return error;
@@ -1515,3 +1578,4 @@ intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_s
P(UVSCALEV);
#undef P
}
+#endif
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e7f5299d9d57..92ff8f385278 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -30,6 +30,8 @@
#include "intel_drv.h"
+#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
+
void
intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
@@ -109,3 +111,110 @@ done:
dev_priv->pch_pf_pos = (x << 16) | y;
dev_priv->pch_pf_size = (width << 16) | height;
}
+
+static int is_backlight_combination_mode(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
+
+ if (IS_GEN2(dev))
+ return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
+
+ return 0;
+}
+
+u32 intel_panel_get_max_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 max;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ max = I915_READ(BLC_PWM_PCH_CTL2) >> 16;
+ } else {
+ max = I915_READ(BLC_PWM_CTL);
+ if (IS_PINEVIEW(dev)) {
+ max >>= 17;
+ } else {
+ max >>= 16;
+ if (INTEL_INFO(dev)->gen < 4)
+ max &= ~1;
+ }
+
+ if (is_backlight_combination_mode(dev))
+ max *= 0xff;
+ }
+
+ if (max == 0) {
+ /* XXX add code here to query mode clock or hardware clock
+ * and program max PWM appropriately.
+ */
+ DRM_ERROR("fixme: max PWM is zero.\n");
+ max = 1;
+ }
+
+ DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
+ return max;
+}
+
+u32 intel_panel_get_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ } else {
+ val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ if (IS_PINEVIEW(dev))
+ val >>= 1;
+
+ if (is_backlight_combination_mode(dev)) {
+ u8 lbpc;
+
+ val &= ~1;
+ pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
+ val *= lbpc;
+ val >>= 1;
+ }
+ }
+
+ DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
+ return val;
+}
+
+static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_CPU_CTL, val | level);
+}
+
+void intel_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+
+ if (HAS_PCH_SPLIT(dev))
+ return intel_pch_panel_set_backlight(dev, level);
+
+ if (is_backlight_combination_mode(dev)) {
+ u32 max = intel_panel_get_max_backlight(dev);
+ u8 lbpc;
+
+ lbpc = level * 0xfe / max + 1;
+ level /= lbpc;
+ pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
+ }
+
+ tmp = I915_READ(BLC_PWM_CTL);
+ if (IS_PINEVIEW(dev)) {
+ tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
+ level <<= 1;
+ } else
+ tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_CTL, tmp | level);
+}
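
In combination mode the requested brightness is split between the coarse LBPC byte in PCI config space and the fine PWM duty cycle: the LBPC scale is level * 0xfe / max + 1, and the remaining level is divided by that scale before being written to BLC_PWM_CTL. The arithmetic, modelled stand-alone with example values:

	/* Model of the combination-mode brightness split in
	 * intel_panel_set_backlight(). */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t max = 255 * 0xff;	/* max duty cycle scaled by 0xff, as
						 * in intel_panel_get_max_backlight() */
		uint32_t level = 20000;		/* requested brightness, 0..max */

		uint8_t lbpc = level * 0xfe / max + 1;	/* coarse scale, 1..0xff */
		uint32_t duty = level / lbpc;		/* fine PWM duty cycle */

		printf("level %u -> lbpc %u, duty %u\n", level, lbpc, duty);
		return 0;
	}
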
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index cb3508f78bc3..d89b88791aac 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
+#include "intel_drv.h"
static u32 i915_gem_get_seqno(struct drm_device *dev)
{
@@ -49,9 +50,9 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
static void
render_ring_flush(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 invalidate_domains,
- u32 flush_domains)
+ struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 cmd;
@@ -97,7 +98,7 @@ render_ring_flush(struct drm_device *dev,
if ((invalidate_domains|flush_domains) &
I915_GEM_DOMAIN_RENDER)
cmd &= ~MI_NO_WRITE_FLUSH;
- if (!IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen < 4) {
/*
* On the 965, the sampler cache always gets flushed
* and this bit is reserved.
@@ -118,38 +119,26 @@ render_ring_flush(struct drm_device *dev,
}
}
-static unsigned int render_ring_get_head(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- return I915_READ(PRB0_HEAD) & HEAD_ADDR;
-}
-
-static unsigned int render_ring_get_tail(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static void ring_set_tail(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 value)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- return I915_READ(PRB0_TAIL) & TAIL_ADDR;
+ I915_WRITE_TAIL(ring, ring->tail);
}
-static unsigned int render_ring_get_active_head(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+u32 intel_ring_get_active_head(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+ u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
+ RING_ACTHD(ring->mmio_base) : ACTHD;
return I915_READ(acthd_reg);
}
-static void render_ring_advance_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- I915_WRITE(PRB0_TAIL, ring->tail);
-}
-
static int init_ring_common(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
u32 head;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -157,57 +146,57 @@ static int init_ring_common(struct drm_device *dev,
obj_priv = to_intel_bo(ring->gem_object);
/* Stop the ring if it's running. */
- I915_WRITE(ring->regs.ctl, 0);
- I915_WRITE(ring->regs.head, 0);
- I915_WRITE(ring->regs.tail, 0);
+ I915_WRITE_CTL(ring, 0);
+ I915_WRITE_HEAD(ring, 0);
+ ring->set_tail(dev, ring, 0);
/* Initialize the ring. */
- I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
- head = ring->get_head(dev, ring);
+ I915_WRITE_START(ring, obj_priv->gtt_offset);
+ head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* G45 ring initialization fails to reset head to zero */
if (head != 0) {
DRM_ERROR("%s head not reset to zero "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
- I915_READ(ring->regs.ctl),
- I915_READ(ring->regs.head),
- I915_READ(ring->regs.tail),
- I915_READ(ring->regs.start));
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
- I915_WRITE(ring->regs.head, 0);
+ I915_WRITE_HEAD(ring, 0);
DRM_ERROR("%s head forced to zero "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
- I915_READ(ring->regs.ctl),
- I915_READ(ring->regs.head),
- I915_READ(ring->regs.tail),
- I915_READ(ring->regs.start));
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
}
- I915_WRITE(ring->regs.ctl,
+ I915_WRITE_CTL(ring,
((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_NO_REPORT | RING_VALID);
- head = I915_READ(ring->regs.head) & HEAD_ADDR;
+ head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* If the head is still not zero, the ring is dead */
if (head != 0) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
- I915_READ(ring->regs.ctl),
- I915_READ(ring->regs.head),
- I915_READ(ring->regs.tail),
- I915_READ(ring->regs.start));
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
return -EIO;
}
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_kernel_lost_context(dev);
else {
- ring->head = ring->get_head(dev, ring);
- ring->tail = ring->get_tail(dev, ring);
+ ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->size;
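
After (re)initialisation the driver re-derives the free space in the ring from the hardware HEAD and the software TAIL; keeping 8 bytes of slack means a full ring never lets tail catch up to head, since head == tail is read as an empty ring. The same computation is reused in intel_wait_ring_buffer() below; a stand-alone version:

	/* Ring free-space computation as used in init_ring_common() and
	 * intel_wait_ring_buffer(): head and tail are byte offsets. */
	#include <stdio.h>

	static int ring_space(unsigned head, unsigned tail, unsigned size)
	{
		int space = head - (tail + 8);	/* 8 bytes of slack, as in the driver */

		if (space < 0)
			space += size;
		return space;
	}

	int main(void)
	{
		unsigned size = 32 * 4096;	/* 32 pages, matching the rings here */

		printf("%d\n", ring_space(0, 0, size));		/* freshly reset ring */
		printf("%d\n", ring_space(4096, 256, size));	/* head ahead of tail */
		return 0;
	}
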
@@ -216,13 +205,13 @@ static int init_ring_common(struct drm_device *dev,
}
static int init_render_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret = init_ring_common(dev, ring);
int mode;
- if (IS_I9XX(dev) && !IS_GEN3(dev)) {
+ if (INTEL_INFO(dev)->gen > 3) {
mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
if (IS_GEN6(dev))
mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
@@ -250,9 +239,8 @@ do { \
*/
static u32
render_ring_add_request(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_file *file_priv,
- u32 flush_domains)
+ struct intel_ring_buffer *ring,
+ u32 flush_domains)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 seqno;
@@ -315,8 +303,8 @@ render_ring_add_request(struct drm_device *dev,
}
static u32
-render_ring_get_gem_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+render_ring_get_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
if (HAS_PIPE_CONTROL(dev))
@@ -327,7 +315,7 @@ render_ring_get_gem_seqno(struct drm_device *dev,
static void
render_ring_get_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -344,7 +332,7 @@ render_ring_get_user_irq(struct drm_device *dev,
static void
render_ring_put_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -360,21 +348,23 @@ render_ring_put_user_irq(struct drm_device *dev,
spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
-static void render_setup_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+void intel_ring_setup_status_page(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
if (IS_GEN6(dev)) {
- I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
- I915_READ(HWS_PGA_GEN6); /* posting read */
+ I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
+ ring->status_page.gfx_addr);
+ I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
} else {
- I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
- I915_READ(HWS_PGA); /* posting read */
+ I915_WRITE(RING_HWS_PGA(ring->mmio_base),
+ ring->status_page.gfx_addr);
+ I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
}
}
-void
+static void
bsd_ring_flush(struct drm_device *dev,
struct intel_ring_buffer *ring,
u32 invalidate_domains,
@@ -386,45 +376,16 @@ bsd_ring_flush(struct drm_device *dev,
intel_ring_advance(dev, ring);
}
-static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
-}
-
-static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
-}
-
-static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- return I915_READ(BSD_RING_ACTHD);
-}
-
-static inline void bsd_ring_advance_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- I915_WRITE(BSD_RING_TAIL, ring->tail);
-}
-
static int init_bsd_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
return init_ring_common(dev, ring);
}
static u32
bsd_ring_add_request(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_file *file_priv,
- u32 flush_domains)
+ struct intel_ring_buffer *ring,
+ u32 flush_domains)
{
u32 seqno;
@@ -443,40 +404,32 @@ bsd_ring_add_request(struct drm_device *dev,
return seqno;
}
-static void bsd_setup_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
- I915_READ(BSD_HWS_PGA);
-}
-
static void
bsd_ring_get_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
/* do nothing */
}
static void
bsd_ring_put_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
/* do nothing */
}
static u32
-bsd_ring_get_gem_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+bsd_ring_get_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
static int
bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset)
+ struct intel_ring_buffer *ring,
+ struct drm_i915_gem_execbuffer2 *exec,
+ struct drm_clip_rect *cliprects,
+ uint64_t exec_offset)
{
uint32_t exec_start;
exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
@@ -491,10 +444,10 @@ bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
static int
render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset)
+ struct intel_ring_buffer *ring,
+ struct drm_i915_gem_execbuffer2 *exec,
+ struct drm_clip_rect *cliprects,
+ uint64_t exec_offset)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int nbox = exec->num_cliprects;
@@ -524,7 +477,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
intel_ring_emit(dev, ring, 0);
} else {
intel_ring_begin(dev, ring, 4);
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
intel_ring_emit(dev, ring,
MI_BATCH_BUFFER_START | (2 << 6)
| MI_BATCH_NON_SECURE_I965);
@@ -553,7 +506,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
}
static void cleanup_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
@@ -573,7 +526,7 @@ static void cleanup_status_page(struct drm_device *dev,
}
static int init_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
@@ -603,7 +556,7 @@ static int init_status_page(struct drm_device *dev,
ring->status_page.obj = obj;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
- ring->setup_status_page(dev, ring);
+ intel_ring_setup_status_page(dev, ring);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
ring->name, ring->status_page.gfx_addr);
@@ -617,15 +570,17 @@ err:
return ret;
}
-
int intel_init_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
struct drm_gem_object *obj;
int ret;
ring->dev = dev;
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
if (I915_NEED_GFX_HWS(dev)) {
ret = init_status_page(dev, ring);
@@ -642,7 +597,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
ring->gem_object = obj;
- ret = i915_gem_object_pin(obj, ring->alignment);
+ ret = i915_gem_object_pin(obj, PAGE_SIZE);
if (ret)
goto err_unref;
@@ -668,14 +623,12 @@ int intel_init_ring_buffer(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_kernel_lost_context(dev);
else {
- ring->head = ring->get_head(dev, ring);
- ring->tail = ring->get_tail(dev, ring);
+ ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->size;
}
- INIT_LIST_HEAD(&ring->active_list);
- INIT_LIST_HEAD(&ring->request_list);
return ret;
err_unmap:
@@ -691,7 +644,7 @@ err_hws:
}
void intel_cleanup_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
if (ring->gem_object == NULL)
return;
@@ -704,8 +657,8 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
cleanup_status_page(dev, ring);
}
-int intel_wrap_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int intel_wrap_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
unsigned int *virt;
int rem;
@@ -731,14 +684,15 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
}
int intel_wait_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring, int n)
+ struct intel_ring_buffer *ring, int n)
{
unsigned long end;
+ drm_i915_private_t *dev_priv = dev->dev_private;
trace_i915_ring_wait_begin (dev);
end = jiffies + 3 * HZ;
do {
- ring->head = ring->get_head(dev, ring);
+ ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->size;
@@ -760,7 +714,8 @@ int intel_wait_ring_buffer(struct drm_device *dev,
}
void intel_ring_begin(struct drm_device *dev,
- struct intel_ring_buffer *ring, int num_dwords)
+ struct intel_ring_buffer *ring,
+ int num_dwords)
{
int n = 4*num_dwords;
if (unlikely(ring->tail + n > ring->size))
@@ -772,16 +727,16 @@ void intel_ring_begin(struct drm_device *dev,
}
void intel_ring_advance(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
ring->tail &= ring->size - 1;
- ring->advance_ring(dev, ring);
+ ring->set_tail(dev, ring, ring->tail);
}
void intel_fill_struct(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- void *data,
- unsigned int len)
+ struct intel_ring_buffer *ring,
+ void *data,
+ unsigned int len)
{
unsigned int *virt = ring->virtual_start + ring->tail;
BUG_ON((len&~(4-1)) != 0);
@@ -793,76 +748,136 @@ void intel_fill_struct(struct drm_device *dev,
intel_ring_advance(dev, ring);
}
-struct intel_ring_buffer render_ring = {
+static const struct intel_ring_buffer render_ring = {
.name = "render ring",
- .regs = {
- .ctl = PRB0_CTL,
- .head = PRB0_HEAD,
- .tail = PRB0_TAIL,
- .start = PRB0_START
- },
- .ring_flag = I915_EXEC_RENDER,
+ .id = RING_RENDER,
+ .mmio_base = RENDER_RING_BASE,
.size = 32 * PAGE_SIZE,
- .alignment = PAGE_SIZE,
- .virtual_start = NULL,
- .dev = NULL,
- .gem_object = NULL,
- .head = 0,
- .tail = 0,
- .space = 0,
- .user_irq_refcount = 0,
- .irq_gem_seqno = 0,
- .waiting_gem_seqno = 0,
- .setup_status_page = render_setup_status_page,
.init = init_render_ring,
- .get_head = render_ring_get_head,
- .get_tail = render_ring_get_tail,
- .get_active_head = render_ring_get_active_head,
- .advance_ring = render_ring_advance_ring,
+ .set_tail = ring_set_tail,
.flush = render_ring_flush,
.add_request = render_ring_add_request,
- .get_gem_seqno = render_ring_get_gem_seqno,
+ .get_seqno = render_ring_get_seqno,
.user_irq_get = render_ring_get_user_irq,
.user_irq_put = render_ring_put_user_irq,
.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
- .status_page = {NULL, 0, NULL},
- .map = {0,}
};
/* ring buffer for bit-stream decoder */
-struct intel_ring_buffer bsd_ring = {
+static const struct intel_ring_buffer bsd_ring = {
.name = "bsd ring",
- .regs = {
- .ctl = BSD_RING_CTL,
- .head = BSD_RING_HEAD,
- .tail = BSD_RING_TAIL,
- .start = BSD_RING_START
- },
- .ring_flag = I915_EXEC_BSD,
+ .id = RING_BSD,
+ .mmio_base = BSD_RING_BASE,
.size = 32 * PAGE_SIZE,
- .alignment = PAGE_SIZE,
- .virtual_start = NULL,
- .dev = NULL,
- .gem_object = NULL,
- .head = 0,
- .tail = 0,
- .space = 0,
- .user_irq_refcount = 0,
- .irq_gem_seqno = 0,
- .waiting_gem_seqno = 0,
- .setup_status_page = bsd_setup_status_page,
.init = init_bsd_ring,
- .get_head = bsd_ring_get_head,
- .get_tail = bsd_ring_get_tail,
- .get_active_head = bsd_ring_get_active_head,
- .advance_ring = bsd_ring_advance_ring,
+ .set_tail = ring_set_tail,
.flush = bsd_ring_flush,
.add_request = bsd_ring_add_request,
- .get_gem_seqno = bsd_ring_get_gem_seqno,
+ .get_seqno = bsd_ring_get_seqno,
.user_irq_get = bsd_ring_get_user_irq,
.user_irq_put = bsd_ring_put_user_irq,
.dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
- .status_page = {NULL, 0, NULL},
- .map = {0,}
};
+
+
+static void gen6_bsd_ring_set_tail(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 value)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ /* Every tail move must follow the sequence below */
+ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
+ I915_WRITE(GEN6_BSD_RNCID, 0x0);
+
+ if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
+ GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
+ 50))
+ DRM_ERROR("timed out waiting for IDLE Indicator\n");
+
+ I915_WRITE_TAIL(ring, value);
+ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
+}
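
gen6_bsd_ring_set_tail() brackets the tail write with a wake handshake: disable the ring's sleep messaging via the PSMI control register, poll until the idle indicator clears, write TAIL, then re-enable sleeping. A protocol sketch with stubbed MMIO accessors; mmio_read()/mmio_write(), the register offsets and the bit names are placeholders standing in for I915_READ/I915_WRITE, the GEN6_BSD_* definitions and the driver's wait_for() helper with its 50 ms timeout.

	/* Protocol sketch for the gen6 BSD tail update (stubbed MMIO). */
	#include <stdint.h>
	#include <stdio.h>

	#define PSMI_CTRL	0x0		/* placeholder register offsets */
	#define RING_TAIL_REG	0x4
	#define SLEEP_DISABLE	(1u << 0)	/* placeholder bits */
	#define IDLE_INDICATOR	(1u << 1)

	static uint32_t regs[2];	/* fake MMIO space: PSMI control, ring tail */

	static uint32_t mmio_read(uint32_t reg)  { return regs[reg / 4]; }
	static void mmio_write(uint32_t reg, uint32_t val) { regs[reg / 4] = val; }

	static void bsd_set_tail(uint32_t value)
	{
		int retries = 50;

		/* 1. keep the ring awake while TAIL is moved */
		mmio_write(PSMI_CTRL, SLEEP_DISABLE);

		/* 2. poll (bounded) until the idle-indicator bit clears */
		while ((mmio_read(PSMI_CTRL) & IDLE_INDICATOR) && retries-- > 0)
			;
		if (mmio_read(PSMI_CTRL) & IDLE_INDICATOR)
			fprintf(stderr, "timed out waiting for IDLE indicator\n");

		/* 3. the actual tail move */
		mmio_write(RING_TAIL_REG, value);

		/* 4. allow the ring to sleep again */
		mmio_write(PSMI_CTRL, 0);
	}

	int main(void)
	{
		bsd_set_tail(0x100);
		printf("tail = 0x%x\n", mmio_read(RING_TAIL_REG));
		return 0;
	}
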
+
+static void gen6_bsd_ring_flush(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ intel_ring_begin(dev, ring, 4);
+ intel_ring_emit(dev, ring, MI_FLUSH_DW);
+ intel_ring_emit(dev, ring, 0);
+ intel_ring_emit(dev, ring, 0);
+ intel_ring_emit(dev, ring, 0);
+ intel_ring_advance(dev, ring);
+}
+
+static int
+gen6_bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_i915_gem_execbuffer2 *exec,
+ struct drm_clip_rect *cliprects,
+ uint64_t exec_offset)
+{
+ uint32_t exec_start;
+
+ exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+
+ intel_ring_begin(dev, ring, 2);
+ intel_ring_emit(dev, ring,
+ MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+ /* bit0-7 is the length on GEN6+ */
+ intel_ring_emit(dev, ring, exec_start);
+ intel_ring_advance(dev, ring);
+
+ return 0;
+}
+
+/* ring buffer for Video Codec for Gen6+ */
+static const struct intel_ring_buffer gen6_bsd_ring = {
+ .name = "gen6 bsd ring",
+ .id = RING_BSD,
+ .mmio_base = GEN6_BSD_RING_BASE,
+ .size = 32 * PAGE_SIZE,
+ .init = init_bsd_ring,
+ .set_tail = gen6_bsd_ring_set_tail,
+ .flush = gen6_bsd_ring_flush,
+ .add_request = bsd_ring_add_request,
+ .get_seqno = bsd_ring_get_seqno,
+ .user_irq_get = bsd_ring_get_user_irq,
+ .user_irq_put = bsd_ring_put_user_irq,
+ .dispatch_gem_execbuffer = gen6_bsd_ring_dispatch_gem_execbuffer,
+};
+
+int intel_init_render_ring_buffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ dev_priv->render_ring = render_ring;
+
+ if (!I915_NEED_GFX_HWS(dev)) {
+ dev_priv->render_ring.status_page.page_addr
+ = dev_priv->status_page_dmah->vaddr;
+ memset(dev_priv->render_ring.status_page.page_addr,
+ 0, PAGE_SIZE);
+ }
+
+ return intel_init_ring_buffer(dev, &dev_priv->render_ring);
+}
+
+int intel_init_bsd_ring_buffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (IS_GEN6(dev))
+ dev_priv->bsd_ring = gen6_bsd_ring;
+ else
+ dev_priv->bsd_ring = bsd_ring;
+
+ return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 525e7d3edda8..9725f783db20 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -7,25 +7,31 @@ struct intel_hw_status_page {
struct drm_gem_object *obj;
};
+#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
+#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
+#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
+#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
+#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
+#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
+#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
+#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
+
struct drm_i915_gem_execbuffer2;
struct intel_ring_buffer {
const char *name;
- struct ring_regs {
- u32 ctl;
- u32 head;
- u32 tail;
- u32 start;
- } regs;
- unsigned int ring_flag;
+ enum intel_ring_id {
+ RING_RENDER = 0x1,
+ RING_BSD = 0x2,
+ } id;
+ u32 mmio_base;
unsigned long size;
- unsigned int alignment;
void *virtual_start;
struct drm_device *dev;
struct drm_gem_object *gem_object;
unsigned int head;
unsigned int tail;
- unsigned int space;
+ int space;
struct intel_hw_status_page status_page;
u32 irq_gem_seqno; /* last seq seem at irq time */
@@ -35,30 +41,22 @@ struct intel_ring_buffer {
struct intel_ring_buffer *ring);
void (*user_irq_put)(struct drm_device *dev,
struct intel_ring_buffer *ring);
- void (*setup_status_page)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
int (*init)(struct drm_device *dev,
struct intel_ring_buffer *ring);
- unsigned int (*get_head)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
- unsigned int (*get_tail)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
- unsigned int (*get_active_head)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
- void (*advance_ring)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ void (*set_tail)(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 value);
void (*flush)(struct drm_device *dev,
struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains);
u32 (*add_request)(struct drm_device *dev,
struct intel_ring_buffer *ring,
- struct drm_file *file_priv,
u32 flush_domains);
- u32 (*get_gem_seqno)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ u32 (*get_seqno)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
int (*dispatch_gem_execbuffer)(struct drm_device *dev,
struct intel_ring_buffer *ring,
struct drm_i915_gem_execbuffer2 *exec,
@@ -83,6 +81,11 @@ struct intel_ring_buffer {
*/
struct list_head request_list;
+ /**
+ * Do we have some not yet emitted requests outstanding?
+ */
+ bool outstanding_lazy_request;
+
wait_queue_head_t irq_queue;
drm_local_map_t map;
};
@@ -96,15 +99,13 @@ intel_read_status_page(struct intel_ring_buffer *ring,
}
int intel_init_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ struct intel_ring_buffer *ring);
void intel_cleanup_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ struct intel_ring_buffer *ring);
int intel_wait_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring, int n);
-int intel_wrap_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ struct intel_ring_buffer *ring, int n);
void intel_ring_begin(struct drm_device *dev,
- struct intel_ring_buffer *ring, int n);
+ struct intel_ring_buffer *ring, int n);
static inline void intel_ring_emit(struct drm_device *dev,
struct intel_ring_buffer *ring,
@@ -125,7 +126,12 @@ void intel_ring_advance(struct drm_device *dev,
u32 intel_ring_get_seqno(struct drm_device *dev,
struct intel_ring_buffer *ring);
-extern struct intel_ring_buffer render_ring;
-extern struct intel_ring_buffer bsd_ring;
+int intel_init_render_ring_buffer(struct drm_device *dev);
+int intel_init_bsd_ring_buffer(struct drm_device *dev);
+
+u32 intel_ring_get_active_head(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+void intel_ring_setup_status_page(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e8e902d614ed..a84224f37605 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -65,8 +65,11 @@ static const char *tv_format_names[] = {
struct intel_sdvo {
struct intel_encoder base;
+ struct i2c_adapter *i2c;
u8 slave_addr;
+ struct i2c_adapter ddc;
+
/* Register for the SDVO device: SDVOB or SDVOC */
int sdvo_reg;
@@ -106,16 +109,12 @@ struct intel_sdvo {
bool is_hdmi;
/**
- * This is set if we detect output of sdvo device as LVDS.
+ * This is set if we detect the output of the sdvo device as LVDS
+ * and have a valid fixed mode to use with the panel.
*/
bool is_lvds;
/**
- * This is sdvo flags for input timing.
- */
- uint8_t sdvo_flags;
-
- /**
* This is sdvo fixed pannel mode pointer
*/
struct drm_display_mode *sdvo_lvds_fixed_mode;
@@ -129,9 +128,8 @@ struct intel_sdvo {
/* DDC bus used by this SDVO encoder */
uint8_t ddc_bus;
- /* Mac mini hack -- use the same DDC as the analog connector */
- struct i2c_adapter *analog_ddc_bus;
-
+ /* Input timings for adjusted_mode */
+ struct intel_sdvo_dtd input_dtd;
};
struct intel_sdvo_connector {
@@ -186,9 +184,15 @@ struct intel_sdvo_connector {
u32 cur_dot_crawl, max_dot_crawl;
};
-static struct intel_sdvo *enc_to_intel_sdvo(struct drm_encoder *encoder)
+static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_sdvo, base.base);
+}
+
+static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
{
- return container_of(enc_to_intel_encoder(encoder), struct intel_sdvo, base);
+ return container_of(intel_attached_encoder(connector),
+ struct intel_sdvo, base);
}
static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
@@ -213,7 +217,7 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
*/
static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
{
- struct drm_device *dev = intel_sdvo->base.enc.dev;
+ struct drm_device *dev = intel_sdvo->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 bval = val, cval = val;
int i;
@@ -245,49 +249,29 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
{
- u8 out_buf[2] = { addr, 0 };
- u8 buf[2];
struct i2c_msg msgs[] = {
{
- .addr = intel_sdvo->slave_addr >> 1,
+ .addr = intel_sdvo->slave_addr,
.flags = 0,
.len = 1,
- .buf = out_buf,
+ .buf = &addr,
},
{
- .addr = intel_sdvo->slave_addr >> 1,
+ .addr = intel_sdvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
- .buf = buf,
+ .buf = ch,
}
};
int ret;
- if ((ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 2)) == 2)
- {
- *ch = buf[0];
+ if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2)
return true;
- }
DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
return false;
}
-static bool intel_sdvo_write_byte(struct intel_sdvo *intel_sdvo, int addr, u8 ch)
-{
- u8 out_buf[2] = { addr, ch };
- struct i2c_msg msgs[] = {
- {
- .addr = intel_sdvo->slave_addr >> 1,
- .flags = 0,
- .len = 2,
- .buf = out_buf,
- }
- };
-
- return i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 1) == 1;
-}
-
#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
/** Mapping of command numbers to names, for debug output */
static const struct _sdvo_cmd_name {
@@ -432,22 +416,6 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
DRM_LOG_KMS("\n");
}
-static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
- const void *args, int args_len)
-{
- int i;
-
- intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
-
- for (i = 0; i < args_len; i++) {
- if (!intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0 - i,
- ((u8*)args)[i]))
- return false;
- }
-
- return intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_OPCODE, cmd);
-}
-
static const char *cmd_status_names[] = {
"Power on",
"Success",
@@ -458,54 +426,115 @@ static const char *cmd_status_names[] = {
"Scaling not supported"
};
-static void intel_sdvo_debug_response(struct intel_sdvo *intel_sdvo,
- void *response, int response_len,
- u8 status)
+static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
+ const void *args, int args_len)
{
- int i;
+ u8 buf[args_len*2 + 2], status;
+ struct i2c_msg msgs[args_len + 3];
+ int i, ret;
- DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
- for (i = 0; i < response_len; i++)
- DRM_LOG_KMS("%02X ", ((u8 *)response)[i]);
- for (; i < 8; i++)
- DRM_LOG_KMS(" ");
- if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
- DRM_LOG_KMS("(%s)", cmd_status_names[status]);
- else
- DRM_LOG_KMS("(??? %d)", status);
- DRM_LOG_KMS("\n");
+ intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
+
+ for (i = 0; i < args_len; i++) {
+ msgs[i].addr = intel_sdvo->slave_addr;
+ msgs[i].flags = 0;
+ msgs[i].len = 2;
+		msgs[i].buf = buf + 2*i;
+ buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
+ buf[2*i + 1] = ((u8*)args)[i];
+ }
+ msgs[i].addr = intel_sdvo->slave_addr;
+ msgs[i].flags = 0;
+ msgs[i].len = 2;
+ msgs[i].buf = buf + 2*i;
+ buf[2*i + 0] = SDVO_I2C_OPCODE;
+ buf[2*i + 1] = cmd;
+
+ /* the following two are to read the response */
+ status = SDVO_I2C_CMD_STATUS;
+ msgs[i+1].addr = intel_sdvo->slave_addr;
+ msgs[i+1].flags = 0;
+ msgs[i+1].len = 1;
+ msgs[i+1].buf = &status;
+
+ msgs[i+2].addr = intel_sdvo->slave_addr;
+ msgs[i+2].flags = I2C_M_RD;
+ msgs[i+2].len = 1;
+ msgs[i+2].buf = &status;
+
+ ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
+ return false;
+ }
+ if (ret != i+3) {
+ /* failure in I2C transfer */
+ DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
+ return false;
+ }
+
+ i = 3;
+ while (status == SDVO_CMD_STATUS_PENDING && i--) {
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
+ &status))
+ return false;
+ }
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("command returns response %s [%d]\n",
+ status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???",
+ status);
+ return false;
+ }
+
+ return true;
}
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
void *response, int response_len)
{
- int i;
+ u8 retry = 5;
u8 status;
- u8 retry = 50;
-
- while (retry--) {
- /* Read the command response */
- for (i = 0; i < response_len; i++) {
- if (!intel_sdvo_read_byte(intel_sdvo,
- SDVO_I2C_RETURN_0 + i,
- &((u8 *)response)[i]))
- return false;
- }
+ int i;
- /* read the return status */
- if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS,
+ /*
+ * The documentation states that all commands will be
+ * processed within 15µs, and that we need only poll
+ * the status byte a maximum of 3 times in order for the
+ * command to be complete.
+ *
+ * Check 5 times in case the hardware failed to read the docs.
+ */
+ do {
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
&status))
return false;
+ } while (status == SDVO_CMD_STATUS_PENDING && --retry);
- intel_sdvo_debug_response(intel_sdvo, response, response_len,
- status);
- if (status != SDVO_CMD_STATUS_PENDING)
- break;
+ DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
+ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+ DRM_LOG_KMS("(%s)", cmd_status_names[status]);
+ else
+ DRM_LOG_KMS("(??? %d)", status);
+
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ goto log_fail;
- mdelay(50);
+ /* Read the command response */
+ for (i = 0; i < response_len; i++) {
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_RETURN_0 + i,
+ &((u8 *)response)[i]))
+ goto log_fail;
+ DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
}
+ DRM_LOG_KMS("\n");
+ return true;
- return status == SDVO_CMD_STATUS_SUCCESS;
+log_fail:
+ DRM_LOG_KMS("\n");
+ return false;
}
static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
@@ -518,71 +547,17 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
return 4;
}
-/**
- * Try to read the response after issuie the DDC switch command. But it
- * is noted that we must do the action of reading response and issuing DDC
- * switch command in one I2C transaction. Otherwise when we try to start
- * another I2C transaction after issuing the DDC bus switch, it will be
- * switched to the internal SDVO register.
- */
-static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
- u8 target)
+static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
+ u8 ddc_bus)
{
- u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
- struct i2c_msg msgs[] = {
- {
- .addr = intel_sdvo->slave_addr >> 1,
- .flags = 0,
- .len = 2,
- .buf = out_buf,
- },
- /* the following two are to read the response */
- {
- .addr = intel_sdvo->slave_addr >> 1,
- .flags = 0,
- .len = 1,
- .buf = cmd_buf,
- },
- {
- .addr = intel_sdvo->slave_addr >> 1,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = ret_value,
- },
- };
-
- intel_sdvo_debug_write(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
- &target, 1);
- /* write the DDC switch command argument */
- intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0, target);
-
- out_buf[0] = SDVO_I2C_OPCODE;
- out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
- cmd_buf[0] = SDVO_I2C_CMD_STATUS;
- cmd_buf[1] = 0;
- ret_value[0] = 0;
- ret_value[1] = 0;
-
- ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 3);
- if (ret != 3) {
- /* failure in I2C transfer */
- DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
- return;
- }
- if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("DDC switch command returns response %d\n",
- ret_value[0]);
- return;
- }
- return;
+ return intel_sdvo_write_cmd(intel_sdvo,
+ SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+ &ddc_bus, 1);
}
static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
{
- if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
- return false;
-
- return intel_sdvo_read_response(intel_sdvo, NULL, 0);
+ return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len);
}
static bool
@@ -1022,8 +997,6 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct intel_sdvo_dtd input_dtd;
-
/* Reset the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
return false;
@@ -1035,14 +1008,12 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
return false;
if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
- &input_dtd))
+ &intel_sdvo->input_dtd))
return false;
- intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
- intel_sdvo->sdvo_flags = input_dtd.part2.sdvo_flags;
+ intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);
drm_mode_set_crtcinfo(adjusted_mode, 0);
- mode->clock = adjusted_mode->clock;
return true;
}
@@ -1050,7 +1021,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+ int multiplier;
/* We need to construct preferred input timings based on our
* output timings. To do that, we have to set the output
@@ -1065,10 +1037,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
mode,
adjusted_mode);
} else if (intel_sdvo->is_lvds) {
- drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0);
-
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
- intel_sdvo->sdvo_lvds_fixed_mode))
+ intel_sdvo->sdvo_lvds_fixed_mode))
return false;
(void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
@@ -1077,9 +1047,10 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
}
/* Make the CRTC code factor in the SDVO pixel multiplier. The
- * SDVO device will be told of the multiplier during mode_set.
+ * SDVO device will factor out the multiplier during mode_set.
*/
- adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
+ multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode);
+ intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
return true;
}
@@ -1092,11 +1063,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- u32 sdvox = 0;
- int sdvo_pixel_multiply, rate;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+ u32 sdvox;
struct intel_sdvo_in_out_map in_out;
struct intel_sdvo_dtd input_dtd;
+ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ int rate;
if (!mode)
return;
@@ -1114,28 +1086,23 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
SDVO_CMD_SET_IN_OUT_MAP,
&in_out, sizeof(in_out));
- if (intel_sdvo->is_hdmi) {
- if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
- return;
-
- sdvox |= SDVO_AUDIO_ENABLE;
- }
+ /* Set the output timings to the screen */
+ if (!intel_sdvo_set_target_output(intel_sdvo,
+ intel_sdvo->attached_output))
+ return;
/* We have tried to get input timing in mode_fixup, and filled into
- adjusted_mode */
- intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
- if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
- input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags;
-
- /* If it's a TV, we already set the output timing in mode_fixup.
- * Otherwise, the output timing is equal to the input timing.
+ * adjusted_mode.
*/
- if (!intel_sdvo->is_tv && !intel_sdvo->is_lvds) {
+ if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
+ input_dtd = intel_sdvo->input_dtd;
+ } else {
/* Set the output timing to the screen */
if (!intel_sdvo_set_target_output(intel_sdvo,
intel_sdvo->attached_output))
return;
+ intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
(void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
}
@@ -1143,31 +1110,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
if (!intel_sdvo_set_target_input(intel_sdvo))
return;
- if (intel_sdvo->is_tv) {
- if (!intel_sdvo_set_tv_format(intel_sdvo))
- return;
- }
+ if (intel_sdvo->is_hdmi &&
+ !intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
+ return;
- /* We would like to use intel_sdvo_create_preferred_input_timing() to
- * provide the device with a timing it can support, if it supports that
- * feature. However, presumably we would need to adjust the CRTC to
- * output the preferred timing, and we don't support that currently.
- */
-#if 0
- success = intel_sdvo_create_preferred_input_timing(encoder, clock,
- width, height);
- if (success) {
- struct intel_sdvo_dtd *input_dtd;
+ if (intel_sdvo->is_tv &&
+ !intel_sdvo_set_tv_format(intel_sdvo))
+ return;
- intel_sdvo_get_preferred_input_timing(encoder, &input_dtd);
- intel_sdvo_set_input_timing(encoder, &input_dtd);
- }
-#else
(void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
-#endif
- sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
- switch (sdvo_pixel_multiply) {
+ switch (pixel_multiplier) {
+ default:
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
@@ -1176,14 +1130,14 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
return;
/* Set the SDVO control regs. */
- if (IS_I965G(dev)) {
- sdvox |= SDVO_BORDER_ENABLE;
+ if (INTEL_INFO(dev)->gen >= 4) {
+ sdvox = SDVO_BORDER_ENABLE;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
} else {
- sdvox |= I915_READ(intel_sdvo->sdvo_reg);
+ sdvox = I915_READ(intel_sdvo->sdvo_reg);
switch (intel_sdvo->sdvo_reg) {
case SDVOB:
sdvox &= SDVOB_PRESERVE_MASK;
@@ -1196,16 +1150,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
}
if (intel_crtc->pipe == 1)
sdvox |= SDVO_PIPE_B_SELECT;
+ if (intel_sdvo->is_hdmi)
+ sdvox |= SDVO_AUDIO_ENABLE;
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
- sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
+ sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
}
- if (intel_sdvo->sdvo_flags & SDVO_NEED_TO_STALL)
+ if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
sdvox |= SDVO_STALL_SELECT;
intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
@@ -1214,7 +1170,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 temp;
@@ -1260,8 +1216,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
static int intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@@ -1285,7 +1240,38 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
{
- return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps));
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_DEVICE_CAPS,
+ caps, sizeof(*caps)))
+ return false;
+
+ DRM_DEBUG_KMS("SDVO capabilities:\n"
+ " vendor_id: %d\n"
+ " device_id: %d\n"
+ " device_rev_id: %d\n"
+ " sdvo_version_major: %d\n"
+ " sdvo_version_minor: %d\n"
+ " sdvo_inputs_mask: %d\n"
+ " smooth_scaling: %d\n"
+ " sharp_scaling: %d\n"
+ " up_scaling: %d\n"
+ " down_scaling: %d\n"
+ " stall_support: %d\n"
+ " output_flags: %d\n",
+ caps->vendor_id,
+ caps->device_id,
+ caps->device_rev_id,
+ caps->sdvo_version_major,
+ caps->sdvo_version_minor,
+ caps->sdvo_inputs_mask,
+ caps->smooth_scaling,
+ caps->sharp_scaling,
+ caps->up_scaling,
+ caps->down_scaling,
+ caps->stall_support,
+ caps->output_flags);
+
+ return true;
}
/* No use! */
@@ -1389,22 +1375,33 @@ intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
return (caps > 1);
}
+static struct edid *
+intel_sdvo_get_edid(struct drm_connector *connector)
+{
+ struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+ return drm_get_edid(connector, &sdvo->ddc);
+}
+
static struct drm_connector *
intel_find_analog_connector(struct drm_device *dev)
{
struct drm_connector *connector;
- struct drm_encoder *encoder;
- struct intel_sdvo *intel_sdvo;
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- intel_sdvo = enc_to_intel_sdvo(encoder);
- if (intel_sdvo->base.type == INTEL_OUTPUT_ANALOG) {
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (encoder == intel_attached_encoder(connector))
+ struct intel_sdvo *encoder;
+
+ list_for_each_entry(encoder,
+ &dev->mode_config.encoder_list,
+ base.base.head) {
+ if (encoder->base.type == INTEL_OUTPUT_ANALOG) {
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ head) {
+ if (&encoder->base ==
+ intel_attached_encoder(connector))
return connector;
}
}
}
+
return NULL;
}
@@ -1424,65 +1421,66 @@ intel_analog_is_connected(struct drm_device *dev)
return true;
}
+/* Mac mini hack -- use the same DDC as the analog connector */
+static struct edid *
+intel_sdvo_get_analog_edid(struct drm_connector *connector)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+
+ if (!intel_analog_is_connected(connector->dev))
+ return NULL;
+
+ return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+}
+
enum drm_connector_status
intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- enum drm_connector_status status = connector_status_connected;
- struct edid *edid = NULL;
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ enum drm_connector_status status;
+ struct edid *edid;
- edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
+ edid = intel_sdvo_get_edid(connector);
- /* This is only applied to SDVO cards with multiple outputs */
if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
- uint8_t saved_ddc, temp_ddc;
- saved_ddc = intel_sdvo->ddc_bus;
- temp_ddc = intel_sdvo->ddc_bus >> 1;
+ u8 ddc, saved_ddc = intel_sdvo->ddc_bus;
+
/*
* Don't use the 1 as the argument of DDC bus switch to get
* the EDID. It is used for SDVO SPD ROM.
*/
- while(temp_ddc > 1) {
- intel_sdvo->ddc_bus = temp_ddc;
- edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
- if (edid) {
- /*
- * When we can get the EDID, maybe it is the
- * correct DDC bus. Update it.
- */
- intel_sdvo->ddc_bus = temp_ddc;
+ for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
+ intel_sdvo->ddc_bus = ddc;
+ edid = intel_sdvo_get_edid(connector);
+ if (edid)
break;
- }
- temp_ddc >>= 1;
}
+ /*
+ * If we found the EDID on the other bus,
+ * assume that is the correct DDC bus.
+ */
if (edid == NULL)
intel_sdvo->ddc_bus = saved_ddc;
}
- /* when there is no edid and no monitor is connected with VGA
- * port, try to use the CRT ddc to read the EDID for DVI-connector
+
+ /*
+	 * When there is no EDID and no monitor is connected to the VGA
+	 * port, try the CRT DDC to read the EDID for the DVI connector.
*/
- if (edid == NULL && intel_sdvo->analog_ddc_bus &&
- !intel_analog_is_connected(connector->dev))
- edid = drm_get_edid(connector, intel_sdvo->analog_ddc_bus);
+ if (edid == NULL)
+ edid = intel_sdvo_get_analog_edid(connector);
+ status = connector_status_unknown;
if (edid != NULL) {
- bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
- bool need_digital = !!(intel_sdvo_connector->output_flag & SDVO_TMDS_MASK);
-
/* DDC bus is shared, match EDID to connector type */
- if (is_digital && need_digital)
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
- else if (is_digital != need_digital)
- status = connector_status_disconnected;
-
+ }
connector->display_info.raw_edid = NULL;
- } else
- status = connector_status_disconnected;
+ kfree(edid);
+ }
- kfree(edid);
-
return status;
}
@@ -1490,13 +1488,12 @@ static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
uint16_t response;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret;
if (!intel_sdvo_write_cmd(intel_sdvo,
- SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
+ SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
return connector_status_unknown;
if (intel_sdvo->is_tv) {
/* add 30ms delay when the output type is SDVO-TV */
@@ -1505,7 +1502,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
return connector_status_unknown;
- DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
+ DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
+ response & 0xff, response >> 8,
+ intel_sdvo_connector->output_flag);
if (response == 0)
return connector_status_disconnected;
@@ -1538,12 +1537,10 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- int num_modes;
+ struct edid *edid;
/* set the bus switch and get the modes */
- num_modes = intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
+ edid = intel_sdvo_get_edid(connector);
/*
* Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1551,12 +1548,14 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
* DDC fails, check to see if the analog output is disconnected, in
* which case we'll look there for the digital DDC data.
*/
- if (num_modes == 0 &&
- intel_sdvo->analog_ddc_bus &&
- !intel_analog_is_connected(connector->dev)) {
- /* Switch to the analog ddc bus and try that
- */
- (void) intel_ddc_get_modes(connector, intel_sdvo->analog_ddc_bus);
+ if (edid == NULL)
+ edid = intel_sdvo_get_analog_edid(connector);
+
+ if (edid != NULL) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ drm_add_edid_modes(connector, edid);
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
}
}
@@ -1627,8 +1626,7 @@ struct drm_display_mode sdvo_tv_modes[] = {
static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct intel_sdvo_sdtv_resolution_request tv_res;
uint32_t reply = 0, format_map = 0;
int i;
@@ -1644,7 +1642,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
return;
BUILD_BUG_ON(sizeof(tv_res) != 3);
- if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ if (!intel_sdvo_write_cmd(intel_sdvo,
+ SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
&tv_res, sizeof(tv_res)))
return;
if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
@@ -1662,8 +1661,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct drm_display_mode *newmode;
@@ -1672,7 +1670,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* Assume that the preferred modes are
* arranged in priority order.
*/
- intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
+ intel_ddc_get_modes(connector, intel_sdvo->i2c);
if (list_empty(&connector->probed_modes) == false)
goto end;
@@ -1693,6 +1691,10 @@ end:
if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
intel_sdvo->sdvo_lvds_fixed_mode =
drm_mode_duplicate(connector->dev, newmode);
+
+ drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode,
+ 0);
+
intel_sdvo->is_lvds = true;
break;
}
@@ -1775,8 +1777,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
uint16_t temp_value;
uint8_t cmd;
@@ -1879,9 +1880,8 @@ set_value:
done:
- if (encoder->crtc) {
- struct drm_crtc *crtc = encoder->crtc;
-
+ if (intel_sdvo->base.base.crtc) {
+ struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
crtc->y, crtc->fb);
}
@@ -1909,20 +1909,18 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
.get_modes = intel_sdvo_get_modes,
.mode_valid = intel_sdvo_mode_valid,
- .best_encoder = intel_attached_encoder,
+ .best_encoder = intel_best_encoder,
};
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
-
- if (intel_sdvo->analog_ddc_bus)
- intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
drm_mode_destroy(encoder->dev,
intel_sdvo->sdvo_lvds_fixed_mode);
+ i2c_del_adapter(&intel_sdvo->ddc);
intel_encoder_destroy(encoder);
}
@@ -1990,54 +1988,39 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
intel_sdvo_guess_ddc_bus(sdvo);
}
-static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device)
+static void
+intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
+ struct intel_sdvo *sdvo, u32 reg)
{
- return intel_sdvo_set_target_output(intel_sdvo,
- device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) &&
- intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
- &intel_sdvo->is_hdmi, 1);
-}
+ struct sdvo_device_mapping *mapping;
+ u8 pin, speed;
-static struct intel_sdvo *
-intel_sdvo_chan_to_intel_sdvo(struct intel_i2c_chan *chan)
-{
- struct drm_device *dev = chan->drm_dev;
- struct drm_encoder *encoder;
+ if (IS_SDVOB(reg))
+ mapping = &dev_priv->sdvo_mappings[0];
+ else
+ mapping = &dev_priv->sdvo_mappings[1];
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- if (intel_sdvo->base.ddc_bus == &chan->adapter)
- return intel_sdvo;
+ pin = GMBUS_PORT_DPB;
+ speed = GMBUS_RATE_1MHZ >> 8;
+ if (mapping->initialized) {
+ pin = mapping->i2c_pin;
+ speed = mapping->i2c_speed;
}
- return NULL;
+ sdvo->i2c = &dev_priv->gmbus[pin].adapter;
+ intel_gmbus_set_speed(sdvo->i2c, speed);
+ intel_gmbus_force_bit(sdvo->i2c, true);
}
-static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
- struct i2c_msg msgs[], int num)
+static bool
+intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device)
{
- struct intel_sdvo *intel_sdvo;
- struct i2c_algo_bit_data *algo_data;
- const struct i2c_algorithm *algo;
-
- algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
- intel_sdvo =
- intel_sdvo_chan_to_intel_sdvo((struct intel_i2c_chan *)
- (algo_data->data));
- if (intel_sdvo == NULL)
- return -EINVAL;
-
- algo = intel_sdvo->base.i2c_bus->algo;
-
- intel_sdvo_set_control_bus_switch(intel_sdvo, intel_sdvo->ddc_bus);
- return algo->master_xfer(i2c_adap, msgs, num);
+ return intel_sdvo_set_target_output(intel_sdvo,
+ device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) &&
+ intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
+ &intel_sdvo->is_hdmi, 1);
}
-static struct i2c_algorithm intel_sdvo_i2c_bit_algo = {
- .master_xfer = intel_sdvo_master_xfer,
-};
-
static u8
intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
{
@@ -2076,26 +2059,29 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
}
static void
-intel_sdvo_connector_init(struct drm_encoder *encoder,
- struct drm_connector *connector)
+intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
+ struct intel_sdvo *encoder)
{
- drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
- connector->connector_type);
+ drm_connector_init(encoder->base.base.dev,
+ &connector->base.base,
+ &intel_sdvo_connector_funcs,
+ connector->base.base.connector_type);
- drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
+ drm_connector_helper_add(&connector->base.base,
+ &intel_sdvo_connector_helper_funcs);
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->base.base.interlace_allowed = 0;
+ connector->base.base.doublescan_allowed = 0;
+ connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
- drm_mode_connector_attach_encoder(connector, encoder);
- drm_sysfs_connector_add(connector);
+ intel_connector_attach_encoder(&connector->base, &encoder->base);
+ drm_sysfs_connector_add(&connector->base.base);
}
static bool
intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
{
- struct drm_encoder *encoder = &intel_sdvo->base.enc;
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_connector *connector;
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
@@ -2130,7 +2116,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT));
- intel_sdvo_connector_init(encoder, connector);
+ intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
return true;
}
@@ -2138,83 +2124,83 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
static bool
intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
{
- struct drm_encoder *encoder = &intel_sdvo->base.enc;
- struct drm_connector *connector;
- struct intel_connector *intel_connector;
- struct intel_sdvo_connector *intel_sdvo_connector;
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
if (!intel_sdvo_connector)
return false;
intel_connector = &intel_sdvo_connector->base;
- connector = &intel_connector->base;
- encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
- connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
- intel_sdvo->controlled_output |= type;
- intel_sdvo_connector->output_flag = type;
+ intel_sdvo->controlled_output |= type;
+ intel_sdvo_connector->output_flag = type;
- intel_sdvo->is_tv = true;
- intel_sdvo->base.needs_tv_clock = true;
- intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+ intel_sdvo->is_tv = true;
+ intel_sdvo->base.needs_tv_clock = true;
+ intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
- intel_sdvo_connector_init(encoder, connector);
+ intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
- if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
+ if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
goto err;
- if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
goto err;
- return true;
+ return true;
err:
- intel_sdvo_destroy_enhance_property(connector);
- kfree(intel_sdvo_connector);
+ intel_sdvo_destroy(connector);
return false;
}
static bool
intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
{
- struct drm_encoder *encoder = &intel_sdvo->base.enc;
- struct drm_connector *connector;
- struct intel_connector *intel_connector;
- struct intel_sdvo_connector *intel_sdvo_connector;
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
if (!intel_sdvo_connector)
return false;
intel_connector = &intel_sdvo_connector->base;
- connector = &intel_connector->base;
+ connector = &intel_connector->base;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
- encoder->encoder_type = DRM_MODE_ENCODER_DAC;
- connector->connector_type = DRM_MODE_CONNECTOR_VGA;
-
- if (device == 0) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
- } else if (device == 1) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
- }
-
- intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+
+ if (device == 0) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+ } else if (device == 1) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+ }
+
+ intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT));
- intel_sdvo_connector_init(encoder, connector);
- return true;
+ intel_sdvo_connector_init(intel_sdvo_connector,
+ intel_sdvo);
+ return true;
}
static bool
intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
{
- struct drm_encoder *encoder = &intel_sdvo->base.enc;
- struct drm_connector *connector;
- struct intel_connector *intel_connector;
- struct intel_sdvo_connector *intel_sdvo_connector;
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
if (!intel_sdvo_connector)
@@ -2222,29 +2208,28 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
- encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
- connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
-
- if (device == 0) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
- } else if (device == 1) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
- }
-
- intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+
+ if (device == 0) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+ } else if (device == 1) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+ }
+
+ intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
(1 << INTEL_SDVO_LVDS_CLONE_BIT));
- intel_sdvo_connector_init(encoder, connector);
- if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
goto err;
return true;
err:
- intel_sdvo_destroy_enhance_property(connector);
- kfree(intel_sdvo_connector);
+ intel_sdvo_destroy(connector);
return false;
}
@@ -2309,7 +2294,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
int type)
{
- struct drm_device *dev = intel_sdvo->base.enc.dev;
+ struct drm_device *dev = intel_sdvo->base.base.dev;
struct intel_sdvo_tv_format format;
uint32_t format_map, i;
@@ -2375,7 +2360,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
struct intel_sdvo_enhancements_reply enhancements)
{
- struct drm_device *dev = intel_sdvo->base.enc.dev;
+ struct drm_device *dev = intel_sdvo->base.base.dev;
struct drm_connector *connector = &intel_sdvo_connector->base.base;
uint16_t response, data_value[2];
@@ -2504,7 +2489,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
struct intel_sdvo_enhancements_reply enhancements)
{
- struct drm_device *dev = intel_sdvo->base.enc.dev;
+ struct drm_device *dev = intel_sdvo->base.base.dev;
struct drm_connector *connector = &intel_sdvo_connector->base.base;
uint16_t response, data_value[2];
@@ -2522,11 +2507,10 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
uint16_t response;
} enhancements;
- if (!intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
- &enhancements, sizeof(enhancements)))
- return false;
-
+ enhancements.response = 0;
+ intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+ &enhancements, sizeof(enhancements));
if (enhancements.response == 0) {
DRM_DEBUG_KMS("No enhancement is supported\n");
return true;
@@ -2538,7 +2522,43 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
else
return true;
+}
+
+static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct intel_sdvo *sdvo = adapter->algo_data;
+
+ if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
+ return -EIO;
+
+ return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
+}
+static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
+{
+ struct intel_sdvo *sdvo = adapter->algo_data;
+ return sdvo->i2c->algo->functionality(sdvo->i2c);
+}
+
+static const struct i2c_algorithm intel_sdvo_ddc_proxy = {
+ .master_xfer = intel_sdvo_ddc_proxy_xfer,
+ .functionality = intel_sdvo_ddc_proxy_func
+};
+
+static bool
+intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
+ struct drm_device *dev)
+{
+ sdvo->ddc.owner = THIS_MODULE;
+ sdvo->ddc.class = I2C_CLASS_DDC;
+ snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
+ sdvo->ddc.dev.parent = &dev->pdev->dev;
+ sdvo->ddc.algo_data = sdvo;
+ sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
+
+ return i2c_add_adapter(&sdvo->ddc) == 0;
}
bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
@@ -2546,95 +2566,66 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
- u8 ch[0x40];
int i;
- u32 i2c_reg, ddc_reg, analog_ddc_reg;
intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
if (!intel_sdvo)
return false;
+ if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
+ kfree(intel_sdvo);
+ return false;
+ }
+
intel_sdvo->sdvo_reg = sdvo_reg;
intel_encoder = &intel_sdvo->base;
intel_encoder->type = INTEL_OUTPUT_SDVO;
+ /* encoder type will be decided later */
+ drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0);
- if (HAS_PCH_SPLIT(dev)) {
- i2c_reg = PCH_GPIOE;
- ddc_reg = PCH_GPIOE;
- analog_ddc_reg = PCH_GPIOA;
- } else {
- i2c_reg = GPIOE;
- ddc_reg = GPIOE;
- analog_ddc_reg = GPIOA;
- }
-
- /* setup the DDC bus. */
- if (IS_SDVOB(sdvo_reg))
- intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB");
- else
- intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC");
-
- if (!intel_encoder->i2c_bus)
- goto err_inteloutput;
-
- intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
-
- /* Save the bit-banging i2c functionality for use by the DDC wrapper */
- intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality;
+ intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+ intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
- if (!intel_sdvo_read_byte(intel_sdvo, i, &ch[i])) {
+ u8 byte;
+
+ if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
IS_SDVOB(sdvo_reg) ? 'B' : 'C');
- goto err_i2c;
+ goto err;
}
}
- /* setup the DDC bus. */
- if (IS_SDVOB(sdvo_reg)) {
- intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
- intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
- "SDVOB/VGA DDC BUS");
+ if (IS_SDVOB(sdvo_reg))
dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
- } else {
- intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
- intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
- "SDVOC/VGA DDC BUS");
+ else
dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
- }
- if (intel_encoder->ddc_bus == NULL || intel_sdvo->analog_ddc_bus == NULL)
- goto err_i2c;
-
- /* Wrap with our custom algo which switches to DDC mode */
- intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
- /* encoder type will be decided later */
- drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0);
- drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
/* In default case sdvo lvds is false */
if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
- goto err_enc;
+ goto err;
if (intel_sdvo_output_setup(intel_sdvo,
intel_sdvo->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
IS_SDVOB(sdvo_reg) ? 'B' : 'C');
- goto err_enc;
+ goto err;
}
intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
/* Set the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
- goto err_enc;
+ goto err;
if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
&intel_sdvo->pixel_clock_min,
&intel_sdvo->pixel_clock_max))
- goto err_enc;
+ goto err;
DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
"clock range %dMHz - %dMHz, "
@@ -2654,16 +2645,9 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
return true;
-err_enc:
- drm_encoder_cleanup(&intel_encoder->enc);
-err_i2c:
- if (intel_sdvo->analog_ddc_bus != NULL)
- intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
- if (intel_encoder->ddc_bus != NULL)
- intel_i2c_destroy(intel_encoder->ddc_bus);
- if (intel_encoder->i2c_bus != NULL)
- intel_i2c_destroy(intel_encoder->i2c_bus);
-err_inteloutput:
+err:
+ drm_encoder_cleanup(&intel_encoder->base);
+ i2c_del_adapter(&intel_sdvo->ddc);
kfree(intel_sdvo);
return false;
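
The rewritten intel_sdvo_write_cmd() above packs every argument write, the opcode write and the status read-back into a single i2c_transfer(), which is what lets the DDC proxy switch buses without another transaction slipping in between. A worked example, derived purely from the code above (no new API; the helper name is hypothetical), for the one-byte SDVO_CMD_SET_CONTROL_BUS_SWITCH command:

/*
 * With args_len == 1 the command is carried by i + 3 == 4 messages in one
 * i2c_transfer():
 *
 *   msgs[0]: write { SDVO_I2C_ARG_0,      ddc_bus                         }
 *   msgs[1]: write { SDVO_I2C_OPCODE,     SDVO_CMD_SET_CONTROL_BUS_SWITCH }
 *   msgs[2]: write { SDVO_I2C_CMD_STATUS }   -- select the status register
 *   msgs[3]: read  { status }                -- read back in the same transfer
 *
 * If the status is still SDVO_CMD_STATUS_PENDING afterwards, the code
 * re-polls it up to three more times via intel_sdvo_read_byte().
 */
static bool example_switch_ddc(struct intel_sdvo *sdvo, u8 ddc_bus)
{
	/* Mirrors intel_sdvo_set_control_bus_switch() above. */
	return intel_sdvo_write_cmd(sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
				    &ddc_bus, 1);
}
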
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 4a117e318a73..2f7681989316 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -48,7 +48,7 @@ struct intel_tv {
struct intel_encoder base;
int type;
- char *tv_format;
+ const char *tv_format;
int margin[4];
u32 save_TV_H_CTL_1;
u32 save_TV_H_CTL_2;
@@ -350,7 +350,7 @@ static const struct video_levels component_levels = {
struct tv_mode {
- char *name;
+ const char *name;
int clock;
int refresh; /* in millihertz (for precision) */
u32 oversample;
@@ -900,7 +900,14 @@ static const struct tv_mode tv_modes[] = {
static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
{
- return container_of(enc_to_intel_encoder(encoder), struct intel_tv, base);
+ return container_of(encoder, struct intel_tv, base.base);
+}
+
+static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_tv,
+ base);
}
static void
@@ -922,7 +929,7 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode)
}
static const struct tv_mode *
-intel_tv_mode_lookup (char *tv_format)
+intel_tv_mode_lookup(const char *tv_format)
{
int i;
@@ -936,22 +943,23 @@ intel_tv_mode_lookup (char *tv_format)
}
static const struct tv_mode *
-intel_tv_mode_find (struct intel_tv *intel_tv)
+intel_tv_mode_find(struct intel_tv *intel_tv)
{
return intel_tv_mode_lookup(intel_tv->tv_format);
}
static enum drm_mode_status
-intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
+intel_tv_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
/* Ensure TV refresh is close to desired refresh */
if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
< 1000)
return MODE_OK;
+
return MODE_CLOCK_RANGE;
}
@@ -1131,7 +1139,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
color_conversion->av);
}
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
I915_WRITE(TV_CLR_KNOBS, 0x00404000);
else
I915_WRITE(TV_CLR_KNOBS, 0x00606000);
@@ -1157,12 +1165,12 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
/* Wait for vblank for the disable to take effect */
- if (!IS_I9XX(dev))
+ if (IS_GEN2(dev))
intel_wait_for_vblank(dev, intel_crtc->pipe);
- I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
+ I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE);
/* Wait for vblank for the disable to take effect. */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_pipe_off(dev, intel_crtc->pipe);
/* Filter ctl must be set before TV_WIN_SIZE */
I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
@@ -1196,7 +1204,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
for (i = 0; i < 43; i++)
I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
- I915_WRITE(TV_DAC, 0);
+ I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
I915_WRITE(TV_CTL, tv_ctl);
}
@@ -1228,15 +1236,13 @@ static const struct drm_display_mode reported_modes[] = {
static int
intel_tv_detect_type (struct intel_tv *intel_tv)
{
- struct drm_encoder *encoder = &intel_tv->base.enc;
+ struct drm_encoder *encoder = &intel_tv->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
u32 tv_ctl, save_tv_ctl;
u32 tv_dac, save_tv_dac;
- int type = DRM_MODE_CONNECTOR_Unknown;
-
- tv_dac = I915_READ(TV_DAC);
+ int type;
/* Disable TV interrupts around load detect or we'll recurse */
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
@@ -1244,19 +1250,14 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
- /*
- * Detect TV by polling)
- */
- save_tv_dac = tv_dac;
- tv_ctl = I915_READ(TV_CTL);
- save_tv_ctl = tv_ctl;
- tv_ctl &= ~TV_ENC_ENABLE;
- tv_ctl &= ~TV_TEST_MODE_MASK;
+ save_tv_dac = tv_dac = I915_READ(TV_DAC);
+ save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
+
+ /* Poll for TV detection */
+ tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
- tv_dac &= ~TVDAC_SENSE_MASK;
- tv_dac &= ~DAC_A_MASK;
- tv_dac &= ~DAC_B_MASK;
- tv_dac &= ~DAC_C_MASK;
+
+ tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
tv_dac |= (TVDAC_STATE_CHG_EN |
TVDAC_A_SENSE_CTL |
TVDAC_B_SENSE_CTL |
@@ -1265,37 +1266,40 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
DAC_A_0_7_V |
DAC_B_0_7_V |
DAC_C_0_7_V);
+
I915_WRITE(TV_CTL, tv_ctl);
I915_WRITE(TV_DAC, tv_dac);
POSTING_READ(TV_DAC);
- msleep(20);
- tv_dac = I915_READ(TV_DAC);
- I915_WRITE(TV_DAC, save_tv_dac);
- I915_WRITE(TV_CTL, save_tv_ctl);
- POSTING_READ(TV_CTL);
- msleep(20);
+ intel_wait_for_vblank(intel_tv->base.base.dev,
+ to_intel_crtc(intel_tv->base.base.crtc)->pipe);
- /*
- * A B C
- * 0 1 1 Composite
- * 1 0 X svideo
- * 0 0 0 Component
- */
- if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
- DRM_DEBUG_KMS("Detected Composite TV connection\n");
- type = DRM_MODE_CONNECTOR_Composite;
- } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
- DRM_DEBUG_KMS("Detected S-Video TV connection\n");
- type = DRM_MODE_CONNECTOR_SVIDEO;
- } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
- DRM_DEBUG_KMS("Detected Component TV connection\n");
- type = DRM_MODE_CONNECTOR_Component;
- } else {
- DRM_DEBUG_KMS("No TV connection detected\n");
- type = -1;
+ type = -1;
+ if (wait_for((tv_dac = I915_READ(TV_DAC)) & TVDAC_STATE_CHG, 20) == 0) {
+ DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
+ /*
+ * A B C
+ * 0 1 1 Composite
+ * 1 0 X svideo
+ * 0 0 0 Component
+ */
+ if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
+ DRM_DEBUG_KMS("Detected Composite TV connection\n");
+ type = DRM_MODE_CONNECTOR_Composite;
+ } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
+ DRM_DEBUG_KMS("Detected S-Video TV connection\n");
+ type = DRM_MODE_CONNECTOR_SVIDEO;
+ } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
+ DRM_DEBUG_KMS("Detected Component TV connection\n");
+ type = DRM_MODE_CONNECTOR_Component;
+ } else {
+ DRM_DEBUG_KMS("Unrecognised TV connection\n");
+ }
}
+ I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ I915_WRITE(TV_CTL, save_tv_ctl);
+
/* Restore interrupt config */
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
@@ -1311,8 +1315,7 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
*/
static void intel_tv_find_better_format(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
int i;
@@ -1344,14 +1347,13 @@ static enum drm_connector_status
intel_tv_detect(struct drm_connector *connector, bool force)
{
struct drm_display_mode mode;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
int type;
mode = reported_modes[0];
drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
- if (encoder->crtc && encoder->crtc->enabled) {
+ if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
type = intel_tv_detect_type(intel_tv);
} else if (force) {
struct drm_crtc *crtc;
@@ -1375,11 +1377,10 @@ intel_tv_detect(struct drm_connector *connector, bool force)
return connector_status_connected;
}
-static struct input_res {
- char *name;
+static const struct input_res {
+ const char *name;
int w, h;
-} input_res_table[] =
-{
+} input_res_table[] = {
{"640x480", 640, 480},
{"800x600", 800, 600},
{"1024x768", 1024, 768},
@@ -1396,8 +1397,7 @@ static void
intel_tv_chose_preferred_modes(struct drm_connector *connector,
struct drm_display_mode *mode_ptr)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
@@ -1422,15 +1422,14 @@ static int
intel_tv_get_modes(struct drm_connector *connector)
{
struct drm_display_mode *mode_ptr;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
int j, count = 0;
u64 tmp;
for (j = 0; j < ARRAY_SIZE(input_res_table);
j++) {
- struct input_res *input = &input_res_table[j];
+ const struct input_res *input = &input_res_table[j];
unsigned int hactive_s = input->w;
unsigned int vactive_s = input->h;
@@ -1488,9 +1487,8 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
uint64_t val)
{
struct drm_device *dev = connector->dev;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
- struct drm_crtc *crtc = encoder->crtc;
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ struct drm_crtc *crtc = intel_tv->base.base.crtc;
int ret = 0;
bool changed = false;
@@ -1555,7 +1553,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
.mode_valid = intel_tv_mode_valid,
.get_modes = intel_tv_get_modes,
- .best_encoder = intel_attached_encoder,
+ .best_encoder = intel_best_encoder,
};
static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1607,7 +1605,7 @@ intel_tv_init(struct drm_device *dev)
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
u32 tv_dac_on, tv_dac_off, save_tv_dac;
- char **tv_format_names;
+ char *tv_format_names[ARRAY_SIZE(tv_modes)];
int i, initial_mode = 0;
if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
@@ -1661,15 +1659,15 @@ intel_tv_init(struct drm_device *dev)
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
- drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
DRM_MODE_ENCODER_TVDAC);
- drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
- intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
- intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
+ intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
+ intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
/* BIOS margin values */
@@ -1678,21 +1676,19 @@ intel_tv_init(struct drm_device *dev)
intel_tv->margin[TV_MARGIN_RIGHT] = 46;
intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
- intel_tv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
+ intel_tv->tv_format = tv_modes[initial_mode].name;
- drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs);
drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
/* Create TV properties then attach current values */
- tv_format_names = kmalloc(sizeof(char *) * ARRAY_SIZE(tv_modes),
- GFP_KERNEL);
- if (!tv_format_names)
- goto out;
for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
- tv_format_names[i] = tv_modes[i].name;
- drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_modes), tv_format_names);
+ tv_format_names[i] = (char *)tv_modes[i].name;
+ drm_mode_create_tv_properties(dev,
+ ARRAY_SIZE(tv_modes),
+ tv_format_names);
drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
initial_mode);
@@ -1708,6 +1704,5 @@ intel_tv_init(struct drm_device *dev)
drm_connector_attach_property(connector,
dev->mode_config.tv_bottom_margin_property,
intel_tv->margin[TV_MARGIN_BOTTOM]);
-out:
drm_sysfs_connector_add(connector);
}
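
The reworked detect path above replaces the unconditional msleep(20) with a bounded poll on TVDAC_STATE_CHG and then decodes the sense bits according to the A/B/C truth table in the comment. A condensed sketch of that decode follows, using only symbols that appear in the hunk; wait_for() is the driver's poll-with-timeout helper (0 once the condition holds), and example_tv_sense is a hypothetical name for the illustration.

/* Sketch only: bounded poll for the DAC state change, then decode the
 * sense bits exactly as the truth table above describes. */
static int example_tv_sense(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tv_dac;

	if (wait_for((tv_dac = I915_READ(TV_DAC)) & TVDAC_STATE_CHG, 20))
		return -1;	/* no state change within 20ms: nothing detected */

	if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE))
		return DRM_MODE_CONNECTOR_Composite;	/* B and C sense */
	if ((tv_dac & (TVDAC_A_SENSE | TVDAC_B_SENSE)) == TVDAC_A_SENSE)
		return DRM_MODE_CONNECTOR_SVIDEO;	/* A sense, B clear */
	if ((tv_dac & TVDAC_SENSE_MASK) == 0)
		return DRM_MODE_CONNECTOR_Component;	/* no sense bits */
	return -1;					/* unrecognised combination */
}
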
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 26d0d8ced80d..65ea42cf1795 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -60,8 +60,6 @@ static struct drm_driver driver = {
.irq_uninstall = mga_driver_irq_uninstall,
.irq_handler = mga_driver_irq_handler,
.reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = mga_ioctls,
.dma_ioctl = mga_dma_buffers,
.fops = {
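(Illustrative aside, not part of the patch: this hunk and the ones that follow drop the boilerplate .get_map_ofs/.get_reg_ofs assignments from each driver's struct drm_driver; the same two lines disappear from nouveau, r128, radeon, savage, sis, tdfx, via and vmwgfx below, presumably because the core now handles this itself, and sis additionally loses a redundant ".context_dtor = NULL". The C detail that makes removing an explicit "= NULL" harmless is that members omitted from a designated initializer are zero/NULL-initialized. A small stand-alone sketch with a hypothetical struct driver_ops, not the real DRM definitions:)

#include <stdio.h>

/* Hypothetical hook table, loosely shaped like struct drm_driver;
 * not the real DRM definition. */
struct driver_ops {
	void (*lastclose)(void);
	void (*context_dtor)(int ctx);
	unsigned long (*get_map_ofs)(void *map);
};

static void my_lastclose(void) { }

/* Only the hooks this driver really needs are named; per C99's
 * initialization rules, every member left out of a designated
 * initializer is zeroed, so .context_dtor and .get_map_ofs end up
 * NULL without spelling that out. */
static const struct driver_ops driver = {
	.lastclose = my_lastclose,
};

int main(void)
{
	printf("context_dtor is %s\n", driver.context_dtor ? "set" : "NULL");
	printf("get_map_ofs is %s\n", driver.get_map_ofs ? "set" : "NULL");
	return 0;
}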
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 1de5eb53e016..209912a1b7a5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -379,8 +379,6 @@ static struct drm_driver driver = {
.irq_uninstall = nouveau_irq_uninstall,
.irq_handler = nouveau_irq_handler,
.reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = nouveau_ioctls,
.fops = {
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 1e2971f13aa1..67309f84f16d 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -56,8 +56,6 @@ static struct drm_driver driver = {
.irq_uninstall = r128_driver_irq_uninstall,
.irq_handler = r128_driver_irq_handler,
.reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = r128_ioctls,
.dma_ioctl = r128_cce_buffers,
.fops = {
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 795403b0e2cd..663cdc10a5c2 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -203,8 +203,6 @@ static struct drm_driver driver_old = {
.irq_uninstall = radeon_driver_irq_uninstall,
.irq_handler = radeon_driver_irq_handler,
.reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = radeon_ioctls,
.dma_ioctl = radeon_cp_buffers,
.fops = {
@@ -290,8 +288,6 @@ static struct drm_driver kms_driver = {
.irq_uninstall = radeon_driver_irq_uninstall_kms,
.irq_handler = radeon_driver_irq_handler_kms,
.reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = radeon_ioctls_kms,
.gem_init_object = radeon_gem_object_init,
.gem_free_object = radeon_gem_object_free,
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 021de44c15ab..c0385633667d 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -42,8 +42,6 @@ static struct drm_driver driver = {
.lastclose = savage_driver_lastclose,
.unload = savage_driver_unload,
.reclaim_buffers = savage_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = savage_ioctls,
.dma_ioctl = savage_bci_buffers,
.fops = {
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 776bf9e9ea1a..4d9f311d249d 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -67,13 +67,10 @@ static struct drm_driver driver = {
.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
.load = sis_driver_load,
.unload = sis_driver_unload,
- .context_dtor = NULL,
.dma_quiescent = sis_idle,
.reclaim_buffers = NULL,
.reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
.lastclose = sis_lastclose,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = sis_ioctls,
.fops = {
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index ec5a43e65722..e0768adbeccd 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -42,8 +42,6 @@ static struct pci_device_id pciidlist[] = {
static struct drm_driver driver = {
.driver_features = DRIVER_USE_MTRR,
.reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index 7a1b210401e0..02f733db61c1 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -51,8 +51,6 @@ static struct drm_driver driver = {
.reclaim_buffers_locked = NULL,
.reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
.lastclose = via_lastclose,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = via_ioctls,
.fops = {
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index a96ed6d9d010..5c845b6ec492 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -798,8 +798,6 @@ static struct drm_driver driver = {
.irq_handler = vmw_irq_handler,
.get_vblank_counter = vmw_get_vblank_counter,
.reclaim_buffers_locked = NULL,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = vmw_ioctls,
.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
.dma_quiescent = NULL, /*vmw_dma_quiescent, */