path: root/drivers/gpu/drm
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 17
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 21
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7017.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 26
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 525
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 40
-rw-r--r--  drivers/gpu/drm/i915/intel_acpi.c | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 104
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 208
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 117
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 49
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 94
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/r600_reg.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 34
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 13
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 2
33 files changed, 886 insertions, 569 deletions
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 6985cb1da72c..2baa6708e44c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -156,12 +156,12 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
{ DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
{ DRM_MODE_CONNECTOR_Component, "Component", 0 },
- { DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN", 0 },
- { DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 },
- { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
- { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
+ { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
+ { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
+ { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
+ { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
- { DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 },
+ { DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
};
static struct drm_prop_enum_list drm_encoder_enum_list[] =
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index f7af91cb273d..2d4e17a004db 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -471,6 +471,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
int count = 0, ro, fail = 0;
struct drm_crtc_helper_funcs *crtc_funcs;
int ret = 0;
+ int i;
DRM_DEBUG_KMS("\n");
@@ -666,6 +667,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
if (ret != 0)
goto fail;
}
+ DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+ for (i = 0; i < set->num_connectors; i++) {
+ DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+ drm_get_connector_name(set->connectors[i]));
+ set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
+ }
kfree(save_connectors);
kfree(save_encoders);
@@ -841,7 +848,7 @@ static void output_poll_execute(struct work_struct *work)
struct delayed_work *delayed_work = to_delayed_work(work);
struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
struct drm_connector *connector;
- enum drm_connector_status old_status, status;
+ enum drm_connector_status old_status;
bool repoll = false, changed = false;
if (!drm_kms_helper_poll)
@@ -866,8 +873,12 @@ static void output_poll_execute(struct work_struct *work)
!(connector->polled & DRM_CONNECTOR_POLL_HPD))
continue;
- status = connector->funcs->detect(connector, false);
- if (old_status != status)
+ connector->status = connector->funcs->detect(connector, false);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+ connector->base.id,
+ drm_get_connector_name(connector),
+ old_status, connector->status);
+ if (old_status != connector->status)
changed = true;
}
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 9d3a5030b6e1..16d5155edad1 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -585,10 +585,13 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
struct timeval now;
unsigned long flags;
unsigned int seq;
+ int ret;
e = kzalloc(sizeof *e, GFP_KERNEL);
- if (e == NULL)
- return -ENOMEM;
+ if (e == NULL) {
+ ret = -ENOMEM;
+ goto err_put;
+ }
e->pipe = pipe;
e->base.pid = current->pid;
@@ -603,9 +606,8 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
spin_lock_irqsave(&dev->event_lock, flags);
if (file_priv->event_space < sizeof e->event) {
- spin_unlock_irqrestore(&dev->event_lock, flags);
- kfree(e);
- return -ENOMEM;
+ ret = -EBUSY;
+ goto err_unlock;
}
file_priv->event_space -= sizeof e->event;
@@ -626,7 +628,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
if ((seq - vblwait->request.sequence) <= (1 << 23)) {
e->event.tv_sec = now.tv_sec;
e->event.tv_usec = now.tv_usec;
- drm_vblank_put(dev, e->pipe);
+ drm_vblank_put(dev, pipe);
list_add_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
trace_drm_vblank_event_delivered(current->pid, pipe,
@@ -638,6 +640,13 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
spin_unlock_irqrestore(&dev->event_lock, flags);
return 0;
+
+err_unlock:
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(e);
+err_put:
+ drm_vblank_put(dev, pipe);
+ return ret;
}
/**
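The reworked drm_queue_vblank_event() above replaces the per-failure unlock-and-free with a single goto-based unwind, and makes every error path drop the vblank reference the function holds. A minimal user-space sketch of that unwind pattern, using a pthread mutex and invented names in place of the DRM event lock and vblank reference:

/* Goto-based unwind: each failure jumps to a label that releases
 * exactly what has been taken so far.  Standalone approximation. */
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
static int event_space = 0;             /* pretend the client queue is full */

static int queue_event(void)
{
        int ret;
        void *e = calloc(1, 64);        /* the event being queued */

        if (e == NULL) {
                ret = -ENOMEM;
                goto err;
        }

        pthread_mutex_lock(&event_lock);
        if (event_space < 64) {
                ret = -EBUSY;           /* no room: unwind lock + allocation */
                goto err_unlock;
        }
        event_space -= 64;
        pthread_mutex_unlock(&event_lock);
        return 0;

err_unlock:
        pthread_mutex_unlock(&event_lock);
        free(e);
err:
        /* in the kernel version this is also where drm_vblank_put() runs */
        return ret;
}

int main(void) { return queue_event() ? 1 : 0; }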
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index af70337567ce..d3e8c540f778 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -242,7 +242,7 @@ fail:
static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo)
{
- return connector_status_unknown;
+ return connector_status_connected;
}
static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo,
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 7a26f4dd21ae..cb900dc83d95 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -34,6 +34,7 @@
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
+#include "../../../platform/x86/intel_ips.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
@@ -767,6 +768,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_BLT:
value = HAS_BLT(dev);
break;
+ case I915_PARAM_HAS_COHERENT_RINGS:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -1868,6 +1872,26 @@ out_unlock:
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
/**
+ * Tells the intel_ips driver that the i915 driver is now loaded, if
+ * IPS got loaded first.
+ *
+ * This awkward dance is so that neither module has to depend on the
+ * other in order for IPS to do the appropriate communication of
+ * GPU turbo limits to i915.
+ */
+static void
+ips_ping_for_i915_load(void)
+{
+ void (*link)(void);
+
+ link = symbol_get(ips_link_to_i915_driver);
+ if (link) {
+ link();
+ symbol_put(ips_link_to_i915_driver);
+ }
+}
+
+/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
* @flags: startup flags
@@ -2072,6 +2096,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->mchdev_lock = &mchdev_lock;
spin_unlock(&mchdev_lock);
+ ips_ping_for_i915_load();
+
return 0;
out_workqueue_free:
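ips_ping_for_i915_load() above uses symbol_get()/symbol_put() so that i915 can notify the intel_ips module only if it happens to be loaded, without either module hard-depending on the other. A rough user-space analogue of that optional-linkage idea, with a hypothetical library and symbol name (build with -ldl; RTLD_NOLOAD is a glibc extension):

/* Resolve an optional symbol at run time, only if its provider is
 * already loaded -- loosely mirroring symbol_get()/symbol_put(). */
#include <dlfcn.h>
#include <stdio.h>

int main(void)
{
        void (*link)(void);
        /* succeeds only if libips.so is already mapped, like symbol_get() */
        void *handle = dlopen("libips.so", RTLD_LAZY | RTLD_NOLOAD);

        if (handle) {
                link = (void (*)(void))dlsym(handle, "ips_link_to_i915_driver");
                if (link)
                        link();         /* tell the other side we are here */
                dlclose(handle);        /* drop the reference, like symbol_put() */
        } else {
                printf("ips not loaded; it will find us later\n");
        }
        return 0;
}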
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 17b1cba3b5f1..275ec6ed43ae 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -38,8 +38,7 @@
static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
- bool pipelined);
+static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
@@ -2594,7 +2593,7 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
if (reg->gpu) {
int ret;
- ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
@@ -2742,8 +2741,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
/** Flushes any GPU write domain for the object if it's dirty. */
static int
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
- bool pipelined)
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
uint32_t old_write_domain;
@@ -2762,10 +2760,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
obj->read_domains,
old_write_domain);
- if (pipelined)
- return 0;
-
- return i915_gem_object_wait_rendering(obj, true);
+ return 0;
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -2826,18 +2821,15 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
if (obj_priv->gtt_space == NULL)
return -EINVAL;
- ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret != 0)
return ret;
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
i915_gem_object_flush_cpu_write_domain(obj);
- if (write) {
- ret = i915_gem_object_wait_rendering(obj, true);
- if (ret)
- return ret;
- }
-
old_write_domain = obj->write_domain;
old_read_domains = obj->read_domains;
@@ -2875,7 +2867,7 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
if (obj_priv->gtt_space == NULL)
return -EINVAL;
- ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
@@ -2924,9 +2916,12 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
uint32_t old_write_domain, old_read_domains;
int ret;
- ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret != 0)
return ret;
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
i915_gem_object_flush_gtt_write_domain(obj);
@@ -2935,12 +2930,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
*/
i915_gem_object_set_to_full_cpu_read_domain(obj);
- if (write) {
- ret = i915_gem_object_wait_rendering(obj, true);
- if (ret)
- return ret;
- }
-
old_write_domain = obj->write_domain;
old_read_domains = obj->read_domains;
@@ -3205,9 +3194,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
if (offset == 0 && size == obj->size)
return i915_gem_object_set_to_cpu_domain(obj, 0);
- ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret != 0)
return ret;
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
+
i915_gem_object_flush_gtt_write_domain(obj);
/* If we're already fully in the CPU read domain, we're done. */
@@ -3254,192 +3247,230 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
return 0;
}
-/**
- * Pin an object to the GTT and evaluate the relocations landing in it.
- */
static int
-i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
- struct drm_file *file_priv,
- struct drm_i915_gem_exec_object2 *entry)
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_exec_object2 *entry,
+ struct drm_i915_gem_relocation_entry *reloc)
{
struct drm_device *dev = obj->base.dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_relocation_entry __user *user_relocs;
- struct drm_gem_object *target_obj = NULL;
- uint32_t target_handle = 0;
- int i, ret = 0;
+ struct drm_gem_object *target_obj;
+ uint32_t target_offset;
+ int ret = -EINVAL;
- user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
- for (i = 0; i < entry->relocation_count; i++) {
- struct drm_i915_gem_relocation_entry reloc;
- uint32_t target_offset;
+ target_obj = drm_gem_object_lookup(dev, file_priv,
+ reloc->target_handle);
+ if (target_obj == NULL)
+ return -ENOENT;
- if (__copy_from_user_inatomic(&reloc,
- user_relocs+i,
- sizeof(reloc))) {
- ret = -EFAULT;
- break;
- }
+ target_offset = to_intel_bo(target_obj)->gtt_offset;
- if (reloc.target_handle != target_handle) {
- drm_gem_object_unreference(target_obj);
+#if WATCH_RELOC
+ DRM_INFO("%s: obj %p offset %08x target %d "
+ "read %08x write %08x gtt %08x "
+ "presumed %08x delta %08x\n",
+ __func__,
+ obj,
+ (int) reloc->offset,
+ (int) reloc->target_handle,
+ (int) reloc->read_domains,
+ (int) reloc->write_domain,
+ (int) target_offset,
+ (int) reloc->presumed_offset,
+ reloc->delta);
+#endif
- target_obj = drm_gem_object_lookup(dev, file_priv,
- reloc.target_handle);
- if (target_obj == NULL) {
- ret = -ENOENT;
- break;
- }
+ /* The target buffer should have appeared before us in the
+ * exec_object list, so it should have a GTT space bound by now.
+ */
+ if (target_offset == 0) {
+ DRM_ERROR("No GTT space found for object %d\n",
+ reloc->target_handle);
+ goto err;
+ }
- target_handle = reloc.target_handle;
- }
- target_offset = to_intel_bo(target_obj)->gtt_offset;
+ /* Validate that the target is in a valid r/w GPU domain */
+ if (reloc->write_domain & (reloc->write_domain - 1)) {
+ DRM_ERROR("reloc with multiple write domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ goto err;
+ }
+ if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
+ reloc->read_domains & I915_GEM_DOMAIN_CPU) {
+ DRM_ERROR("reloc with read/write CPU domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ goto err;
+ }
+ if (reloc->write_domain && target_obj->pending_write_domain &&
+ reloc->write_domain != target_obj->pending_write_domain) {
+ DRM_ERROR("Write domain conflict: "
+ "obj %p target %d offset %d "
+ "new %08x old %08x\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->write_domain,
+ target_obj->pending_write_domain);
+ goto err;
+ }
-#if WATCH_RELOC
- DRM_INFO("%s: obj %p offset %08x target %d "
- "read %08x write %08x gtt %08x "
- "presumed %08x delta %08x\n",
- __func__,
- obj,
- (int) reloc.offset,
- (int) reloc.target_handle,
- (int) reloc.read_domains,
- (int) reloc.write_domain,
- (int) target_offset,
- (int) reloc.presumed_offset,
- reloc.delta);
-#endif
+ target_obj->pending_read_domains |= reloc->read_domains;
+ target_obj->pending_write_domain |= reloc->write_domain;
- /* The target buffer should have appeared before us in the
- * exec_object list, so it should have a GTT space bound by now.
- */
- if (target_offset == 0) {
- DRM_ERROR("No GTT space found for object %d\n",
- reloc.target_handle);
- ret = -EINVAL;
- break;
- }
+ /* If the relocation already has the right value in it, no
+ * more work needs to be done.
+ */
+ if (target_offset == reloc->presumed_offset)
+ goto out;
- /* Validate that the target is in a valid r/w GPU domain */
- if (reloc.write_domain & (reloc.write_domain - 1)) {
- DRM_ERROR("reloc with multiple write domains: "
- "obj %p target %d offset %d "
- "read %08x write %08x",
- obj, reloc.target_handle,
- (int) reloc.offset,
- reloc.read_domains,
- reloc.write_domain);
- ret = -EINVAL;
- break;
- }
- if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
- reloc.read_domains & I915_GEM_DOMAIN_CPU) {
- DRM_ERROR("reloc with read/write CPU domains: "
- "obj %p target %d offset %d "
- "read %08x write %08x",
- obj, reloc.target_handle,
- (int) reloc.offset,
- reloc.read_domains,
- reloc.write_domain);
- ret = -EINVAL;
- break;
- }
- if (reloc.write_domain && target_obj->pending_write_domain &&
- reloc.write_domain != target_obj->pending_write_domain) {
- DRM_ERROR("Write domain conflict: "
- "obj %p target %d offset %d "
- "new %08x old %08x\n",
- obj, reloc.target_handle,
- (int) reloc.offset,
- reloc.write_domain,
- target_obj->pending_write_domain);
- ret = -EINVAL;
- break;
- }
+ /* Check that the relocation address is valid... */
+ if (reloc->offset > obj->base.size - 4) {
+ DRM_ERROR("Relocation beyond object bounds: "
+ "obj %p target %d offset %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ (int) obj->base.size);
+ goto err;
+ }
+ if (reloc->offset & 3) {
+ DRM_ERROR("Relocation not 4-byte aligned: "
+ "obj %p target %d offset %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset);
+ goto err;
+ }
- target_obj->pending_read_domains |= reloc.read_domains;
- target_obj->pending_write_domain |= reloc.write_domain;
+ /* and points to somewhere within the target object. */
+ if (reloc->delta >= target_obj->size) {
+ DRM_ERROR("Relocation beyond target object bounds: "
+ "obj %p target %d delta %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->delta,
+ (int) target_obj->size);
+ goto err;
+ }
- /* If the relocation already has the right value in it, no
- * more work needs to be done.
- */
- if (target_offset == reloc.presumed_offset)
- continue;
+ reloc->delta += target_offset;
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+ uint32_t page_offset = reloc->offset & ~PAGE_MASK;
+ char *vaddr;
- /* Check that the relocation address is valid... */
- if (reloc.offset > obj->base.size - 4) {
- DRM_ERROR("Relocation beyond object bounds: "
- "obj %p target %d offset %d size %d.\n",
- obj, reloc.target_handle,
- (int) reloc.offset, (int) obj->base.size);
- ret = -EINVAL;
- break;
- }
- if (reloc.offset & 3) {
- DRM_ERROR("Relocation not 4-byte aligned: "
- "obj %p target %d offset %d.\n",
- obj, reloc.target_handle,
- (int) reloc.offset);
- ret = -EINVAL;
- break;
- }
+ vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+ *(uint32_t *)(vaddr + page_offset) = reloc->delta;
+ kunmap_atomic(vaddr);
+ } else {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t __iomem *reloc_entry;
+ void __iomem *reloc_page;
- /* and points to somewhere within the target object. */
- if (reloc.delta >= target_obj->size) {
- DRM_ERROR("Relocation beyond target object bounds: "
- "obj %p target %d delta %d size %d.\n",
- obj, reloc.target_handle,
- (int) reloc.delta, (int) target_obj->size);
- ret = -EINVAL;
- break;
- }
+ ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
+ if (ret)
+ goto err;
- reloc.delta += target_offset;
- if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
- uint32_t page_offset = reloc.offset & ~PAGE_MASK;
- char *vaddr;
+ /* Map the page containing the relocation we're going to perform. */
+ reloc->offset += obj->gtt_offset;
+ reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ reloc->offset & PAGE_MASK);
+ reloc_entry = (uint32_t __iomem *)
+ (reloc_page + (reloc->offset & ~PAGE_MASK));
+ iowrite32(reloc->delta, reloc_entry);
+ io_mapping_unmap_atomic(reloc_page);
+ }
- vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
- *(uint32_t *)(vaddr + page_offset) = reloc.delta;
- kunmap_atomic(vaddr);
- } else {
- uint32_t __iomem *reloc_entry;
- void __iomem *reloc_page;
+ /* and update the user's relocation entry */
+ reloc->presumed_offset = target_offset;
- ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
- if (ret)
- break;
+out:
+ ret = 0;
+err:
+ drm_gem_object_unreference(target_obj);
+ return ret;
+}
- /* Map the page containing the relocation we're going to perform. */
- reloc.offset += obj->gtt_offset;
- reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- reloc.offset & PAGE_MASK);
- reloc_entry = (uint32_t __iomem *)
- (reloc_page + (reloc.offset & ~PAGE_MASK));
- iowrite32(reloc.delta, reloc_entry);
- io_mapping_unmap_atomic(reloc_page);
- }
+static int
+i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_exec_object2 *entry)
+{
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+ int i, ret;
+
+ user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+ for (i = 0; i < entry->relocation_count; i++) {
+ struct drm_i915_gem_relocation_entry reloc;
+
+ if (__copy_from_user_inatomic(&reloc,
+ user_relocs+i,
+ sizeof(reloc)))
+ return -EFAULT;
+
+ ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc);
+ if (ret)
+ return ret;
- /* and update the user's relocation entry */
- reloc.presumed_offset = target_offset;
if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
- &reloc.presumed_offset,
- sizeof(reloc.presumed_offset))) {
- ret = -EFAULT;
- break;
- }
+ &reloc.presumed_offset,
+ sizeof(reloc.presumed_offset)))
+ return -EFAULT;
}
- drm_gem_object_unreference(target_obj);
- return ret;
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_exec_object2 *entry,
+ struct drm_i915_gem_relocation_entry *relocs)
+{
+ int i, ret;
+
+ for (i = 0; i < entry->relocation_count; i++) {
+ ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int
-i915_gem_execbuffer_pin(struct drm_device *dev,
- struct drm_file *file,
- struct drm_gem_object **object_list,
- struct drm_i915_gem_exec_object2 *exec_list,
- int count)
+i915_gem_execbuffer_relocate(struct drm_device *dev,
+ struct drm_file *file,
+ struct drm_gem_object **object_list,
+ struct drm_i915_gem_exec_object2 *exec_list,
+ int count)
+{
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+ obj->base.pending_read_domains = 0;
+ obj->base.pending_write_domain = 0;
+ ret = i915_gem_execbuffer_relocate_object(obj, file,
+ &exec_list[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_reserve(struct drm_device *dev,
+ struct drm_file *file,
+ struct drm_gem_object **object_list,
+ struct drm_i915_gem_exec_object2 *exec_list,
+ int count)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i, retry;
@@ -3502,6 +3533,87 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
}
static int
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ struct drm_file *file,
+ struct drm_gem_object **object_list,
+ struct drm_i915_gem_exec_object2 *exec_list,
+ int count)
+{
+ struct drm_i915_gem_relocation_entry *reloc;
+ int i, total, ret;
+
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+ obj->in_execbuffer = false;
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ total = 0;
+ for (i = 0; i < count; i++)
+ total += exec_list[i].relocation_count;
+
+ reloc = drm_malloc_ab(total, sizeof(*reloc));
+ if (reloc == NULL) {
+ mutex_lock(&dev->struct_mutex);
+ return -ENOMEM;
+ }
+
+ total = 0;
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+ user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+
+ if (copy_from_user(reloc+total, user_relocs,
+ exec_list[i].relocation_count *
+ sizeof(*reloc))) {
+ ret = -EFAULT;
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+
+ total += exec_list[i].relocation_count;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+
+ ret = i915_gem_execbuffer_reserve(dev, file,
+ object_list, exec_list,
+ count);
+ if (ret)
+ goto err;
+
+ total = 0;
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+ obj->base.pending_read_domains = 0;
+ obj->base.pending_write_domain = 0;
+ ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
+ &exec_list[i],
+ reloc + total);
+ if (ret)
+ goto err;
+
+ total += exec_list[i].relocation_count;
+ }
+
+ /* Leave the user relocations as are, this is the painfully slow path,
+ * and we want to avoid the complication of dropping the lock whilst
+ * having buffers reserved in the aperture and so causing spurious
+ * ENOSPC for random operations.
+ */
+
+err:
+ drm_free_large(reloc);
+ return ret;
+}
+
+static int
i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring,
@@ -3630,8 +3742,15 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
for (i = 0; i < count; i++) {
char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
- size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);
+ int length; /* limited by fault_in_pages_readable() */
+
+ /* First check for malicious input causing overflow */
+ if (exec[i].relocation_count >
+ INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
+ return -EINVAL;
+ length = exec[i].relocation_count *
+ sizeof(struct drm_i915_gem_relocation_entry);
if (!access_ok(VERIFY_READ, ptr, length))
return -EFAULT;
@@ -3774,18 +3893,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
/* Move the objects en-masse into the GTT, evicting if necessary. */
- ret = i915_gem_execbuffer_pin(dev, file,
- object_list, exec_list,
- args->buffer_count);
+ ret = i915_gem_execbuffer_reserve(dev, file,
+ object_list, exec_list,
+ args->buffer_count);
if (ret)
goto err;
/* The objects are in their final locations, apply the relocations. */
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
- obj->base.pending_read_domains = 0;
- obj->base.pending_write_domain = 0;
- ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
+ ret = i915_gem_execbuffer_relocate(dev, file,
+ object_list, exec_list,
+ args->buffer_count);
+ if (ret) {
+ if (ret == -EFAULT) {
+ ret = i915_gem_execbuffer_relocate_slow(dev, file,
+ object_list,
+ exec_list,
+ args->buffer_count);
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ }
if (ret)
goto err;
}
@@ -4249,10 +4374,20 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* use this buffer rather sooner than later, so issuing the required
* flush earlier is beneficial.
*/
- if (obj->write_domain & I915_GEM_GPU_DOMAINS)
+ if (obj->write_domain & I915_GEM_GPU_DOMAINS) {
i915_gem_flush_ring(dev, file_priv,
obj_priv->ring,
0, obj->write_domain);
+ } else if (obj_priv->ring->outstanding_lazy_request) {
+ /* This ring is not being cleared by active usage,
+ * so emit a request to do so.
+ */
+ u32 seqno = i915_add_request(dev,
+ NULL, NULL,
+ obj_priv->ring);
+ if (seqno == 0)
+ ret = -ENOMEM;
+ }
/* Update the active list for the hardware's current position.
* Otherwise this only updates on a delayed timer or when irqs
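Among the i915_gem.c changes above, validate_exec_list() now rejects a relocation_count large enough to overflow the length multiplication before handing the result to access_ok(). A standalone sketch of that guard, with a made-up entry type standing in for drm_i915_gem_relocation_entry:

/* Check a user-supplied count before multiplying by the element size,
 * so "count * sizeof(entry)" cannot wrap a signed length. */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

struct reloc_entry { uint64_t target; uint64_t offset; uint32_t delta; };

static int reloc_list_length(uint32_t count)
{
        if (count > INT_MAX / sizeof(struct reloc_entry))
                return -1;              /* malicious or bogus count: reject */
        return (int)(count * sizeof(struct reloc_entry));
}

int main(void)
{
        printf("len for 4 relocs: %d\n", reloc_list_length(4));
        printf("len for 0x20000000 relocs: %d\n", reloc_list_length(0x20000000u));
        return 0;
}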
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 25ed911a3112..cb8f43429279 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2471,6 +2471,9 @@
# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
+#define PCH_3DCGDIS1 0x46024
+# define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11)
+
#define FDI_PLL_FREQ_CTL 0x46030
#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24)
#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00
@@ -2588,6 +2591,13 @@
#define ILK_DISPLAY_CHICKEN2 0x42004
#define ILK_DPARB_GATE (1<<22)
#define ILK_VSDPFD_FULL (1<<21)
+#define ILK_DISPLAY_CHICKEN_FUSES 0x42014
+#define ILK_INTERNAL_GRAPHICS_DISABLE (1<<31)
+#define ILK_INTERNAL_DISPLAY_DISABLE (1<<30)
+#define ILK_DISPLAY_DEBUG_DISABLE (1<<29)
+#define ILK_HDCP_DISABLE (1<<25)
+#define ILK_eDP_A_DISABLE (1<<24)
+#define ILK_DESKTOP (1<<23)
#define ILK_DSPCLK_GATE 0x42020
#define ILK_DPARB_CLK_GATE (1<<5)
/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */
@@ -3033,6 +3043,7 @@
#define TRANS_DP_10BPC (1<<9)
#define TRANS_DP_6BPC (2<<9)
#define TRANS_DP_12BPC (3<<9)
+#define TRANS_DP_BPC_MASK (3<<9)
#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4)
#define TRANS_DP_VSYNC_ACTIVE_LOW 0
#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 454c064f8ef7..42729d25da58 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -239,6 +239,16 @@ static void i915_save_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
+ /* Cursor state */
+ dev_priv->saveCURACNTR = I915_READ(CURACNTR);
+ dev_priv->saveCURAPOS = I915_READ(CURAPOS);
+ dev_priv->saveCURABASE = I915_READ(CURABASE);
+ dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
+ dev_priv->saveCURBPOS = I915_READ(CURBPOS);
+ dev_priv->saveCURBBASE = I915_READ(CURBBASE);
+ if (IS_GEN2(dev))
+ dev_priv->saveCURSIZE = I915_READ(CURSIZE);
+
if (HAS_PCH_SPLIT(dev)) {
dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
@@ -529,6 +539,16 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
+ /* Cursor state */
+ I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
+ I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
+ I915_WRITE(CURABASE, dev_priv->saveCURABASE);
+ I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
+ I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
+ I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
+ if (IS_GEN2(dev))
+ I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
+
return;
}
@@ -543,16 +563,6 @@ void i915_save_display(struct drm_device *dev)
/* Don't save them in KMS mode */
i915_save_modeset_reg(dev);
- /* Cursor state */
- dev_priv->saveCURACNTR = I915_READ(CURACNTR);
- dev_priv->saveCURAPOS = I915_READ(CURAPOS);
- dev_priv->saveCURABASE = I915_READ(CURABASE);
- dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
- dev_priv->saveCURBPOS = I915_READ(CURBPOS);
- dev_priv->saveCURBBASE = I915_READ(CURBBASE);
- if (IS_GEN2(dev))
- dev_priv->saveCURSIZE = I915_READ(CURSIZE);
-
/* CRT state */
if (HAS_PCH_SPLIT(dev)) {
dev_priv->saveADPA = I915_READ(PCH_ADPA);
@@ -657,16 +667,6 @@ void i915_restore_display(struct drm_device *dev)
/* Don't restore them in KMS mode */
i915_restore_modeset_reg(dev);
- /* Cursor state */
- I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
- I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
- I915_WRITE(CURABASE, dev_priv->saveCURABASE);
- I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
- I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
- I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
- if (IS_GEN2(dev))
- I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
-
/* CRT state */
if (HAS_PCH_SPLIT(dev))
I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 65c88f9ba12c..2cb8e0b9f1ee 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -190,37 +190,6 @@ out:
kfree(output.pointer);
}
-static int intel_dsm_switchto(enum vga_switcheroo_client_id id)
-{
- return 0;
-}
-
-static int intel_dsm_power_state(enum vga_switcheroo_client_id id,
- enum vga_switcheroo_state state)
-{
- return 0;
-}
-
-static int intel_dsm_init(void)
-{
- return 0;
-}
-
-static int intel_dsm_get_client_id(struct pci_dev *pdev)
-{
- if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
- return VGA_SWITCHEROO_IGD;
- else
- return VGA_SWITCHEROO_DIS;
-}
-
-static struct vga_switcheroo_handler intel_dsm_handler = {
- .switchto = intel_dsm_switchto,
- .power_state = intel_dsm_power_state,
- .init = intel_dsm_init,
- .get_client_id = intel_dsm_get_client_id,
-};
-
static bool intel_dsm_pci_probe(struct pci_dev *pdev)
{
acpi_handle dhandle, intel_handle;
@@ -276,11 +245,8 @@ void intel_register_dsm_handler(void)
{
if (!intel_dsm_detect())
return;
-
- vga_switcheroo_register_handler(&intel_dsm_handler);
}
void intel_unregister_dsm_handler(void)
{
- vga_switcheroo_unregister_handler();
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index bee24b1a58e8..fca523288aca 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2120,9 +2120,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
- TRANS_DP_SYNC_MASK);
+ TRANS_DP_SYNC_MASK |
+ TRANS_DP_BPC_MASK);
temp |= (TRANS_DP_OUTPUT_ENABLE |
TRANS_DP_ENH_FRAMING);
+ temp |= TRANS_DP_8BPC;
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
@@ -2712,27 +2714,19 @@ fdi_reduce_ratio(u32 *num, u32 *den)
}
}
-#define DATA_N 0x800000
-#define LINK_N 0x80000
-
static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
int link_clock, struct fdi_m_n *m_n)
{
- u64 temp;
-
m_n->tu = 64; /* default size */
- temp = (u64) DATA_N * pixel_clock;
- temp = div_u64(temp, link_clock);
- m_n->gmch_m = div_u64(temp * bits_per_pixel, nlanes);
- m_n->gmch_m >>= 3; /* convert to bytes_per_pixel */
- m_n->gmch_n = DATA_N;
+ /* BUG_ON(pixel_clock > INT_MAX / 36); */
+ m_n->gmch_m = bits_per_pixel * pixel_clock;
+ m_n->gmch_n = link_clock * nlanes * 8;
fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
- temp = (u64) LINK_N * pixel_clock;
- m_n->link_m = div_u64(temp, link_clock);
- m_n->link_n = LINK_N;
+ m_n->link_m = pixel_clock;
+ m_n->link_n = link_clock;
fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
@@ -3716,6 +3710,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* FDI link */
if (HAS_PCH_SPLIT(dev)) {
+ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
int lane = 0, link_bw, bpp;
/* CPU eDP doesn't require FDI link, so just set DP M/N
according to current link config */
@@ -3799,6 +3794,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
intel_crtc->fdi_lanes = lane;
+ if (pixel_multiplier > 1)
+ link_bw *= pixel_multiplier;
ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
}
@@ -5236,6 +5233,55 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.page_flip = intel_crtc_page_flip,
};
+static void intel_sanitize_modesetting(struct drm_device *dev,
+ int pipe, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg, val;
+
+ if (HAS_PCH_SPLIT(dev))
+ return;
+
+ /* Who knows what state these registers were left in by the BIOS or
+ * grub?
+ *
+ * If we leave the registers in a conflicting state (e.g. with the
+ * display plane reading from the other pipe than the one we intend
+ * to use) then when we attempt to teardown the active mode, we will
+ * not disable the pipes and planes in the correct order -- leaving
+ * a plane reading from a disabled pipe and possibly leading to
+ * undefined behaviour.
+ */
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+
+ if ((val & DISPLAY_PLANE_ENABLE) == 0)
+ return;
+ if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
+ return;
+
+ /* This display plane is active and attached to the other CPU pipe. */
+ pipe = !pipe;
+
+ /* Disable the plane and wait for it to stop reading from the pipe. */
+ I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev, plane);
+
+ if (IS_GEN2(dev))
+ intel_wait_for_vblank(dev, pipe);
+
+ if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ return;
+
+ /* Switch off the pipe. */
+ reg = PIPECONF(pipe);
+ val = I915_READ(reg);
+ if (val & PIPECONF_ENABLE) {
+ I915_WRITE(reg, val & ~PIPECONF_ENABLE);
+ intel_wait_for_pipe_off(dev, pipe);
+ }
+}
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
@@ -5287,6 +5333,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
(unsigned long)intel_crtc);
+
+ intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
}
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -5331,19 +5379,41 @@ static int intel_encoder_clones(struct drm_device *dev, int type_mask)
return index_mask;
}
+static bool has_edp_a(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_MOBILE(dev))
+ return false;
+
+ if ((I915_READ(DP_A) & DP_DETECTED) == 0)
+ return false;
+
+ if (IS_GEN5(dev) &&
+ (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
+ return false;
+
+ return true;
+}
+
static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
bool dpd_is_edp = false;
+ bool has_lvds = false;
if (IS_MOBILE(dev) && !IS_I830(dev))
- intel_lvds_init(dev);
+ has_lvds = intel_lvds_init(dev);
+ if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
+ /* disable the panel fitter on everything but LVDS */
+ I915_WRITE(PFIT_CONTROL, 0);
+ }
if (HAS_PCH_SPLIT(dev)) {
dpd_is_edp = intel_dpd_is_edp(dev);
- if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
+ if (has_edp_a(dev))
intel_dp_init(dev, DP_A);
if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
@@ -5772,6 +5842,8 @@ void intel_init_clock_gating(struct drm_device *dev)
I915_WRITE(PCH_3DCGDIS0,
MARIUNIT_CLOCK_GATE_DISABLE |
SVSMUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(PCH_3DCGDIS1,
+ VFMUNIT_CLOCK_GATE_DISABLE);
}
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
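The ironlake_compute_m_n() rewrite above drops the fixed DATA_N/LINK_N scaling and instead reduces the raw data and link ratios directly. A standalone sketch of that computation using a gcd-based reduce (the kernel's fdi_reduce_ratio() also clamps the values to fit the M/N register fields, which is omitted here); the sample clocks are arbitrary:

/* M/N as reduced ratios: data M/N = (bpp * pixel_clock) / (link_clock
 * * lanes * 8), link M/N = pixel_clock / link_clock. */
#include <stdint.h>
#include <stdio.h>

static uint32_t gcd32(uint32_t a, uint32_t b)
{
        while (b) { uint32_t t = a % b; a = b; b = t; }
        return a;
}

static void reduce(uint32_t *num, uint32_t *den)
{
        uint32_t g = gcd32(*num, *den);
        *num /= g;
        *den /= g;
}

int main(void)
{
        uint32_t bpp = 24, lanes = 4, pixel_clock = 148500, link_clock = 270000;
        uint32_t gmch_m = bpp * pixel_clock, gmch_n = link_clock * lanes * 8;
        uint32_t link_m = pixel_clock, link_n = link_clock;

        reduce(&gmch_m, &gmch_n);
        reduce(&link_m, &link_n);
        printf("gmch M/N = %u/%u, link M/N = %u/%u\n",
               gmch_m, gmch_n, link_m, link_n);
        return 0;
}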
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index c8e005553310..864417cffe9a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -479,6 +479,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
uint16_t address = algo_data->address;
uint8_t msg[5];
uint8_t reply[2];
+ unsigned retry;
int msg_bytes;
int reply_bytes;
int ret;
@@ -513,14 +514,33 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
break;
}
- for (;;) {
- ret = intel_dp_aux_ch(intel_dp,
- msg, msg_bytes,
- reply, reply_bytes);
+ for (retry = 0; retry < 5; retry++) {
+ ret = intel_dp_aux_ch(intel_dp,
+ msg, msg_bytes,
+ reply, reply_bytes);
if (ret < 0) {
DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
return ret;
}
+
+ switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
+ case AUX_NATIVE_REPLY_ACK:
+ /* I2C-over-AUX Reply field is only valid
+ * when paired with AUX ACK.
+ */
+ break;
+ case AUX_NATIVE_REPLY_NACK:
+ DRM_DEBUG_KMS("aux_ch native nack\n");
+ return -EREMOTEIO;
+ case AUX_NATIVE_REPLY_DEFER:
+ udelay(100);
+ continue;
+ default:
+ DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
+ reply[0]);
+ return -EREMOTEIO;
+ }
+
switch (reply[0] & AUX_I2C_REPLY_MASK) {
case AUX_I2C_REPLY_ACK:
if (mode == MODE_I2C_READ) {
@@ -528,17 +548,20 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
return reply_bytes - 1;
case AUX_I2C_REPLY_NACK:
- DRM_DEBUG_KMS("aux_ch nack\n");
+ DRM_DEBUG_KMS("aux_i2c nack\n");
return -EREMOTEIO;
case AUX_I2C_REPLY_DEFER:
- DRM_DEBUG_KMS("aux_ch defer\n");
+ DRM_DEBUG_KMS("aux_i2c defer\n");
udelay(100);
break;
default:
- DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
+ DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
return -EREMOTEIO;
}
}
+
+ DRM_ERROR("too many retries, giving up\n");
+ return -EREMOTEIO;
}
static int
@@ -584,17 +607,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
mode->clock = dev_priv->panel_fixed_mode->clock;
}
- /* Just use VBT values for eDP */
- if (is_edp(intel_dp)) {
- intel_dp->lane_count = dev_priv->edp.lanes;
- intel_dp->link_bw = dev_priv->edp.rate;
- adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
- DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n",
- intel_dp->link_bw, intel_dp->lane_count,
- adjusted_mode->clock);
- return true;
- }
-
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
@@ -613,6 +625,19 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
+ if (is_edp(intel_dp)) {
+ /* okay we failed just pick the highest */
+ intel_dp->lane_count = max_lane_count;
+ intel_dp->link_bw = bws[max_clock];
+ adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
+ DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
+ "count %d clock %d\n",
+ intel_dp->link_bw, intel_dp->lane_count,
+ adjusted_mode->clock);
+
+ return true;
+ }
+
return false;
}
@@ -1087,21 +1112,11 @@ intel_get_adjust_train(struct intel_dp *intel_dp)
}
static uint32_t
-intel_dp_signal_levels(struct intel_dp *intel_dp)
+intel_dp_signal_levels(uint8_t train_set, int lane_count)
{
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t signal_levels = 0;
- u8 train_set = intel_dp->train_set[0];
- u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;
- u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK;
+ uint32_t signal_levels = 0;
- if (is_edp(intel_dp)) {
- vswing = dev_priv->edp.vswing;
- preemphasis = dev_priv->edp.preemphasis;
- }
-
- switch (vswing) {
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
default:
signal_levels |= DP_VOLTAGE_0_4;
@@ -1116,7 +1131,7 @@ intel_dp_signal_levels(struct intel_dp *intel_dp)
signal_levels |= DP_VOLTAGE_1_2;
break;
}
- switch (preemphasis) {
+ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPHASIS_0:
default:
signal_levels |= DP_PRE_EMPHASIS_0;
@@ -1203,18 +1218,6 @@ intel_channel_eq_ok(struct intel_dp *intel_dp)
}
static bool
-intel_dp_aux_handshake_required(struct intel_dp *intel_dp)
-{
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (is_edp(intel_dp) && dev_priv->no_aux_handshake)
- return false;
-
- return true;
-}
-
-static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
uint8_t dp_train_pat)
@@ -1226,9 +1229,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
I915_WRITE(intel_dp->output_reg, dp_reg_value);
POSTING_READ(intel_dp->output_reg);
- if (!intel_dp_aux_handshake_required(intel_dp))
- return true;
-
intel_dp_aux_native_write_1(intel_dp,
DP_TRAINING_PATTERN_SET,
dp_train_pat);
@@ -1261,11 +1261,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
POSTING_READ(intel_dp->output_reg);
intel_wait_for_vblank(dev, intel_crtc->pipe);
- if (intel_dp_aux_handshake_required(intel_dp))
- /* Write the link configuration data */
- intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
- intel_dp->link_configuration,
- DP_LINK_CONFIGURATION_SIZE);
+ /* Write the link configuration data */
+ intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
+ intel_dp->link_configuration,
+ DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
@@ -1283,7 +1282,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(intel_dp);
+ signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
@@ -1297,37 +1296,33 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
break;
/* Set training pattern 1 */
- udelay(500);
- if (intel_dp_aux_handshake_required(intel_dp)) {
+ udelay(100);
+ if (!intel_dp_get_link_status(intel_dp))
break;
- } else {
- if (!intel_dp_get_link_status(intel_dp))
- break;
- if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
- clock_recovery = true;
- break;
- }
+ if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+ clock_recovery = true;
+ break;
+ }
- /* Check to see if we've tried the max voltage */
- for (i = 0; i < intel_dp->lane_count; i++)
- if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
- break;
- if (i == intel_dp->lane_count)
+ /* Check to see if we've tried the max voltage */
+ for (i = 0; i < intel_dp->lane_count; i++)
+ if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
+ if (i == intel_dp->lane_count)
+ break;
- /* Check to see if we've tried the same voltage 5 times */
- if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
- ++tries;
- if (tries == 5)
- break;
- } else
- tries = 0;
- voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+ /* Check to see if we've tried the same voltage 5 times */
+ if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+ ++tries;
+ if (tries == 5)
+ break;
+ } else
+ tries = 0;
+ voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
- /* Compute new intel_dp->train_set as requested by target */
- intel_get_adjust_train(intel_dp);
- }
+ /* Compute new intel_dp->train_set as requested by target */
+ intel_get_adjust_train(intel_dp);
}
intel_dp->DP = DP;
@@ -1354,7 +1349,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(intel_dp);
+ signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
@@ -1368,28 +1363,24 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP_TRAINING_PATTERN_2))
break;
- udelay(500);
-
- if (!intel_dp_aux_handshake_required(intel_dp)) {
+ udelay(400);
+ if (!intel_dp_get_link_status(intel_dp))
break;
- } else {
- if (!intel_dp_get_link_status(intel_dp))
- break;
- if (intel_channel_eq_ok(intel_dp)) {
- channel_eq = true;
- break;
- }
+ if (intel_channel_eq_ok(intel_dp)) {
+ channel_eq = true;
+ break;
+ }
- /* Try 5 times */
- if (tries > 5)
- break;
+ /* Try 5 times */
+ if (tries > 5)
+ break;
- /* Compute new intel_dp->train_set as requested by target */
- intel_get_adjust_train(intel_dp);
- ++tries;
- }
+ /* Compute new intel_dp->train_set as requested by target */
+ intel_get_adjust_train(intel_dp);
+ ++tries;
}
+
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
@@ -1408,6 +1399,9 @@ intel_dp_link_down(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t DP = intel_dp->DP;
+ if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+ return;
+
DRM_DEBUG_KMS("\n");
if (is_edp(intel_dp)) {
@@ -1430,6 +1424,28 @@ intel_dp_link_down(struct intel_dp *intel_dp)
if (is_edp(intel_dp))
DP |= DP_LINK_TRAIN_OFF;
+
+ if (!HAS_PCH_CPT(dev) &&
+ I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
+ /* Hardware workaround: leaving our transcoder select
+ * set to transcoder B while it's off will prevent the
+ * corresponding HDMI output on transcoder A.
+ *
+ * Combine this with another hardware workaround:
+ * transcoder select bit can only be cleared while the
+ * port is enabled.
+ */
+ DP &= ~DP_PIPEB_SELECT;
+ I915_WRITE(intel_dp->output_reg, DP);
+
+ /* Changes to enable or select take place the vblank
+ * after being written.
+ */
+ intel_wait_for_vblank(intel_dp->base.base.dev,
+ intel_crtc->pipe);
+ }
+
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(intel_dp->output_reg);
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 21551fe74541..e52c6125bb1f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -237,7 +237,7 @@ extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj);
-extern void intel_lvds_init(struct drm_device *dev);
+extern bool intel_lvds_init(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int dp_reg);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 4324a326f98e..25bcedf386fd 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -68,7 +68,7 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
/**
* Sets the power state for the panel.
*/
-static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on)
+static void intel_lvds_enable(struct intel_lvds *intel_lvds)
{
struct drm_device *dev = intel_lvds->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -82,26 +82,61 @@ static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on)
lvds_reg = LVDS;
}
- if (on) {
- I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
- I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
- intel_panel_set_backlight(dev, dev_priv->backlight_level);
- } else {
- dev_priv->backlight_level = intel_panel_get_backlight(dev);
-
- intel_panel_set_backlight(dev, 0);
- I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
+ I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
- if (intel_lvds->pfit_control) {
- if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
- DRM_ERROR("timed out waiting for panel to power off\n");
- I915_WRITE(PFIT_CONTROL, 0);
- intel_lvds->pfit_control = 0;
+ if (intel_lvds->pfit_dirty) {
+ /*
+ * Enable automatic panel scaling so that non-native modes
+ * fill the screen. The panel fitter should only be
+ * adjusted whilst the pipe is disabled, according to
+ * register description and PRM.
+ */
+ DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
+ intel_lvds->pfit_control,
+ intel_lvds->pfit_pgm_ratios);
+ if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) {
+ DRM_ERROR("timed out waiting for panel to power off\n");
+ } else {
+ I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
intel_lvds->pfit_dirty = false;
}
+ }
+
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
+ POSTING_READ(lvds_reg);
+
+ intel_panel_set_backlight(dev, dev_priv->backlight_level);
+}
+
+static void intel_lvds_disable(struct intel_lvds *intel_lvds)
+{
+ struct drm_device *dev = intel_lvds->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 ctl_reg, lvds_reg;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ ctl_reg = PCH_PP_CONTROL;
+ lvds_reg = PCH_LVDS;
+ } else {
+ ctl_reg = PP_CONTROL;
+ lvds_reg = LVDS;
+ }
+
+ dev_priv->backlight_level = intel_panel_get_backlight(dev);
+ intel_panel_set_backlight(dev, 0);
+
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
+
+ if (intel_lvds->pfit_control) {
+ if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
+ DRM_ERROR("timed out waiting for panel to power off\n");
- I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
+ I915_WRITE(PFIT_CONTROL, 0);
+ intel_lvds->pfit_dirty = true;
}
+
+ I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
POSTING_READ(lvds_reg);
}
@@ -110,9 +145,9 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
if (mode == DRM_MODE_DPMS_ON)
- intel_lvds_set_power(intel_lvds, true);
+ intel_lvds_enable(intel_lvds);
else
- intel_lvds_set_power(intel_lvds, false);
+ intel_lvds_disable(intel_lvds);
/* XXX: We never power down the LVDS pairs. */
}
@@ -411,43 +446,18 @@ static void intel_lvds_commit(struct drm_encoder *encoder)
/* Always do a full power on as we do not know what state
* we were left in.
*/
- intel_lvds_set_power(intel_lvds, true);
+ intel_lvds_enable(intel_lvds);
}
static void intel_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
/*
* The LVDS pin pair will already have been turned on in the
* intel_crtc_mode_set since it has a large impact on the DPLL
* settings.
*/
-
- if (HAS_PCH_SPLIT(dev))
- return;
-
- if (!intel_lvds->pfit_dirty)
- return;
-
- /*
- * Enable automatic panel scaling so that non-native modes fill the
- * screen. Should be enabled before the pipe is enabled, according to
- * register description and PRM.
- */
- DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
- intel_lvds->pfit_control,
- intel_lvds->pfit_pgm_ratios);
- if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
- DRM_ERROR("timed out waiting for panel to power off\n");
-
- I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
- I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
- intel_lvds->pfit_dirty = false;
}
/**
@@ -837,7 +847,7 @@ static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin)
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
*/
-void intel_lvds_init(struct drm_device *dev)
+bool intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds *intel_lvds;
@@ -853,37 +863,37 @@ void intel_lvds_init(struct drm_device *dev)
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds))
- return;
+ return false;
pin = GMBUS_PORT_PANEL;
if (!lvds_is_present_in_vbt(dev, &pin)) {
DRM_DEBUG_KMS("LVDS is not present in VBT\n");
- return;
+ return false;
}
if (HAS_PCH_SPLIT(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
- return;
+ return false;
if (dev_priv->edp.support) {
DRM_DEBUG_KMS("disable LVDS for eDP support\n");
- return;
+ return false;
}
}
if (!intel_lvds_ddc_probe(dev, pin)) {
DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n");
- return;
+ return false;
}
intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
if (!intel_lvds) {
- return;
+ return false;
}
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_lvds);
- return;
+ return false;
}
if (!HAS_PCH_SPLIT(dev)) {
@@ -1026,7 +1036,7 @@ out:
/* keep the LVDS connector */
dev_priv->int_lvds_connector = connector;
drm_sysfs_connector_add(connector);
- return;
+ return true;
failed:
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
@@ -1034,4 +1044,5 @@ failed:
drm_encoder_cleanup(encoder);
kfree(intel_lvds);
kfree(intel_connector);
+ return false;
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b83306f9244b..31cd7e33e820 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -156,23 +156,25 @@ static int init_ring_common(struct drm_device *dev,
/* G45 ring initialization fails to reset head to zero */
if (head != 0) {
- DRM_ERROR("%s head not reset to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- ring->name,
- I915_READ_CTL(ring),
- I915_READ_HEAD(ring),
- I915_READ_TAIL(ring),
- I915_READ_START(ring));
+ DRM_DEBUG_KMS("%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
I915_WRITE_HEAD(ring, 0);
- DRM_ERROR("%s head forced to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- ring->name,
- I915_READ_CTL(ring),
- I915_READ_HEAD(ring),
- I915_READ_TAIL(ring),
- I915_READ_START(ring));
+ if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+ DRM_ERROR("failed to set %s head to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
+ }
}
I915_WRITE_CTL(ring,
@@ -694,20 +696,17 @@ int intel_wait_ring_buffer(struct drm_device *dev,
drm_i915_private_t *dev_priv = dev->dev_private;
u32 head;
- head = intel_read_status_page(ring, 4);
- if (head) {
- ring->head = head & HEAD_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->size;
- if (ring->space >= n)
- return 0;
- }
-
trace_i915_ring_wait_begin (dev);
end = jiffies + 3 * HZ;
do {
- ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ /* If the reported head position has wrapped or hasn't advanced,
+ * fallback to the slow and accurate path.
+ */
+ head = intel_read_status_page(ring, 4);
+ if (head < ring->actual_head)
+ head = I915_READ_HEAD(ring);
+ ring->actual_head = head;
+ ring->head = head & HEAD_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->size;
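The wait loop above recomputes the free ring space from head and tail on every pass, only trusting the status-page head while it has not moved backwards. A self-contained sketch of just the space arithmetic, with a made-up ring size:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4096u

/* Free bytes in a circular ring, keeping an 8-byte gap so head == tail
 * means empty rather than full -- the same arithmetic the hunk above
 * applies to ring->head and ring->tail. */
static int ring_space(uint32_t head, uint32_t tail)
{
	int space = (int)head - (int)(tail + 8);

	if (space < 0)
		space += RING_SIZE;
	return space;
}

int main(void)
{
	printf("%d\n", ring_space(1024, 256)); /* head ahead of tail: 760 */
	printf("%d\n", ring_space(128, 4000)); /* wrapped around: 216 */
	return 0;
}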
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 3126c2681983..d2cd0f1efeed 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -30,8 +30,9 @@ struct intel_ring_buffer {
struct drm_device *dev;
struct drm_gem_object *gem_object;
- unsigned int head;
- unsigned int tail;
+ u32 actual_head;
+ u32 head;
+ u32 tail;
int space;
struct intel_hw_status_page status_page;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index de158b76bcd5..6bc42fa2a6ec 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -107,7 +107,8 @@ struct intel_sdvo {
* This is set if we treat the device as HDMI, instead of DVI.
*/
bool is_hdmi;
- bool has_audio;
+ bool has_hdmi_monitor;
+ bool has_hdmi_audio;
/**
* This is set if we detect output of sdvo device as LVDS and
@@ -1023,7 +1024,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
if (!intel_sdvo_set_target_input(intel_sdvo))
return;
- if (intel_sdvo->is_hdmi &&
+ if (intel_sdvo->has_hdmi_monitor &&
!intel_sdvo_set_avi_infoframe(intel_sdvo))
return;
@@ -1063,7 +1064,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
}
if (intel_crtc->pipe == 1)
sdvox |= SDVO_PIPE_B_SELECT;
- if (intel_sdvo->has_audio)
+ if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
if (INTEL_INFO(dev)->gen >= 4) {
@@ -1295,55 +1296,14 @@ intel_sdvo_get_edid(struct drm_connector *connector)
return drm_get_edid(connector, &sdvo->ddc);
}
-static struct drm_connector *
-intel_find_analog_connector(struct drm_device *dev)
-{
- struct drm_connector *connector;
- struct intel_sdvo *encoder;
-
- list_for_each_entry(encoder,
- &dev->mode_config.encoder_list,
- base.base.head) {
- if (encoder->base.type == INTEL_OUTPUT_ANALOG) {
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- head) {
- if (&encoder->base ==
- intel_attached_encoder(connector))
- return connector;
- }
- }
- }
-
- return NULL;
-}
-
-static int
-intel_analog_is_connected(struct drm_device *dev)
-{
- struct drm_connector *analog_connector;
-
- analog_connector = intel_find_analog_connector(dev);
- if (!analog_connector)
- return false;
-
- if (analog_connector->funcs->detect(analog_connector, false) ==
- connector_status_disconnected)
- return false;
-
- return true;
-}
-
/* Mac mini hack -- use the same DDC as the analog connector */
static struct edid *
intel_sdvo_get_analog_edid(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
- if (!intel_analog_is_connected(connector->dev))
- return NULL;
-
- return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+ return drm_get_edid(connector,
+ &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
}
enum drm_connector_status
@@ -1388,8 +1348,10 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
/* DDC bus is shared, match EDID to connector type */
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
- intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
- intel_sdvo->has_audio = drm_detect_monitor_audio(edid);
+ if (intel_sdvo->is_hdmi) {
+ intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+ intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+ }
}
connector->display_info.raw_edid = NULL;
kfree(edid);
@@ -1398,7 +1360,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
if (status == connector_status_connected) {
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
if (intel_sdvo_connector->force_audio)
- intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0;
+ intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0;
}
return status;
@@ -1415,10 +1377,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
if (!intel_sdvo_write_cmd(intel_sdvo,
SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
return connector_status_unknown;
- if (intel_sdvo->is_tv) {
- /* add 30ms delay when the output type is SDVO-TV */
+
+ /* add 30ms delay when the output type might be TV */
+ if (intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
mdelay(30);
- }
+
if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
return connector_status_unknown;
@@ -1472,8 +1436,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- drm_mode_connector_update_edid_property(connector, edid);
- drm_add_edid_modes(connector, edid);
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ drm_add_edid_modes(connector, edid);
+ }
connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -1713,12 +1679,12 @@ intel_sdvo_set_property(struct drm_connector *connector,
intel_sdvo_connector->force_audio = val;
- if (val > 0 && intel_sdvo->has_audio)
+ if (val > 0 && intel_sdvo->has_hdmi_audio)
return 0;
- if (val < 0 && !intel_sdvo->has_audio)
+ if (val < 0 && !intel_sdvo->has_hdmi_audio)
return 0;
- intel_sdvo->has_audio = val > 0;
+ intel_sdvo->has_hdmi_audio = val > 0;
goto done;
}
@@ -1942,9 +1908,12 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
speed = mapping->i2c_speed;
}
- sdvo->i2c = &dev_priv->gmbus[pin].adapter;
- intel_gmbus_set_speed(sdvo->i2c, speed);
- intel_gmbus_force_bit(sdvo->i2c, true);
+ if (pin < GMBUS_NUM_PORTS) {
+ sdvo->i2c = &dev_priv->gmbus[pin].adapter;
+ intel_gmbus_set_speed(sdvo->i2c, speed);
+ intel_gmbus_force_bit(sdvo->i2c, true);
+ } else
+ sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
}
static bool
@@ -2070,14 +2039,15 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+
intel_sdvo->is_hdmi = true;
}
intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT));
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
-
- intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
+ if (intel_sdvo->is_hdmi)
+ intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
return true;
}
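The final sdvo hunk bounds-checks the i2c pin taken from the VBT mapping before indexing the gmbus array, and otherwise falls back to a default port. A stand-alone sketch of that guard; the port count, names and default below are made up, not the driver's real tables:

#include <stdio.h>

#define NUM_PORTS    6 /* hypothetical number of gmbus ports */
#define DEFAULT_PORT 4 /* hypothetical fallback port */

static const char *port_name[NUM_PORTS] = {
	"ssc", "vga", "panel", "dpc", "dpb", "dpd"
};

/* An out-of-range pin from a bogus VBT entry selects the default port
 * instead of indexing past the end of the array. */
static const char *select_i2c_bus(unsigned int pin)
{
	if (pin < NUM_PORTS)
		return port_name[pin];
	return port_name[DEFAULT_PORT];
}

int main(void)
{
	printf("pin 2  -> %s\n", select_i2c_bus(2));
	printf("pin 42 -> %s\n", select_i2c_bus(42)); /* bogus VBT value */
	return 0;
}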
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 8e421f644a54..05efb5b9f13e 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -112,6 +112,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
base += 3;
break;
case ATOM_IIO_WRITE:
+ (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
base += 3;
break;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index df2b6f2b35f8..9fbabaa6ee44 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -253,7 +253,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
- atombios_blank_crtc(crtc, ATOM_ENABLE);
+ if (radeon_crtc->enabled)
+ atombios_blank_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_DISABLE);
@@ -530,7 +531,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
dp_clock = dig_connector->dp_clock;
}
}
-
+#if 0 /* doesn't work properly on some laptops */
/* use recommended ref_div for ss */
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (ss_enabled) {
@@ -540,7 +541,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
}
}
}
-
+#endif
if (ASIC_IS_AVIVO(rdev)) {
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 4dc5b4714c5a..7b337c361a12 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -748,6 +748,8 @@ void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
unsigned i;
u32 tmp;
+ WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
for (i = 0; i < rdev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -1922,7 +1924,6 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
{
struct evergreen_mc_save save;
- u32 srbm_reset = 0;
u32 grbm_reset = 0;
dev_info(rdev->dev, "GPU softreset \n");
@@ -1961,16 +1962,6 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
udelay(50);
WREG32(GRBM_SOFT_RESET, 0);
(void)RREG32(GRBM_SOFT_RESET);
-
- /* reset all the system blocks */
- srbm_reset = SRBM_SOFT_RESET_ALL_MASK;
-
- dev_info(rdev->dev, " SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
- WREG32(SRBM_SOFT_RESET, srbm_reset);
- (void)RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
- (void)RREG32(SRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
@@ -1981,10 +1972,6 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
- /* After reset we need to reinit the asic as GPU often endup in an
- * incoherent state.
- */
- atom_asic_init(rdev->mode_info.atom_context);
evergreen_mc_resume(rdev, &save);
return 0;
}
@@ -2596,6 +2583,11 @@ int evergreen_resume(struct radeon_device *rdev)
{
int r;
+ /* reset the asic, the gfx blocks are often in a bad state
+ * after the driver is unloaded or after a resume
+ */
+ if (radeon_asic_reset(rdev))
+ dev_warn(rdev->dev, "GPU reset failed !\n");
/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
* posting will perform necessary task to bring back GPU into good
* shape.
@@ -2712,6 +2704,11 @@ int evergreen_init(struct radeon_device *rdev)
r = radeon_atombios_init(rdev);
if (r)
return r;
+ /* reset the asic, the gfx blocks are often in a bad state
+ * after the driver is unloaded or after a resume
+ */
+ if (radeon_asic_reset(rdev))
+ dev_warn(rdev->dev, "GPU reset failed !\n");
/* Post card if necessary */
if (!evergreen_card_posted(rdev)) {
if (!rdev->bios) {
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 113c70cc8b39..a73b53c44359 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -174,6 +174,7 @@
#define HDP_NONSURFACE_BASE 0x2C04
#define HDP_NONSURFACE_INFO 0x2C08
#define HDP_NONSURFACE_SIZE 0x2C0C
+#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
#define HDP_TILING_CONFIG 0x2F3C
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index a3552594ccc4..9c92db7c896b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -878,12 +878,15 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
u32 tmp;
/* flush hdp cache so updates hit vram */
- if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
+ if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
+ !(rdev->flags & RADEON_IS_AGP)) {
void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
u32 tmp;
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
* rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
+ * This seems to cause problems on some AGP cards. Just use the old
+ * method for them.
*/
WREG32(HDP_DEBUG1, 0);
tmp = readl((void __iomem *)ptr);
@@ -1195,8 +1198,10 @@ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
mc->vram_end, mc->real_vram_size >> 20);
} else {
u64 base = 0;
- if (rdev->flags & RADEON_IS_IGP)
- base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
+ if (rdev->flags & RADEON_IS_IGP) {
+ base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
+ base <<= 24;
+ }
radeon_vram_location(rdev, &rdev->mc, base);
rdev->mc.gtt_base_align = 0;
radeon_gtt_location(rdev, mc);
@@ -1337,13 +1342,19 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev)
u32 srbm_status;
u32 grbm_status;
u32 grbm_status2;
+ struct r100_gpu_lockup *lockup;
int r;
+ if (rdev->family >= CHIP_RV770)
+ lockup = &rdev->config.rv770.lockup;
+ else
+ lockup = &rdev->config.r600.lockup;
+
srbm_status = RREG32(R_000E50_SRBM_STATUS);
grbm_status = RREG32(R_008010_GRBM_STATUS);
grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
if (!G_008010_GUI_ACTIVE(grbm_status)) {
- r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+ r100_gpu_lockup_update(lockup, &rdev->cp);
return false;
}
/* force CP activities */
@@ -1355,7 +1366,7 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev)
radeon_ring_unlock_commit(rdev);
}
rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
- return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
+ return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
int r600_asic_reset(struct radeon_device *rdev)
@@ -3483,10 +3494,12 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
- * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
+ * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
+ * This seems to cause problems on some AGP cards. Just use the old
+ * method for them.
*/
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
- rdev->vram_scratch.ptr) {
+ rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
u32 tmp;
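The MC_VM_FB_LOCATION hunk fixes a 32-bit shift overflow: the masked register value is shifted by 24 while still a u32, so bits that should land above bit 31 are lost before the result reaches the 64-bit base. A stand-alone demonstration with a made-up register value:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t fb_location = 0x00000300; /* hypothetical register value */

	/* Shift evaluated in 32 bits: the upper bits wrap away before the
	 * assignment widens the result to 64 bits. */
	uint64_t wrong = (fb_location & 0xFFFF) << 24;

	/* Widen first, then shift, as the fixed code does. */
	uint64_t base = fb_location & 0xFFFF;
	base <<= 24;

	printf("wrong: 0x%016" PRIX64 "\n", wrong); /* 0x0000000000000000 */
	printf("right: 0x%016" PRIX64 "\n", base);  /* 0x0000000300000000 */
	return 0;
}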
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 9bebac1ec006..7831e0890210 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -315,11 +315,10 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
/* the initial DDX does bad things with the CB size occasionally */
/* it rounds up height too far for slice tile max but the BO is smaller */
- tmp = (height - 7) * pitch * bpe;
- if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
- dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
- return -EINVAL;
- }
+ /* r600c,g also seem to flush at bad times in some apps resulting in
+ * bogus values here. So for linear just allow anything to avoid breaking
+ * broken userspace.
+ */
} else {
dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index d84612ae47e0..33cda016b083 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -86,6 +86,7 @@
#define R600_HDP_NONSURFACE_BASE 0x2c04
#define R600_BUS_CNTL 0x5420
+# define R600_BIOS_ROM_DIS (1 << 1)
#define R600_CONFIG_CNTL 0x5424
#define R600_CONFIG_MEMSIZE 0x5428
#define R600_CONFIG_F0_BASE 0x542C
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 87ead090c7d5..bc5a2c3382d9 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -98,6 +98,14 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
}
}
+ /* some DCE3 boards have bad data for this entry */
+ if (ASIC_IS_DCE3(rdev)) {
+ if ((i == 4) &&
+ (gpio->usClkMaskRegisterIndex == 0x1fda) &&
+ (gpio->sucI2cId.ucAccess == 0x94))
+ gpio->sucI2cId.ucAccess = 0x14;
+ }
+
if (gpio->sucI2cId.ucAccess == id) {
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
@@ -174,6 +182,14 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
}
}
+ /* some DCE3 boards have bad data for this entry */
+ if (ASIC_IS_DCE3(rdev)) {
+ if ((i == 4) &&
+ (gpio->usClkMaskRegisterIndex == 0x1fda) &&
+ (gpio->sucI2cId.ucAccess == 0x94))
+ gpio->sucI2cId.ucAccess = 0x14;
+ }
+
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
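Both hunks above patch one known-bad gpio table entry on DCE3 boards before it is used. A compact sketch of the quirk check, using simplified field names rather than the real atombios structures:

#include <stdio.h>

struct i2c_gpio_entry {
	unsigned int clk_mask_reg_index;
	unsigned int access_id;
};

/* Entry 4 with this register index and id is known to be wrong on some
 * DCE3 boards; rewrite the id before the entry is consumed. */
static void fixup_dce3_gpio(int index, struct i2c_gpio_entry *gpio)
{
	if (index == 4 &&
	    gpio->clk_mask_reg_index == 0x1fda &&
	    gpio->access_id == 0x94)
		gpio->access_id = 0x14;
}

int main(void)
{
	struct i2c_gpio_entry entry = { 0x1fda, 0x94 };

	fixup_dce3_gpio(4, &entry);
	printf("i2c id after fixup: 0x%02x\n", entry.access_id); /* 0x14 */
	return 0;
}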
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 654787ec43f4..8f2c7b50dcf5 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -130,6 +130,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
}
return true;
}
+
static bool r700_read_disabled_bios(struct radeon_device *rdev)
{
uint32_t viph_control;
@@ -143,7 +144,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev)
bool r;
viph_control = RREG32(RADEON_VIPH_CONTROL);
- bus_cntl = RREG32(RADEON_BUS_CNTL);
+ bus_cntl = RREG32(R600_BUS_CNTL);
d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
@@ -152,7 +153,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev)
/* disable VIP */
WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
/* enable the rom */
- WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+ WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
/* Disable VGA mode */
WREG32(AVIVO_D1VGA_CONTROL,
(d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
@@ -191,7 +192,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev)
cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
}
WREG32(RADEON_VIPH_CONTROL, viph_control);
- WREG32(RADEON_BUS_CNTL, bus_cntl);
+ WREG32(R600_BUS_CNTL, bus_cntl);
WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
@@ -216,7 +217,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev)
bool r;
viph_control = RREG32(RADEON_VIPH_CONTROL);
- bus_cntl = RREG32(RADEON_BUS_CNTL);
+ bus_cntl = RREG32(R600_BUS_CNTL);
d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
@@ -231,7 +232,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev)
/* disable VIP */
WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
/* enable the rom */
- WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+ WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
/* Disable VGA mode */
WREG32(AVIVO_D1VGA_CONTROL,
(d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
@@ -262,7 +263,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev)
/* restore regs */
WREG32(RADEON_VIPH_CONTROL, viph_control);
- WREG32(RADEON_BUS_CNTL, bus_cntl);
+ WREG32(R600_BUS_CNTL, bus_cntl);
WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 3bddea5b5295..137b8075f6e7 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -729,7 +729,7 @@ void radeon_combios_i2c_init(struct radeon_device *rdev)
clk = RBIOS8(offset + 3 + (i * 5) + 3);
data = RBIOS8(offset + 3 + (i * 5) + 4);
i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
- clk, data);
+ (1 << clk), (1 << data));
rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
break;
}
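The combios hunk converts GPIO line numbers read from the BIOS table into the bit masks the i2c setup expects. A tiny illustration with made-up line numbers:

#include <stdio.h>

int main(void)
{
	unsigned int clk_line = 3, data_line = 4; /* hypothetical table values */

	/* The table stores line numbers; the setup code wants masks,
	 * hence the (1 << n) conversion in the hunk above. */
	unsigned int clk_mask  = 1u << clk_line;  /* 0x08 */
	unsigned int data_mask = 1u << data_line; /* 0x10 */

	printf("clk mask 0x%02x, data mask 0x%02x\n", clk_mask, data_mask);
	return 0;
}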
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 3bef9f6d66fd..8afaf7a7459e 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1175,6 +1175,8 @@ radeon_add_atom_connector(struct drm_device *dev,
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -1190,6 +1192,8 @@ radeon_add_atom_connector(struct drm_device *dev,
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
@@ -1226,6 +1230,11 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.load_detect_property,
1);
}
+ connector->interlace_allowed = true;
+ if (connector_type == DRM_MODE_CONNECTOR_DVII)
+ connector->doublescan_allowed = true;
+ else
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
@@ -1256,6 +1265,11 @@ radeon_add_atom_connector(struct drm_device *dev,
0);
}
subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = true;
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
+ connector->doublescan_allowed = true;
+ else
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
@@ -1293,6 +1307,9 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.underscan_vborder_property,
0);
}
+ connector->interlace_allowed = true;
+ /* in theory with a DP to VGA converter... */
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
@@ -1308,6 +1325,8 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_atombios_get_tv_info(rdev));
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_LVDS:
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
@@ -1326,6 +1345,8 @@ radeon_add_atom_connector(struct drm_device *dev,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
break;
}
@@ -1403,6 +1424,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -1418,6 +1441,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
@@ -1435,6 +1460,11 @@ radeon_add_legacy_connector(struct drm_device *dev,
1);
}
subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = true;
+ if (connector_type == DRM_MODE_CONNECTOR_DVII)
+ connector->doublescan_allowed = true;
+ else
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
@@ -1457,6 +1487,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
radeon_combios_get_tv_info(rdev));
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_LVDS:
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
@@ -1470,6 +1502,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
break;
}
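The connector hunks set interlace and doublescan capability per connector type instead of leaving the defaults. A condensed sketch of the same policy; the enum values are illustrative, not the real DRM_MODE_CONNECTOR_* codes:

#include <stdbool.h>
#include <stdio.h>

enum conn_type { CONN_VGA, CONN_DVIA, CONN_DVII, CONN_DVID, CONN_HDMIA,
		 CONN_HDMIB, CONN_DP, CONN_TV, CONN_LVDS };

struct conn_caps {
	bool interlace_allowed;
	bool doublescan_allowed;
};

/* Mirrors the hunks above: VGA/DVI-A/DVI-I/HDMI-B take both, the other
 * digital outputs reject doublescan, TV and panels reject both. */
static struct conn_caps caps_for(enum conn_type type)
{
	switch (type) {
	case CONN_VGA:
	case CONN_DVIA:
	case CONN_DVII:
	case CONN_HDMIB:
		return (struct conn_caps){ true, true };
	case CONN_DVID:
	case CONN_HDMIA:
	case CONN_DP:
		return (struct conn_caps){ true, false };
	case CONN_TV:
	case CONN_LVDS:
	default:
		return (struct conn_caps){ false, false };
	}
}

int main(void)
{
	struct conn_caps c = caps_for(CONN_HDMIA);

	printf("HDMI-A: interlace=%d doublescan=%d\n",
	       c.interlace_allowed, c.doublescan_allowed);
	return 0;
}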
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index d8ac1849180d..501966a13f48 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -286,7 +286,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
mc->mc_vram_size = mc->aper_size;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
- dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+ dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
}
@@ -323,7 +323,7 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
}
mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
- dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
+ dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
@@ -910,11 +910,6 @@ int radeon_resume_kms(struct drm_device *dev)
radeon_pm_resume(rdev);
radeon_restore_bios_scratch_regs(rdev);
- /* turn on display hw */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- }
-
radeon_fbdev_set_suspend(rdev, 0);
release_console_sem();
@@ -922,6 +917,10 @@ int radeon_resume_kms(struct drm_device *dev)
radeon_hpd_init(rdev);
/* blat the mode back in */
drm_helper_resume_force_mode(dev);
+ /* turn on display hw */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ }
return 0;
}
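The dev_info changes widen the MC address format from %08llX to %016llX, so ranges above 4GB print at their full 64-bit width. A quick user-space comparison with a made-up address:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t vram_start = 0xF400000000ULL; /* hypothetical MC address above 4GB */

	/* %08 is only a minimum width, so large addresses print unevenly;
	 * %016 always shows all 16 hex digits. */
	printf("old: 0x%08" PRIX64 "\n", vram_start);  /* 0xF400000000 */
	printf("new: 0x%016" PRIX64 "\n", vram_start); /* 0x000000F400000000 */
	return 0;
}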
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 88e4ea925900..60e689f2d048 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -232,9 +232,28 @@ static struct drm_driver driver_old = {
static struct drm_driver kms_driver;
+static void radeon_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+ struct apertures_struct *ap;
+ bool primary = false;
+
+ ap = alloc_apertures(1);
+ ap->ranges[0].base = pci_resource_start(pdev, 0);
+ ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+ primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+ remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
+ kfree(ap);
+}
+
static int __devinit
radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ /* Get rid of things like offb */
+ radeon_kick_out_firmware_fb(pdev);
+
return drm_get_pci_dev(pdev, ent, &kms_driver);
}
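radeon_kick_out_firmware_fb() hands the driver's BAR 0 range to remove_conflicting_framebuffers() so firmware framebuffers (offb, efifb, vesafb) are unregistered before KMS binds. At its core that is an address-range overlap test; a minimal sketch with hypothetical apertures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct aperture {
	uint64_t base;
	uint64_t size;
};

/* Two half-open ranges [base, base + size) conflict when they overlap. */
static bool apertures_overlap(struct aperture a, struct aperture b)
{
	return a.base < b.base + b.size && b.base < a.base + a.size;
}

int main(void)
{
	struct aperture radeon_bar0 = { 0xe0000000, 256ull << 20 }; /* hypothetical BAR 0 */
	struct aperture firmware_fb = { 0xe0000000,   8ull << 20 }; /* firmware scanout */

	if (apertures_overlap(radeon_bar0, firmware_fb))
		printf("firmware framebuffer conflicts, kick it out first\n");
	return 0;
}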
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index efa211898fe6..6abea32be5e8 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -245,7 +245,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
goto out_unref;
}
info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
- info->apertures->ranges[0].size = rdev->mc.real_vram_size;
+ info->apertures->ranges[0].size = rdev->mc.aper_size;
info->fix.mmio_start = 0;
info->fix.mmio_len = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1d067743fee0..a598d0049aa5 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -69,7 +69,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
u32 c = 0;
rbo->placement.fpfn = 0;
- rbo->placement.lpfn = rbo->rdev->mc.active_vram_size >> PAGE_SHIFT;
+ rbo->placement.lpfn = 0;
rbo->placement.placement = rbo->placements;
rbo->placement.busy_placement = rbo->placements;
if (domain & RADEON_GEM_DOMAIN_VRAM)
@@ -91,7 +91,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
{
struct radeon_bo *bo;
enum ttm_bo_type type;
- int page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+ unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+ unsigned long max_size = 0;
int r;
if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
@@ -104,6 +105,14 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
}
*bo_ptr = NULL;
+	/* maximum bo size is the minimum of visible vram and gtt size */
+ max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+ if ((page_align << PAGE_SHIFT) >= max_size) {
+ printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n",
+ __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
+ return -ENOMEM;
+ }
+
retry:
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
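The radeon_bo_create() hunk rejects buffer objects at least as large as the smaller of visible VRAM and GTT, since such an object could never be placed in either pool. A simplified user-space version of the guard with made-up pool sizes:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ul << PAGE_SHIFT)

/* Round the requested size up to whole pages and refuse anything that
 * does not fit below the smaller of the two placement pools. */
static bool bo_size_ok(unsigned long bytes,
		       unsigned long visible_vram, unsigned long gtt_size)
{
	unsigned long pages = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long max_size = visible_vram < gtt_size ? visible_vram : gtt_size;

	return (pages << PAGE_SHIFT) < max_size;
}

int main(void)
{
	unsigned long visible_vram = 256ul << 20; /* hypothetical 256MB */
	unsigned long gtt          = 512ul << 20; /* hypothetical 512MB */

	printf("64MB  bo: %s\n", bo_size_ok(64ul << 20, visible_vram, gtt) ? "ok" : "too big");
	printf("300MB bo: %s\n", bo_size_ok(300ul << 20, visible_vram, gtt) ? "ok" : "too big");
	return 0;
}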
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 148a322d8f5d..934a96a78540 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1472,8 +1472,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
list_del(&bdev->device_list);
mutex_unlock(&glob->device_list_mutex);
- if (!cancel_delayed_work(&bdev->wq))
- flush_scheduled_work();
+ cancel_delayed_work_sync(&bdev->wq);
while (ttm_bo_delayed_delete(bdev, true))
;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 41d9a5b73c03..fe096a7cc0d7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -659,7 +659,7 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
par->dirty.active = false;
spin_unlock_irqrestore(&par->dirty.lock, flags);
- flush_scheduled_work();
+ flush_delayed_work_sync(&info->deferred_work);
par->bo_ptr = NULL;
ttm_bo_kunmap(&par->map);