From 13a5660c137e4c9ca88e0bb20d518eff016bcfbc Mon Sep 17 00:00:00 2001 From: Deepak S Date: Fri, 23 May 2014 21:00:21 +0530 Subject: drm/i915/chv: Freq(opcode) request for CHV. On CHV, All the freq request should be even. So, we need to make sure we request the opcode accordingly. v2: Avoid vairable for freq request (ville) Signed-off-by: Deepak S Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index cf288a95347c..218ef085cf73 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1252,8 +1252,10 @@ static void gen6_pm_rps_work(struct work_struct *work) if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { if (adj > 0) adj *= 2; - else - adj = 1; + else { + /* CHV needs even encode values */ + adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1; + } new_delay = dev_priv->rps.cur_freq + adj; /* @@ -1271,8 +1273,10 @@ static void gen6_pm_rps_work(struct work_struct *work) } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { if (adj < 0) adj *= 2; - else - adj = -1; + else { + /* CHV needs even encode values */ + adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1; + } new_delay = dev_priv->rps.cur_freq + adj; } else { /* unknown event */ new_delay = dev_priv->rps.cur_freq; -- cgit v1.2.3 From 84c33a64b4e29757d04ba1b285b96ee160d5ff93 Mon Sep 17 00:00:00 2001 From: Sourab Gupta Date: Mon, 2 Jun 2014 16:47:17 +0530 Subject: drm/i915: Replaced Blitter ring based flips with MMIO flips This patch enables the framework for using MMIO based flip calls, in contrast with the CS based flip calls which are being used currently. MMIO based flip calls can be enabled on architectures where Render and Blitter engines reside in different power wells. The decision to use MMIO flips can be made based on workloads to give 100% residency for Media power well. v2: The MMIO flips now use the interrupt driven mechanism for issuing the flips when target seqno is reached. (Incorporating Ville's idea) v3: Rebasing on latest code. Code restructuring after incorporating Damien's comments v4: Addressing Ville's review comments -general cleanup -updating only base addr instead of calling update_primary_plane -extending patch for gen5+ platforms v5: Addressed Ville's review comments -Making mmio flip vs cs flip selection based on module parameter -Adding check for DRIVER_MODESET feature in notify_ring before calling notify mmio flip. -Other changes mostly in function arguments v6: -Having a seperate function to check condition for using mmio flips (Ville) -propogating error code from i915_gem_check_olr (Ville) v7: -Adding __must_check with i915_gem_check_olr (Chris) -Renaming mmio_flip_data to mmio_flip (Chris) -Rebasing on latest nightly v8: -Rebasing on latest code -squash 3rd patch in series(mmio setbase vs page flip race) with this patch -Added new tiling mode update in intel_do_mmio_flip (Chris) v9: -check for obj->last_write_seqno being 0 instead of obj->ring being NULL in intel_postpone_flip, as this is a more restrictive condition (Chris) v10: -Applied Chris's suggestions for squashing patches 2,3 into this patch. These patches make the selection of CS vs MMIO flip at the page flip time, and make the module parameter for using mmio flips as tristate, the states being 'force CS flips', 'force mmio flips', 'driver discretion'. 
Changed the logic for driver discretion (Chris) v11: Minor code cleanup(better readability, fixing whitespace errors, using lockdep to check mutex locked status in postpone_flip, removal of __must_check in function definition) (Chris) Reviewed-by: Chris Wilson Signed-off-by: Sourab Gupta Signed-off-by: Akash Goel Tested-by: Chris Wilson # snb, ivb [danvet: Fix up parameter alignement checkpatch spotted.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 8 ++ drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/i915_irq.c | 3 + drivers/gpu/drm/i915/i915_params.c | 5 ++ drivers/gpu/drm/i915/intel_display.c | 148 ++++++++++++++++++++++++++++++++++- drivers/gpu/drm/i915/intel_drv.h | 6 ++ 7 files changed, 171 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 0f98957b5246..10009fc6b12f 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1575,6 +1575,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) spin_lock_init(&dev_priv->backlight_lock); spin_lock_init(&dev_priv->uncore.lock); spin_lock_init(&dev_priv->mm.object_stat_lock); + spin_lock_init(&dev_priv->mmio_flip_lock); mutex_init(&dev_priv->dpio_lock); mutex_init(&dev_priv->modeset_restore_lock); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c5b02159898b..9bfc242af87f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1370,6 +1370,9 @@ struct drm_i915_private { /* protects the irq masks */ spinlock_t irq_lock; + /* protects the mmio flip data */ + spinlock_t mmio_flip_lock; + bool display_irqs_enabled; /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ @@ -2043,6 +2046,7 @@ struct i915_params { bool reset; bool disable_display; bool disable_vtd_wa; + int use_mmio_flip; }; extern struct i915_params i915 __read_mostly; @@ -2239,6 +2243,8 @@ bool i915_gem_retire_requests(struct drm_device *dev); void i915_gem_retire_requests_ring(struct intel_engine_cs *ring); int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, bool interruptible); +int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno); + static inline bool i915_reset_in_progress(struct i915_gpu_error *error) { return unlikely(atomic_read(&error->reset_counter) @@ -2608,6 +2614,8 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data, int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, struct drm_file *file); +void intel_notify_mmio_flip(struct intel_engine_cs *ring); + /* overlay */ extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 234aa1ca2eb8..7f643db26829 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1095,7 +1095,7 @@ i915_gem_check_wedge(struct i915_gpu_error *error, * Compare seqno against outstanding lazy request. Emit a request if they are * equal. 
*/ -static int +int i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno) { int ret; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 218ef085cf73..c7d4cb7600e5 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1218,6 +1218,9 @@ static void notify_ring(struct drm_device *dev, trace_i915_gem_request_complete(ring); + if (drm_core_check_feature(dev, DRIVER_MODESET)) + intel_notify_mmio_flip(ring); + wake_up_all(&ring->irq_queue); i915_queue_hangcheck(dev); } diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index d05a2afa17dc..81457293cd3e 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -48,6 +48,7 @@ struct i915_params i915 __read_mostly = { .disable_display = 0, .enable_cmd_parser = 1, .disable_vtd_wa = 0, + .use_mmio_flip = 0, }; module_param_named(modeset, i915.modeset, int, 0400); @@ -156,3 +157,7 @@ MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)" module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600); MODULE_PARM_DESC(enable_cmd_parser, "Enable command parsing (1=enabled [default], 0=disabled)"); + +module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600); +MODULE_PARM_DESC(use_mmio_flip, + "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)"); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a514779a5918..5e8e7113b453 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9248,6 +9248,147 @@ static int intel_gen7_queue_flip(struct drm_device *dev, return 0; } +static bool use_mmio_flip(struct intel_engine_cs *ring, + struct drm_i915_gem_object *obj) +{ + /* + * This is not being used for older platforms, because + * non-availability of flip done interrupt forces us to use + * CS flips. Older platforms derive flip done using some clever + * tricks involving the flip_pending status bits and vblank irqs. + * So using MMIO flips there would disrupt this mechanism. 
+ */ + + if (INTEL_INFO(ring->dev)->gen < 5) + return false; + + if (i915.use_mmio_flip < 0) + return false; + else if (i915.use_mmio_flip > 0) + return true; + else + return ring != obj->ring; +} + +static void intel_do_mmio_flip(struct intel_crtc *intel_crtc) +{ + struct drm_device *dev = intel_crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_framebuffer *intel_fb = + to_intel_framebuffer(intel_crtc->base.primary->fb); + struct drm_i915_gem_object *obj = intel_fb->obj; + u32 dspcntr; + u32 reg; + + intel_mark_page_flip_active(intel_crtc); + + reg = DSPCNTR(intel_crtc->plane); + dspcntr = I915_READ(reg); + + if (INTEL_INFO(dev)->gen >= 4) { + if (obj->tiling_mode != I915_TILING_NONE) + dspcntr |= DISPPLANE_TILED; + else + dspcntr &= ~DISPPLANE_TILED; + } + I915_WRITE(reg, dspcntr); + + I915_WRITE(DSPSURF(intel_crtc->plane), + intel_crtc->unpin_work->gtt_offset); + POSTING_READ(DSPSURF(intel_crtc->plane)); +} + +static int intel_postpone_flip(struct drm_i915_gem_object *obj) +{ + struct intel_engine_cs *ring; + int ret; + + lockdep_assert_held(&obj->base.dev->struct_mutex); + + if (!obj->last_write_seqno) + return 0; + + ring = obj->ring; + + if (i915_seqno_passed(ring->get_seqno(ring, true), + obj->last_write_seqno)) + return 0; + + ret = i915_gem_check_olr(ring, obj->last_write_seqno); + if (ret) + return ret; + + if (WARN_ON(!ring->irq_get(ring))) + return 0; + + return 1; +} + +void intel_notify_mmio_flip(struct intel_engine_cs *ring) +{ + struct drm_i915_private *dev_priv = to_i915(ring->dev); + struct intel_crtc *intel_crtc; + unsigned long irq_flags; + u32 seqno; + + seqno = ring->get_seqno(ring, false); + + spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags); + for_each_intel_crtc(ring->dev, intel_crtc) { + struct intel_mmio_flip *mmio_flip; + + mmio_flip = &intel_crtc->mmio_flip; + if (mmio_flip->seqno == 0) + continue; + + if (ring->id != mmio_flip->ring_id) + continue; + + if (i915_seqno_passed(seqno, mmio_flip->seqno)) { + intel_do_mmio_flip(intel_crtc); + mmio_flip->seqno = 0; + ring->irq_put(ring); + } + } + spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags); +} + +static int intel_queue_mmio_flip(struct drm_device *dev, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_i915_gem_object *obj, + struct intel_engine_cs *ring, + uint32_t flags) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + unsigned long irq_flags; + int ret; + + if (WARN_ON(intel_crtc->mmio_flip.seqno)) + return -EBUSY; + + ret = intel_postpone_flip(obj); + if (ret < 0) + return ret; + if (ret == 0) { + intel_do_mmio_flip(intel_crtc); + return 0; + } + + spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags); + intel_crtc->mmio_flip.seqno = obj->last_write_seqno; + intel_crtc->mmio_flip.ring_id = obj->ring->id; + spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags); + + /* + * Double check to catch cases where irq fired before + * mmio flip data was ready + */ + intel_notify_mmio_flip(obj->ring); + return 0; +} + static int intel_default_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, @@ -9358,7 +9499,12 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->gtt_offset = i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset; - ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, page_flip_flags); + if (use_mmio_flip(ring, obj)) + ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring, + page_flip_flags); + 
else + ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, + page_flip_flags); if (ret) goto cleanup_unpin; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 9aa95f01b160..cac753932032 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -358,6 +358,11 @@ struct intel_pipe_wm { bool sprites_scaled; }; +struct intel_mmio_flip { + u32 seqno; + u32 ring_id; +}; + struct intel_crtc { struct drm_crtc base; enum pipe pipe; @@ -411,6 +416,7 @@ struct intel_crtc { wait_queue_head_t vbl_wait; int scanline_offset; + struct intel_mmio_flip mmio_flip; }; struct intel_plane_wm_parameters { -- cgit v1.2.3 From 72c90f625ccf7aa348f3beae236e09cc837308ee Mon Sep 17 00:00:00 2001 From: Oscar Mateo Date: Mon, 16 Jun 2014 16:10:57 +0100 Subject: drm/i915: Ack interrupts before handling them (GEN5 - GEN7) Otherwise, we might receive a new interrupt before we have time to ack the first one, eventually missing it. According to BSPec, the right order should be: 1 - Disable Master Interrupt Control. 2 - Find the source(s) of the interrupt. 3 - Clear the Interrupt Identity bits (IIR). 4 - Process the interrupt(s) that had bits set in the IIRs. 5 - Re-enable Master Interrupt Control. Without an atomic XCHG operation with mmio space, the above merely reduces the window in which we can miss an interrupt (especially when you consider how heavyweight the I915_READ/I915_WRITE operations are). We maintain the "disable SDE interrupts when handling" hack since apparently it works. Spotted by Bob Beckett . v2: Add warning to commit message and comments to the code as per Chris Wilson's request. v3: Improve the source comments. Signed-off-by: Oscar Mateo Reviewed-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index c7d4cb7600e5..35c7e3a5b046 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2139,6 +2139,14 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) } } +/* + * To handle irqs with the minimum potential races with fresh interrupts, we: + * 1 - Disable Master Interrupt Control. + * 2 - Find the source(s) of the interrupt. + * 3 - Clear the Interrupt Identity bits (IIR). + * 4 - Process the interrupt(s) that had bits set in the IIRs. + * 5 - Re-enable Master Interrupt Control. 
+ */ static irqreturn_t ironlake_irq_handler(int irq, void *arg) { struct drm_device *dev = arg; @@ -2166,32 +2174,34 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) POSTING_READ(SDEIER); } + /* Find, clear, then process each source of interrupt */ + gt_iir = I915_READ(GTIIR); if (gt_iir) { + I915_WRITE(GTIIR, gt_iir); + ret = IRQ_HANDLED; if (INTEL_INFO(dev)->gen >= 6) snb_gt_irq_handler(dev, dev_priv, gt_iir); else ilk_gt_irq_handler(dev, dev_priv, gt_iir); - I915_WRITE(GTIIR, gt_iir); - ret = IRQ_HANDLED; } de_iir = I915_READ(DEIIR); if (de_iir) { + I915_WRITE(DEIIR, de_iir); + ret = IRQ_HANDLED; if (INTEL_INFO(dev)->gen >= 7) ivb_display_irq_handler(dev, de_iir); else ilk_display_irq_handler(dev, de_iir); - I915_WRITE(DEIIR, de_iir); - ret = IRQ_HANDLED; } if (INTEL_INFO(dev)->gen >= 6) { u32 pm_iir = I915_READ(GEN6_PMIIR); if (pm_iir) { - gen6_rps_irq_handler(dev_priv, pm_iir); I915_WRITE(GEN6_PMIIR, pm_iir); ret = IRQ_HANDLED; + gen6_rps_irq_handler(dev_priv, pm_iir); } } -- cgit v1.2.3 From 3ff60f89bc4836583f5bd195062f16c563bd97aa Mon Sep 17 00:00:00 2001 From: Oscar Mateo Date: Mon, 16 Jun 2014 16:10:58 +0100 Subject: drm/i915/vlv: Ack interrupts before handling them (VLV) Otherwise, we might receive a new interrupt before we have time to ack the first one, eventually missing it. Without an atomic XCHG operation with mmio space, this patch merely reduces the window in which we can miss an interrupt (especially when you consider how heavyweight the I915_READ/I915_WRITE operations are). Notice that, before clearing a port-sourced interrupt in the IIR, the corresponding interrupt source status in the PORT_HOTPLUG_STAT must be cleared. Spotted by Bob Beckett . v2: - Reorder the IIR clearing to reduce the window even further. - Add warning to commit message and comments to the code as per Chris Wilson's request. - Imre Deak pointed out that the pipe underrun flag might not be signaled in IIR, so do not make valleyview_pipestat_irq_handler depend on it. v3: Improve the source code comment. Signed-off-by: Oscar Mateo Reviewed-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 67 +++++++++++++++++++++++------------------ 1 file changed, 38 insertions(+), 29 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 35c7e3a5b046..c434a6848c0f 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1816,26 +1816,28 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); - if (IS_G4X(dev)) { - u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; + if (hotplug_status) { + I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); + /* + * Make sure hotplug status is cleared before we clear IIR, or else we + * may miss hotplug events. 
+ */ + POSTING_READ(PORT_HOTPLUG_STAT); - intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x); - } else { - u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; + if (IS_G4X(dev)) { + u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; - intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); - } + intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x); + } else { + u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; - if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && - hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) - dp_aux_irq_handler(dev); + intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); + } - I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); - /* - * Make sure hotplug status is cleared before we clear IIR, or else we - * may miss hotplug events. - */ - POSTING_READ(PORT_HOTPLUG_STAT); + if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && + hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) + dp_aux_irq_handler(dev); + } } static irqreturn_t valleyview_irq_handler(int irq, void *arg) @@ -1846,29 +1848,36 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) irqreturn_t ret = IRQ_NONE; while (true) { - iir = I915_READ(VLV_IIR); + /* Find, clear, then process each source of interrupt */ + gt_iir = I915_READ(GTIIR); + if (gt_iir) + I915_WRITE(GTIIR, gt_iir); + pm_iir = I915_READ(GEN6_PMIIR); + if (pm_iir) + I915_WRITE(GEN6_PMIIR, pm_iir); + + iir = I915_READ(VLV_IIR); + if (iir) { + /* Consume port before clearing IIR or we'll miss events */ + if (iir & I915_DISPLAY_PORT_INTERRUPT) + i9xx_hpd_irq_handler(dev); + I915_WRITE(VLV_IIR, iir); + } if (gt_iir == 0 && pm_iir == 0 && iir == 0) goto out; ret = IRQ_HANDLED; - snb_gt_irq_handler(dev, dev_priv, gt_iir); - - valleyview_pipestat_irq_handler(dev, iir); - - /* Consume port. Then clear IIR or we'll miss events */ - if (iir & I915_DISPLAY_PORT_INTERRUPT) - i9xx_hpd_irq_handler(dev); - + if (gt_iir) + snb_gt_irq_handler(dev, dev_priv, gt_iir); if (pm_iir) gen6_rps_irq_handler(dev_priv, pm_iir); - - I915_WRITE(GTIIR, gt_iir); - I915_WRITE(GEN6_PMIIR, pm_iir); - I915_WRITE(VLV_IIR, iir); + /* Call regardless, as some status bits might not be + * signalled in iir */ + valleyview_pipestat_irq_handler(dev, iir); } out: -- cgit v1.2.3 From 38cc46d73ed99dd7002f1406002e52d7975d16cc Mon Sep 17 00:00:00 2001 From: Oscar Mateo Date: Mon, 16 Jun 2014 16:10:59 +0100 Subject: drm/i915/bdw: Ack interrupts before handling them (GEN8) Otherwise, we might receive a new interrupt before we have time to ack the first one, eventually missing it. The right order should be: 1 - Disable Master Interrupt Control. 2 - Find the category of interrupt that is pending. 3 - Find the source(s) of the interrupt and clear the Interrupt Identity bits (IIR) 4 - Process the interrupt(s) that had bits set in the IIRs. 5 - Re-enable Master Interrupt Control. Without an atomic XCHG operation with mmio space, the above merely reduces the window in which we can miss an interrupt (especially when you consider how heavyweight the I915_READ/I915_WRITE operations are). Spotted by Bob Beckett . v2: Add warning to commit message and comments to the code as per Chris Wilson's request. v3: Improve the source code comment. 
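
The rule all of these ack-first patches converge on is "clear the IIR before acting on it". A minimal sketch of that shape (illustrative only: handle_one_iir() is not a real i915 function, and the handlers in the hunks below open-code the same sequence separately for each IIR register):

	static irqreturn_t handle_one_iir(struct drm_i915_private *dev_priv, u32 reg)
	{
		u32 iir = I915_READ(reg);

		if (!iir)
			return IRQ_NONE;

		/*
		 * Ack first: an event arriving while we service this one will
		 * re-assert IIR (and re-raise the currently masked master
		 * interrupt) instead of being wiped out together with the
		 * bits we are about to clear.
		 */
		I915_WRITE(reg, iir);

		/* ... now process the bits latched in iir ... */

		return IRQ_HANDLED;
	}
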
Signed-off-by: Oscar Mateo Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 91 ++++++++++++++++++++--------------------- 1 file changed, 45 insertions(+), 46 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index c434a6848c0f..b6debdc869c8 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1465,6 +1465,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { tmp = I915_READ(GEN8_GT_IIR(0)); if (tmp) { + I915_WRITE(GEN8_GT_IIR(0), tmp); ret = IRQ_HANDLED; rcs = tmp >> GEN8_RCS_IRQ_SHIFT; bcs = tmp >> GEN8_BCS_IRQ_SHIFT; @@ -1472,7 +1473,6 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, notify_ring(dev, &dev_priv->ring[RCS]); if (bcs & GT_RENDER_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[BCS]); - I915_WRITE(GEN8_GT_IIR(0), tmp); } else DRM_ERROR("The master control interrupt lied (GT0)!\n"); } @@ -1480,6 +1480,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { tmp = I915_READ(GEN8_GT_IIR(1)); if (tmp) { + I915_WRITE(GEN8_GT_IIR(1), tmp); ret = IRQ_HANDLED; vcs = tmp >> GEN8_VCS1_IRQ_SHIFT; if (vcs & GT_RENDER_USER_INTERRUPT) @@ -1487,7 +1488,6 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, vcs = tmp >> GEN8_VCS2_IRQ_SHIFT; if (vcs & GT_RENDER_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[VCS2]); - I915_WRITE(GEN8_GT_IIR(1), tmp); } else DRM_ERROR("The master control interrupt lied (GT1)!\n"); } @@ -1495,10 +1495,10 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, if (master_ctl & GEN8_GT_PM_IRQ) { tmp = I915_READ(GEN8_GT_IIR(2)); if (tmp & dev_priv->pm_rps_events) { - ret = IRQ_HANDLED; - gen8_rps_irq_handler(dev_priv, tmp); I915_WRITE(GEN8_GT_IIR(2), tmp & dev_priv->pm_rps_events); + ret = IRQ_HANDLED; + gen8_rps_irq_handler(dev_priv, tmp); } else DRM_ERROR("The master control interrupt lied (PM)!\n"); } @@ -1506,11 +1506,11 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, if (master_ctl & GEN8_GT_VECS_IRQ) { tmp = I915_READ(GEN8_GT_IIR(3)); if (tmp) { + I915_WRITE(GEN8_GT_IIR(3), tmp); ret = IRQ_HANDLED; vcs = tmp >> GEN8_VECS_IRQ_SHIFT; if (vcs & GT_RENDER_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[VECS]); - I915_WRITE(GEN8_GT_IIR(3), tmp); } else DRM_ERROR("The master control interrupt lied (GT3)!\n"); } @@ -2241,36 +2241,36 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) I915_WRITE(GEN8_MASTER_IRQ, 0); POSTING_READ(GEN8_MASTER_IRQ); + /* Find, clear, then process each source of interrupt */ + ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl); if (master_ctl & GEN8_DE_MISC_IRQ) { tmp = I915_READ(GEN8_DE_MISC_IIR); - if (tmp & GEN8_DE_MISC_GSE) - intel_opregion_asle_intr(dev); - else if (tmp) - DRM_ERROR("Unexpected DE Misc interrupt\n"); - else - DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); - if (tmp) { I915_WRITE(GEN8_DE_MISC_IIR, tmp); ret = IRQ_HANDLED; + if (tmp & GEN8_DE_MISC_GSE) + intel_opregion_asle_intr(dev); + else + DRM_ERROR("Unexpected DE Misc interrupt\n"); } + else + DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); } if (master_ctl & GEN8_DE_PORT_IRQ) { tmp = I915_READ(GEN8_DE_PORT_IIR); - if (tmp & GEN8_AUX_CHANNEL_A) - dp_aux_irq_handler(dev); - else if (tmp) - DRM_ERROR("Unexpected DE Port interrupt\n"); - else - DRM_ERROR("The master control interrupt lied (DE 
PORT)!\n"); - if (tmp) { I915_WRITE(GEN8_DE_PORT_IIR, tmp); ret = IRQ_HANDLED; + if (tmp & GEN8_AUX_CHANNEL_A) + dp_aux_irq_handler(dev); + else + DRM_ERROR("Unexpected DE Port interrupt\n"); } + else + DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); } for_each_pipe(pipe) { @@ -2280,33 +2280,32 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) continue; pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); - if (pipe_iir & GEN8_PIPE_VBLANK) - intel_pipe_handle_vblank(dev, pipe); - - if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) { - intel_prepare_page_flip(dev, pipe); - intel_finish_page_flip_plane(dev, pipe); - } + if (pipe_iir) { + ret = IRQ_HANDLED; + I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); + if (pipe_iir & GEN8_PIPE_VBLANK) + intel_pipe_handle_vblank(dev, pipe); - if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) - hsw_pipe_crc_irq_handler(dev, pipe); + if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) { + intel_prepare_page_flip(dev, pipe); + intel_finish_page_flip_plane(dev, pipe); + } - if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { - if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, - false)) - DRM_ERROR("Pipe %c FIFO underrun\n", - pipe_name(pipe)); - } + if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) + hsw_pipe_crc_irq_handler(dev, pipe); - if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { - DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", - pipe_name(pipe), - pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); - } + if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { + if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, + false)) + DRM_ERROR("Pipe %c FIFO underrun\n", + pipe_name(pipe)); + } - if (pipe_iir) { - ret = IRQ_HANDLED; - I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); + if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { + DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", + pipe_name(pipe), + pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); + } } else DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); } @@ -2318,13 +2317,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) * on older pch-split platforms. But this needs testing. */ u32 pch_iir = I915_READ(SDEIIR); - - cpt_irq_handler(dev, pch_iir); - if (pch_iir) { I915_WRITE(SDEIIR, pch_iir); ret = IRQ_HANDLED; - } + cpt_irq_handler(dev, pch_iir); + } else + DRM_ERROR("The master control interrupt lied (SDE)!\n"); + } I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); -- cgit v1.2.3 From 27b6c122512ca30399bb1b39cc42eda83901f304 Mon Sep 17 00:00:00 2001 From: Oscar Mateo Date: Mon, 16 Jun 2014 16:11:00 +0100 Subject: drm/i915/chv: Ack interrupts before handling them (CHV) Otherwise, we might receive a new interrupt before we have time to ack the first one, eventually missing it. Without an atomic XCHG operation with mmio space, this patch merely reduces the window in which we can miss an interrupt (especially when you consider how heavyweight the I915_READ/I915_WRITE operations are). Notice that, before clearing a port-sourced interrupt in the IIR, the corresponding interrupt source status in the PORT_HOTPLUG_STAT must be cleared. Spotted by Bob Beckett . v2: - Add warning to commit message and comments to the code as per Chris Wilson's request. - Imre Deak pointed out that the pipe underrun flag might not be signaled in IIR, so do not make valleyview_pipestat_irq_handler depend on it. v3: Improve the source code comment. 
Signed-off-by: Oscar Mateo Reviewed-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index b6debdc869c8..73f1849cfb75 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1898,21 +1898,27 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) if (master_ctl == 0 && iir == 0) break; + ret = IRQ_HANDLED; + I915_WRITE(GEN8_MASTER_IRQ, 0); - gen8_gt_irq_handler(dev, dev_priv, master_ctl); + /* Find, clear, then process each source of interrupt */ - valleyview_pipestat_irq_handler(dev, iir); + if (iir) { + /* Consume port before clearing IIR or we'll miss events */ + if (iir & I915_DISPLAY_PORT_INTERRUPT) + i9xx_hpd_irq_handler(dev); + I915_WRITE(VLV_IIR, iir); + } - /* Consume port. Then clear IIR or we'll miss events */ - i9xx_hpd_irq_handler(dev); + gen8_gt_irq_handler(dev, dev_priv, master_ctl); - I915_WRITE(VLV_IIR, iir); + /* Call regardless, as some status bits might not be + * signalled in iir */ + valleyview_pipestat_irq_handler(dev, iir); I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); POSTING_READ(GEN8_MASTER_IRQ); - - ret = IRQ_HANDLED; } return ret; -- cgit v1.2.3 From 13cf550448b58abf8f44f5d6a560f2d20871c965 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 18 Jun 2014 11:29:35 +1000 Subject: drm/i915: rework digital port IRQ handling (v2) The digital ports from Ironlake and up have the ability to distinguish between long and short HPD pulses. Displayport 1.1 only uses the short form to request link retraining usually, so we haven't really needed support for it until now. However with DP 1.2 MST we need to handle the short irqs on their own outside the modesetting locking the long hpd's involve. This patch adds the framework to distinguish between short/long to the current code base, to lay the basis for future DP 1.2 MST work. This should mean we get better bisectability in case of regression due to the new irq handling. v2: add GM45 support (untested, due to lack of hw) Signed-off-by: Dave Airlie Reviewed-by: Todd Previte [danvet: Fix conflicts in i915_irq.c with Oscar Mateo's irq handling race fixes and a trivial one in intel_drv.h with the psr code.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 5 ++ drivers/gpu/drm/i915/i915_irq.c | 160 +++++++++++++++++++++++++++++++++++++-- drivers/gpu/drm/i915/intel_ddi.c | 3 + drivers/gpu/drm/i915/intel_dp.c | 20 +++++ drivers/gpu/drm/i915/intel_drv.h | 4 +- 5 files changed, 183 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 38362863b1dc..b560efc6eb35 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1559,6 +1559,11 @@ struct drm_i915_private { struct i915_runtime_pm pm; + struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS]; + u32 long_hpd_port_mask; + u32 short_hpd_port_mask; + struct work_struct dig_port_work; + /* Old dri1 support infrastructure, beware the dragons ya fools entering * here! 
*/ struct i915_dri1_state dri1; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 1c1ec22bc7ef..c50e3b41d6fe 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1090,6 +1090,53 @@ static bool intel_hpd_irq_event(struct drm_device *dev, return true; } +static void i915_digport_work_func(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, struct drm_i915_private, dig_port_work); + unsigned long irqflags; + u32 long_port_mask, short_port_mask; + struct intel_digital_port *intel_dig_port; + int i, ret; + u32 old_bits = 0; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + long_port_mask = dev_priv->long_hpd_port_mask; + dev_priv->long_hpd_port_mask = 0; + short_port_mask = dev_priv->short_hpd_port_mask; + dev_priv->short_hpd_port_mask = 0; + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + for (i = 0; i < I915_MAX_PORTS; i++) { + bool valid = false; + bool long_hpd = false; + intel_dig_port = dev_priv->hpd_irq_port[i]; + if (!intel_dig_port || !intel_dig_port->hpd_pulse) + continue; + + if (long_port_mask & (1 << i)) { + valid = true; + long_hpd = true; + } else if (short_port_mask & (1 << i)) + valid = true; + + if (valid) { + ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd); + if (ret == true) { + /* if we get true fallback to old school hpd */ + old_bits |= (1 << intel_dig_port->base.hpd_pin); + } + } + } + + if (old_bits) { + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + dev_priv->hpd_event_bits |= old_bits; + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + schedule_work(&dev_priv->hotplug_work); + } +} + /* * Handle hotplug events outside the interrupt handler proper. */ @@ -1521,23 +1568,104 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, #define HPD_STORM_DETECT_PERIOD 1000 #define HPD_STORM_THRESHOLD 5 +static int ilk_port_to_hotplug_shift(enum port port) +{ + switch (port) { + case PORT_A: + case PORT_E: + default: + return -1; + case PORT_B: + return 0; + case PORT_C: + return 8; + case PORT_D: + return 16; + } +} + +static int g4x_port_to_hotplug_shift(enum port port) +{ + switch (port) { + case PORT_A: + case PORT_E: + default: + return -1; + case PORT_B: + return 17; + case PORT_C: + return 19; + case PORT_D: + return 21; + } +} + +static inline enum port get_port_from_pin(enum hpd_pin pin) +{ + switch (pin) { + case HPD_PORT_B: + return PORT_B; + case HPD_PORT_C: + return PORT_C; + case HPD_PORT_D: + return PORT_D; + default: + return PORT_A; /* no hpd */ + } +} + static inline void intel_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, + u32 dig_hotplug_reg, const u32 *hpd) { struct drm_i915_private *dev_priv = dev->dev_private; int i; + enum port port; bool storm_detected = false; + bool queue_dig = false, queue_hp = false; + u32 dig_shift; + u32 dig_port_mask = 0; if (!hotplug_trigger) return; - DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", - hotplug_trigger); + DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n", + hotplug_trigger, dig_hotplug_reg); spin_lock(&dev_priv->irq_lock); for (i = 1; i < HPD_NUM_PINS; i++) { + if (!(hpd[i] & hotplug_trigger)) + continue; + + port = get_port_from_pin(i); + if (port && dev_priv->hpd_irq_port[port]) { + bool long_hpd; + + if (IS_G4X(dev)) { + dig_shift = g4x_port_to_hotplug_shift(port); + long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; + } else { + dig_shift = ilk_port_to_hotplug_shift(port); + long_hpd = 
(dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; + } + + DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd); + /* for long HPD pulses we want to have the digital queue happen, + but we still want HPD storm detection to function. */ + if (long_hpd) { + dev_priv->long_hpd_port_mask |= (1 << port); + dig_port_mask |= hpd[i]; + } else { + /* for short HPD just trigger the digital queue */ + dev_priv->short_hpd_port_mask |= (1 << port); + hotplug_trigger &= ~hpd[i]; + } + queue_dig = true; + } + } + for (i = 1; i < HPD_NUM_PINS; i++) { if (hpd[i] & hotplug_trigger && dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) { /* @@ -1557,7 +1685,11 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) continue; - dev_priv->hpd_event_bits |= (1 << i); + if (!(dig_port_mask & hpd[i])) { + dev_priv->hpd_event_bits |= (1 << i); + queue_hp = true; + } + if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { @@ -1586,7 +1718,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, * queue for otherwise the flush_work in the pageflip code will * deadlock. */ - schedule_work(&dev_priv->hotplug_work); + if (queue_dig) + schedule_work(&dev_priv->dig_port_work); + if (queue_hp) + schedule_work(&dev_priv->hotplug_work); } static void gmbus_irq_handler(struct drm_device *dev) @@ -1827,11 +1962,11 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev) if (IS_G4X(dev)) { u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; - intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x); + intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x); } else { u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; - intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); + intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915); } if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && @@ -1929,8 +2064,12 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) struct drm_i915_private *dev_priv = dev->dev_private; int pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; + u32 dig_hotplug_reg; + + dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); + I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); - intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); + intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx); if (pch_iir & SDE_AUDIO_POWER_MASK) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> @@ -2036,8 +2175,12 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) struct drm_i915_private *dev_priv = dev->dev_private; int pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; + u32 dig_hotplug_reg; + + dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); + I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); - intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); + intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt); if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> @@ -4358,6 +4501,7 @@ void intel_irq_init(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); + INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func); INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); diff --git 
a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index ded60139820e..efaf44b5ff25 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1705,6 +1705,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port) intel_encoder->cloneable = 0; intel_encoder->hot_plug = intel_ddi_hot_plug; + intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; + dev_priv->hpd_irq_port[port] = intel_dig_port; + if (init_dp) dp_connector = intel_ddi_init_dp_connector(intel_dig_port); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 23b62b0d9e3b..2da413cba987 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -3815,6 +3815,22 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder) intel_dp_check_link_status(intel_dp); } +bool +intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) +{ + struct intel_dp *intel_dp = &intel_dig_port->dp; + + if (long_hpd) + return true; + + /* + * we'll check the link status via the normal hot plug path later - + * but for short hpds we should check it now + */ + intel_dp_check_link_status(intel_dp); + return false; +} + /* Return which DP Port should be selected for Transcoder DP control */ int intel_trans_dp_port_sel(struct drm_crtc *crtc) @@ -4387,6 +4403,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, void intel_dp_init(struct drm_device *dev, int output_reg, enum port port) { + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_digital_port *intel_dig_port; struct intel_encoder *intel_encoder; struct drm_encoder *encoder; @@ -4443,6 +4460,9 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) intel_encoder->cloneable = 0; intel_encoder->hot_plug = intel_dp_hot_plug; + intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; + dev_priv->hpd_irq_port[port] = intel_dig_port; + if (!intel_dp_init_connector(intel_dig_port, intel_connector)) { drm_encoder_cleanup(encoder); kfree(intel_dig_port); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0ef04ea68719..45afd25f9362 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -567,6 +567,7 @@ struct intel_digital_port { u32 saved_port_bits; struct intel_dp dp; struct intel_hdmi hdmi; + bool (*hpd_pulse)(struct intel_digital_port *, bool); }; static inline int @@ -850,6 +851,8 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc); bool intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config); bool intel_dp_is_edp(struct drm_device *dev, enum port port); +bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, + bool long_hpd); void intel_edp_backlight_on(struct intel_dp *intel_dp); void intel_edp_backlight_off(struct intel_dp *intel_dp); void intel_edp_panel_vdd_on(struct intel_dp *intel_dp); @@ -861,7 +864,6 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate); void intel_edp_psr_exit(struct drm_device *dev); void intel_edp_psr_init(struct drm_device *dev); - /* intel_dsi.c */ void intel_dsi_init(struct drm_device *dev); -- cgit v1.2.3 From a6cdb93a7a135b853353fffecbdc2e60ba56a016 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Mon, 30 Jun 2014 09:53:39 -0700 Subject: drm/i915: Implement MI decode for gen8 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ipehr just carries Dword 0 and on Gen 8, offsets are located on Dword 2 and 3 of MI_SEMAPHORE_WAIT. 
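
So the hang checker has to scan backwards from HEAD in the ring to the full packet and reassemble the semaphore address from those two dwords; schematically (a sketch, assuming cmd points at dword 0 of the MI_SEMAPHORE_WAIT packet as found in the ringbuffer):

	/*
	 * MI_SEMAPHORE_WAIT on gen8:
	 *   dword 0 - opcode/flags (what IPEHR latches)
	 *   dword 1 - semaphore value to compare against
	 *   dword 2 - semaphore address, low 32 bits
	 *   dword 3 - semaphore address, high 32 bits
	 */
	const u32 *cmd;	/* dword 0 of the packet */
	u64 offset = ((u64)cmd[3] << 32) | cmd[2];
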
This implementation was based on Ben's work and on Ville's suggestion for Ben Cc: Ville Syrjälä Cc: Ben Widawsky Signed-off-by: Rodrigo Vivi Reviewed-by: Ben Widawsky [danvet: Fixup format string.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 42 ++++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index c50e3b41d6fe..d672053fdb10 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2927,12 +2927,7 @@ static bool ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) { if (INTEL_INFO(dev)->gen >= 8) { - /* - * FIXME: gen8 semaphore support - currently we don't emit - * semaphores on bdw anyway, but this needs to be addressed when - * we merge that code. - */ - return false; + return (ipehr >> 23) == 0x1c; } else { ipehr &= ~MI_SEMAPHORE_SYNC_MASK; return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | @@ -2941,19 +2936,20 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) } static struct intel_engine_cs * -semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr) +semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset) { struct drm_i915_private *dev_priv = ring->dev->dev_private; struct intel_engine_cs *signaller; int i; if (INTEL_INFO(dev_priv->dev)->gen >= 8) { - /* - * FIXME: gen8 semaphore support - currently we don't emit - * semaphores on bdw anyway, but this needs to be addressed when - * we merge that code. - */ - return NULL; + for_each_ring(signaller, dev_priv, i) { + if (ring == signaller) + continue; + + if (offset == signaller->semaphore.signal_ggtt[ring->id]) + return signaller; + } } else { u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; @@ -2966,8 +2962,8 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr) } } - DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n", - ring->id, ipehr); + DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n", + ring->id, ipehr, offset); return NULL; } @@ -2977,7 +2973,8 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) { struct drm_i915_private *dev_priv = ring->dev->dev_private; u32 cmd, ipehr, head; - int i; + u64 offset = 0; + int i, backwards; ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) @@ -2986,13 +2983,15 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) /* * HEAD is likely pointing to the dword after the actual command, * so scan backwards until we find the MBOX. But limit it to just 3 - * dwords. Note that we don't care about ACTHD here since that might + * or 4 dwords depending on the semaphore wait command size. + * Note that we don't care about ACTHD here since that might * point at at batch, and semaphores are always emitted into the * ringbuffer itself. */ head = I915_READ_HEAD(ring) & HEAD_ADDR; + backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 
5 : 4; - for (i = 4; i; --i) { + for (i = backwards; i; --i) { /* * Be paranoid and presume the hw has gone off into the wild - * our ring is smaller than what the hardware (and hence @@ -3012,7 +3011,12 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) return NULL; *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1; - return semaphore_wait_to_signaller_ring(ring, ipehr); + if (INTEL_INFO(ring->dev)->gen >= 8) { + offset = ioread32(ring->buffer->virtual_start + head + 12); + offset <<= 32; + offset = ioread32(ring->buffer->virtual_start + head + 8); + } + return semaphore_wait_to_signaller_ring(ring, ipehr, offset); } static int semaphore_passed(struct intel_engine_cs *ring) -- cgit v1.2.3 From 50c3dc970a09b3b60422a58934cc27a413288bab Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 27 Jun 2014 17:19:22 +0200 Subject: drm/fb-helper: Fix hpd vs. initial config races Some drivers need to be able to have a perfect race-free fbcon setup. Current drivers only enable hotplug processing after the call to drm_fb_helper_initial_config which leaves a tiny but important race. This race is especially noticable on embedded platforms where the driver itself enables the voltage for the hdmi output, since only then will monitors (after a bit of delay, as usual) respond by asserting the hpd pin. Most of the infrastructure is already there with the split-out drm_fb_helper_init. And drm_fb_helper_initial_config already has all the required locking to handle concurrent hpd events since commit 53f1904bced78d7c00f5d874c662ec3ac85d0f9f Author: Daniel Vetter Date: Thu Mar 20 14:26:35 2014 +0100 drm/fb-helper: improve drm_fb_helper_initial_config locking The only missing bit is making drm_fb_helper_hotplug_event save against concurrent calls of drm_fb_helper_initial_config. The only unprotected bit is the check for fb_helper->fb. With that drivers can first initialize the fb helper, then enabel hotplug processing and then set up the initial config all in a completely race-free manner. Update kerneldoc and convert i915 as a proof of concept. Feature requested by Thierry since his tegra driver atm reliably boots slowly enough to misses the hotplug event for an external hdmi screen, but also reliably boots to quickly for the hpd pin to be asserted when the fb helper calls into the hdmi ->detect function. Cc: Thierry Reding Signed-off-by: Daniel Vetter Signed-off-by: Thierry Reding Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fb_helper.c | 11 +++++------ drivers/gpu/drm/i915/i915_dma.c | 3 --- drivers/gpu/drm/i915/i915_drv.c | 2 -- drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/i915_irq.c | 4 ---- 5 files changed, 5 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 5aab5ab8b584..6cd687a20cd5 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1609,8 +1609,10 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config); * either the output polling work or a work item launched from the driver's * hotplug interrupt). * - * Note that the driver must ensure that this is only called _after_ the fb has - * been fully set up, i.e. after the call to drm_fb_helper_initial_config. + * Note that drivers may call this even before calling + * drm_fb_helper_initial_config but only aftert drm_fb_helper_init. This allows + * for a race-free fbcon setup and will make sure that the fbdev emulation will + * not miss any hotplug events. 
* * RETURNS: * 0 on success and a non-zero error code otherwise. @@ -1620,11 +1622,8 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) struct drm_device *dev = fb_helper->dev; u32 max_width, max_height; - if (!fb_helper->fb) - return 0; - mutex_lock(&fb_helper->dev->mode_config.mutex); - if (!drm_fb_helper_is_bound(fb_helper)) { + if (!fb_helper->fb || !drm_fb_helper_is_bound(fb_helper)) { fb_helper->delayed_hotplug = true; mutex_unlock(&fb_helper->dev->mode_config.mutex); return 0; diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 6c656392d67d..cac9265f9757 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1375,9 +1375,6 @@ static int i915_load_modeset_init(struct drm_device *dev) */ intel_fbdev_initial_config(dev); - /* Only enable hotplug handling once the fbdev is fully set up. */ - dev_priv->enable_hotplug_processing = true; - drm_kms_helper_poll_init(dev); return 0; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 651e65e051c0..d935ab3718e1 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -527,7 +527,6 @@ static int i915_drm_freeze(struct drm_device *dev) } drm_irq_uninstall(dev); - dev_priv->enable_hotplug_processing = false; intel_disable_gt_powersave(dev); @@ -655,7 +654,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) * notifications. * */ intel_hpd_init(dev); - dev_priv->enable_hotplug_processing = true; /* Config may have changed between suspend and resume */ drm_helper_hpd_irq_event(dev); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a47fbf60b781..5484f052d50c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1389,7 +1389,6 @@ struct drm_i915_private { u32 pipestat_irq_mask[I915_MAX_PIPES]; struct work_struct hotplug_work; - bool enable_hotplug_processing; struct { unsigned long hpd_last_jiffies; int hpd_cnt; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 267f069765ad..69a7960c36bb 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1109,10 +1109,6 @@ static void i915_hotplug_work_func(struct work_struct *work) bool changed = false; u32 hpd_event_bits; - /* HPD irq before everything is fully set up. */ - if (!dev_priv->enable_hotplug_processing) - return; - mutex_lock(&mode_config->mutex); DRM_DEBUG_KMS("running encoder hotplug functions\n"); -- cgit v1.2.3 From 31685c258e0b0ad6aa486c5ec001382cf8a64212 Mon Sep 17 00:00:00 2001 From: Deepak S Date: Thu, 3 Jul 2014 17:33:01 -0400 Subject: drm/i915/vlv: WA for Turbo and RC6 to work together. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With RC6 enabled, BYT has an HW issue in determining the right Gfx busyness. WA for Turbo + RC6: Use SW based Gfx busy-ness detection to decide on increasing/decreasing the freq. This logic will monitor C0 counters of render/media power-wells over EI period and takes necessary action based on these values v2: Refactor duplicate code. (Ville) v3: Reformat the comments. (Ville) v4: Enable required counters and remove unwanted code (Ville) v5: Added frequency change acceleration support and remove kernel-doc style comments. (Ville) v6: Updated comment section and Fix w/a comment. 
(Ville) Signed-off-by: Deepak S Reviewed-by: Ville Syrjälä Signed-off-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 15 +++++ drivers/gpu/drm/i915/i915_irq.c | 133 +++++++++++++++++++++++++++++++++++++++- drivers/gpu/drm/i915/i915_reg.h | 11 ++++ drivers/gpu/drm/i915/intel_pm.c | 12 +++- 4 files changed, 167 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8fa8172fcfbb..41191f11af60 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -902,6 +902,12 @@ struct vlv_s0ix_state { u32 clock_gate_dis2; }; +struct intel_rps_ei_calc { + u32 cz_ts_ei; + u32 render_ei_c0; + u32 media_ei_c0; +}; + struct intel_gen6_power_mgmt { /* work and pm_iir are protected by dev_priv->irq_lock */ struct work_struct work; @@ -926,6 +932,8 @@ struct intel_gen6_power_mgmt { u8 rp1_freq; /* "less than" RP0 power/freqency */ u8 rp0_freq; /* Non-overclocked max frequency. */ + u32 ei_interrupt_count; + int last_adj; enum { LOW_POWER, BETWEEN, HIGH_POWER } power; @@ -1527,6 +1535,13 @@ struct drm_i915_private { /* gen6+ rps state */ struct intel_gen6_power_mgmt rps; + /* rps wa up ei calculation */ + struct intel_rps_ei_calc rps_up_ei; + + /* rps wa down ei calculation */ + struct intel_rps_ei_calc rps_down_ei; + + /* ilk-only ips/rps state. Everything in here is protected by the global * mchdev_lock in intel_pm.c */ struct intel_ilk_power_mgmt ips; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index d672053fdb10..0b4a8ed76a54 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1272,6 +1272,131 @@ static void notify_ring(struct drm_device *dev, i915_queue_hangcheck(dev); } +static u32 vlv_c0_residency(struct drm_i915_private *dev_priv, + struct intel_rps_ei_calc *rps_ei) +{ + u32 cz_ts, cz_freq_khz; + u32 render_count, media_count; + u32 elapsed_render, elapsed_media, elapsed_time; + u32 residency = 0; + + cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP); + cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4); + + render_count = I915_READ(VLV_RENDER_C0_COUNT_REG); + media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG); + + if (rps_ei->cz_ts_ei == 0) { + rps_ei->cz_ts_ei = cz_ts; + rps_ei->render_ei_c0 = render_count; + rps_ei->media_ei_c0 = media_count; + + return dev_priv->rps.cur_freq; + } + + elapsed_time = cz_ts - rps_ei->cz_ts_ei; + rps_ei->cz_ts_ei = cz_ts; + + elapsed_render = render_count - rps_ei->render_ei_c0; + rps_ei->render_ei_c0 = render_count; + + elapsed_media = media_count - rps_ei->media_ei_c0; + rps_ei->media_ei_c0 = media_count; + + /* Convert all the counters into common unit of milli sec */ + elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC; + elapsed_render /= cz_freq_khz; + elapsed_media /= cz_freq_khz; + + /* + * Calculate overall C0 residency percentage + * only if elapsed time is non zero + */ + if (elapsed_time) { + residency = + ((max(elapsed_render, elapsed_media) * 100) + / elapsed_time); + } + + return residency; +} + +/** + * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU + * busy-ness calculated from C0 counters of render & media power wells + * @dev_priv: DRM device private + * + */ +static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv) +{ + u32 residency_C0_up = 0, residency_C0_down = 0; + u8 new_delay, adj; + + dev_priv->rps.ei_interrupt_count++; + + 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + + + if (dev_priv->rps_up_ei.cz_ts_ei == 0) { + vlv_c0_residency(dev_priv, &dev_priv->rps_up_ei); + vlv_c0_residency(dev_priv, &dev_priv->rps_down_ei); + return dev_priv->rps.cur_freq; + } + + + /* + * To down throttle, C0 residency should be less than down threshold + * for continous EI intervals. So calculate down EI counters + * once in VLV_INT_COUNT_FOR_DOWN_EI + */ + if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) { + + dev_priv->rps.ei_interrupt_count = 0; + + residency_C0_down = vlv_c0_residency(dev_priv, + &dev_priv->rps_down_ei); + } else { + residency_C0_up = vlv_c0_residency(dev_priv, + &dev_priv->rps_up_ei); + } + + new_delay = dev_priv->rps.cur_freq; + + adj = dev_priv->rps.last_adj; + /* C0 residency is greater than UP threshold. Increase Frequency */ + if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) { + if (adj > 0) + adj *= 2; + else + adj = 1; + + if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit) + new_delay = dev_priv->rps.cur_freq + adj; + + /* + * For better performance, jump directly + * to RPe if we're below it. + */ + if (new_delay < dev_priv->rps.efficient_freq) + new_delay = dev_priv->rps.efficient_freq; + + } else if (!dev_priv->rps.ei_interrupt_count && + (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) { + if (adj < 0) + adj *= 2; + else + adj = -1; + /* + * This means, C0 residency is less than down threshold over + * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq + */ + if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit) + new_delay = dev_priv->rps.cur_freq + adj; + } + + return new_delay; +} + static void gen6_pm_rps_work(struct work_struct *work) { struct drm_i915_private *dev_priv = @@ -1320,6 +1445,8 @@ static void gen6_pm_rps_work(struct work_struct *work) else new_delay = dev_priv->rps.min_freq_softlimit; adj = 0; + } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { + new_delay = vlv_calc_delay_from_C0_counters(dev_priv); } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { if (adj < 0) adj *= 2; @@ -4511,7 +4638,11 @@ void intel_irq_init(struct drm_device *dev) INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); /* Let's track the enabled rps events */ - dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; + if (IS_VALLEYVIEW(dev)) + /* WaGsvRC0ResidenncyMethod:VLV */ + dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; + else + dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; setup_timer(&dev_priv->gpu_error.hangcheck_timer, i915_hangcheck_elapsed, diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 8c0f70de9fd7..190d4bb5ad53 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -531,6 +531,7 @@ enum punit_power_well { #define PUNIT_REG_GPU_FREQ_STS 0xd8 #define GENFREQSTATUS (1<<0) #define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc +#define PUNIT_REG_CZ_TIMESTAMP 0xce #define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */ #define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */ @@ -556,6 +557,11 @@ enum punit_power_well { #define FB_FMAX_VMIN_FREQ_LO_SHIFT 27 #define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000 +#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000 +#define VLV_RP_UP_EI_THRESHOLD 90 +#define VLV_RP_DOWN_EI_THRESHOLD 70 +#define VLV_INT_COUNT_FOR_DOWN_EI 5 + /* vlv2 north clock has */ #define CCK_FUSE_REG 0x8 #define CCK_FUSE_HPLL_FREQ_MASK 0x3 @@ -5394,6 +5400,7 @@ enum punit_power_well { #define VLV_GTLC_ALLOWWAKEERR (1 << 1) #define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5) #define VLV_GTLC_PW_RENDER_STATUS_MASK (1 
<< 7) +#define VLV_GTLC_SURVIVABILITY_REG 0x130098 #define FORCEWAKE_MT 0xa188 /* multi-threaded */ #define FORCEWAKE_KERNEL 0x1 #define FORCEWAKE_USER 0x2 @@ -5541,6 +5548,8 @@ enum punit_power_well { #define GEN6_GT_GFX_RC6_LOCKED 0x138104 #define VLV_COUNTER_CONTROL 0x138104 #define VLV_COUNT_RANGE_HIGH (1<<15) +#define VLV_MEDIA_RC0_COUNT_EN (1<<5) +#define VLV_RENDER_RC0_COUNT_EN (1<<4) #define VLV_MEDIA_RC6_COUNT_EN (1<<1) #define VLV_RENDER_RC6_COUNT_EN (1<<0) #define GEN6_GT_GFX_RC6 0x138108 @@ -5549,6 +5558,8 @@ enum punit_power_well { #define GEN6_GT_GFX_RC6p 0x13810C #define GEN6_GT_GFX_RC6pp 0x138110 +#define VLV_RENDER_C0_COUNT_REG 0x138118 +#define VLV_MEDIA_C0_COUNT_REG 0x13811C #define GEN6_PCODE_MAILBOX 0x138124 #define GEN6_PCODE_READY (1<<31) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index f2a40565ef98..d1af6419ec78 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3282,8 +3282,11 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) vlv_force_gfx_clock(dev_priv, false); - I915_WRITE(GEN6_PMINTRMSK, - gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); + if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED) + I915_WRITE(GEN6_PMINTRMSK, ~dev_priv->pm_rps_events); + else + I915_WRITE(GEN6_PMINTRMSK, + gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); } void gen6_rps_idle(struct drm_i915_private *dev_priv) @@ -4125,6 +4128,7 @@ static void valleyview_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RP_DOWN_EI, 350000); I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); + I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240); I915_WRITE(GEN6_RP_CONTROL, GEN6_RP_MEDIA_TURBO | @@ -4145,9 +4149,11 @@ static void valleyview_enable_rps(struct drm_device *dev) /* allows RC6 residency counter to work */ I915_WRITE(VLV_COUNTER_CONTROL, - _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | + _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN | + VLV_RENDER_RC0_COUNT_EN | VLV_MEDIA_RC6_COUNT_EN | VLV_RENDER_RC6_COUNT_EN)); + if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; -- cgit v1.2.3 From bf225f20d51fb1b77d47fce0628159a3eda027b9 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 10 Jul 2014 20:31:18 +0100 Subject: drm/i915: Move RPS evaluation interval counters to i915->rps Place the RPS counters inside the RPS struct. Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 18 +++++++----------- drivers/gpu/drm/i915/i915_irq.c | 32 ++++++++++++++++---------------- 2 files changed, 23 insertions(+), 27 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 647ea67d0b1d..263a8799eb59 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -902,10 +902,10 @@ struct vlv_s0ix_state { u32 clock_gate_dis2; }; -struct intel_rps_ei_calc { - u32 cz_ts_ei; - u32 render_ei_c0; - u32 media_ei_c0; +struct intel_rps_ei { + u32 cz_clock; + u32 render_c0; + u32 media_c0; }; struct intel_gen6_power_mgmt { @@ -940,6 +940,9 @@ struct intel_gen6_power_mgmt { bool enabled; struct delayed_work delayed_resume_work; + /* manual wa residency calculations */ + struct intel_rps_ei up_ei, down_ei; + /* * Protects RPS/RC6 register access and PCU communication. * Must be taken after struct_mutex if nested. 
@@ -1534,13 +1537,6 @@ struct drm_i915_private { /* gen6+ rps state */ struct intel_gen6_power_mgmt rps; - /* rps wa up ei calculation */ - struct intel_rps_ei_calc rps_up_ei; - - /* rps wa down ei calculation */ - struct intel_rps_ei_calc rps_down_ei; - - /* ilk-only ips/rps state. Everything in here is protected by the global * mchdev_lock in intel_pm.c */ struct intel_ilk_power_mgmt ips; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 0b4a8ed76a54..30fd63708b1a 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1273,7 +1273,7 @@ static void notify_ring(struct drm_device *dev, } static u32 vlv_c0_residency(struct drm_i915_private *dev_priv, - struct intel_rps_ei_calc *rps_ei) + struct intel_rps_ei *rps_ei) { u32 cz_ts, cz_freq_khz; u32 render_count, media_count; @@ -1286,22 +1286,22 @@ static u32 vlv_c0_residency(struct drm_i915_private *dev_priv, render_count = I915_READ(VLV_RENDER_C0_COUNT_REG); media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG); - if (rps_ei->cz_ts_ei == 0) { - rps_ei->cz_ts_ei = cz_ts; - rps_ei->render_ei_c0 = render_count; - rps_ei->media_ei_c0 = media_count; + if (rps_ei->cz_clock == 0) { + rps_ei->cz_clock = cz_ts; + rps_ei->render_c0 = render_count; + rps_ei->media_c0 = media_count; return dev_priv->rps.cur_freq; } - elapsed_time = cz_ts - rps_ei->cz_ts_ei; - rps_ei->cz_ts_ei = cz_ts; + elapsed_time = cz_ts - rps_ei->cz_clock; + rps_ei->cz_clock = cz_ts; - elapsed_render = render_count - rps_ei->render_ei_c0; - rps_ei->render_ei_c0 = render_count; + elapsed_render = render_count - rps_ei->render_c0; + rps_ei->render_c0 = render_count; - elapsed_media = media_count - rps_ei->media_ei_c0; - rps_ei->media_ei_c0 = media_count; + elapsed_media = media_count - rps_ei->media_c0; + rps_ei->media_c0 = media_count; /* Convert all the counters into common unit of milli sec */ elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC; @@ -1337,9 +1337,9 @@ static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv) WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); - if (dev_priv->rps_up_ei.cz_ts_ei == 0) { - vlv_c0_residency(dev_priv, &dev_priv->rps_up_ei); - vlv_c0_residency(dev_priv, &dev_priv->rps_down_ei); + if (dev_priv->rps.up_ei.cz_clock == 0) { + vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei); + vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei); return dev_priv->rps.cur_freq; } @@ -1354,10 +1354,10 @@ static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv) dev_priv->rps.ei_interrupt_count = 0; residency_C0_down = vlv_c0_residency(dev_priv, - &dev_priv->rps_down_ei); + &dev_priv->rps.down_ei); } else { residency_C0_up = vlv_c0_residency(dev_priv, - &dev_priv->rps_up_ei); + &dev_priv->rps.up_ei); } new_delay = dev_priv->rps.cur_freq; -- cgit v1.2.3 From 3497a5620caec0ae25e3fa3b6828f1cdeac80ec0 Mon Sep 17 00:00:00 2001 From: Deepak S Date: Thu, 10 Jul 2014 13:16:26 +0530 Subject: drm/i915/chv: Add basic PM interrupt support for CHV Enabled PM interrupt programming for CHV. Re-using gen8 code and extending same for CHV. 
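Since CHV reuses the RPS evaluation-interval machinery introduced above, a compact refresher on what that machinery computes may help when reading the diffs. The following is a self-contained plain-C sketch of the residency arithmetic performed by vlv_c0_residency() using the renamed intel_rps_ei fields; it is a model of the math only, not driver code. The cz_to_ms and cnt_to_ms parameters stand in for VLV_CZ_CLOCK_TO_MILLI_SEC and the memory-clock-derived cz_freq_khz, and the sample values in main() are made up for illustration — the real function reads the PUNIT CZ timestamp and the per-well C0 count registers instead of taking samples as arguments.

#include <stdint.h>
#include <stdio.h>

/* Same three samples the driver keeps per evaluation interval (EI). */
struct intel_rps_ei {
    uint32_t cz_clock;   /* CZ timestamp at the previous EI */
    uint32_t render_c0;  /* render-well C0 count at the previous EI */
    uint32_t media_c0;   /* media-well C0 count at the previous EI */
};

/*
 * Residency is the busiest well's elapsed C0 time as a percentage of
 * elapsed wall time, with both deltas first converted to milliseconds.
 * The first call only primes the stored samples and reports 0.
 */
static uint32_t c0_residency_pct(struct intel_rps_ei *ei,
                                 uint32_t cz_ts, uint32_t render, uint32_t media,
                                 uint32_t cz_to_ms, uint32_t cnt_to_ms)
{
    uint32_t time_ms, busy_render, busy_media, busy, pct = 0;

    if (ei->cz_clock == 0) {
        ei->cz_clock = cz_ts;
        ei->render_c0 = render;
        ei->media_c0 = media;
        return 0;
    }

    time_ms = (cz_ts - ei->cz_clock) / cz_to_ms;
    busy_render = (render - ei->render_c0) / cnt_to_ms;
    busy_media = (media - ei->media_c0) / cnt_to_ms;
    busy = busy_render > busy_media ? busy_render : busy_media;

    ei->cz_clock = cz_ts;
    ei->render_c0 = render;
    ei->media_c0 = media;

    if (time_ms)
        pct = busy * 100 / time_ms;
    return pct;
}

int main(void)
{
    struct intel_rps_ei ei = { 0, 0, 0 };
    const uint32_t scale = 1000;

    c0_residency_pct(&ei, scale, 0, 0, scale, scale);        /* prime the samples */
    printf("%u%%\n", c0_residency_pct(&ei, scale + 10000 * scale,
                                      8000 * scale, 3000 * scale,
                                      scale, scale));        /* prints 80% */
    return 0;
}

The interrupt work handler then compares this percentage against the 90%/70% thresholds (VLV_RP_UP_EI_THRESHOLD / VLV_RP_DOWN_EI_THRESHOLD) to step the frequency up or down, as in vlv_calc_delay_from_C0_counters() above.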
Signed-off-by: Deepak S Reviewed-by: Mika Kuoppala Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 2 +- drivers/gpu/drm/i915/intel_pm.c | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 30fd63708b1a..188fe04c4f8d 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1407,7 +1407,7 @@ static void gen6_pm_rps_work(struct work_struct *work) spin_lock_irq(&dev_priv->irq_lock); pm_iir = dev_priv->rps.pm_iir; dev_priv->rps.pm_iir = 0; - if (IS_BROADWELL(dev_priv->dev)) + if (IS_BROADWELL(dev_priv->dev) || IS_CHERRYVIEW(dev_priv->dev)) bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); else { /* Make sure not to corrupt PMIMR state used by ringbuffer */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 1ec777a3914a..58a03f863379 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3392,6 +3392,8 @@ static void cherryview_disable_rps(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; I915_WRITE(GEN6_RC_CONTROL, 0); + + gen8_disable_rps_interrupts(dev); } static void valleyview_disable_rps(struct drm_device *dev) @@ -4109,6 +4111,8 @@ static void cherryview_enable_rps(struct drm_device *dev) valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); + gen8_enable_rps_interrupts(dev); + gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); } -- cgit v1.2.3 From 36cd7444c044806cd2a4e450a8385597221d5d25 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Fri, 2 May 2014 13:44:18 +1000 Subject: drm/i915: check connector->encoder before using it. DP MST will need connectors that aren't connected to specific encoders, add some checks in advance to avoid oopses. 
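The guard itself is simple; the point is the invariant change: with MST a registered connector may have no encoder bound yet, so every walk has to tolerate connector->encoder == NULL. A toy, self-contained illustration of that pattern (hypothetical structs and names, not the driver's types):

#include <stdio.h>
#include <stddef.h>

struct encoder { const char *name; };
struct connector { struct encoder *encoder; };

/* Skip connectors that have no encoder bound instead of oopsing. */
static void report(const struct connector *c)
{
    if (!c->encoder) {
        puts("connector: no encoder bound");
        return;
    }
    printf("connector: driven by %s\n", c->encoder->name);
}

int main(void)
{
    struct encoder ddi = { "DDI B" };
    struct connector bound = { &ddi }, unbound = { NULL };

    report(&bound);
    report(&unbound);
    return 0;
}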
Reviewed-by: Todd Previte Signed-off-by: Dave Airlie --- drivers/gpu/drm/i915/i915_debugfs.c | 16 +++++++++------- drivers/gpu/drm/i915/i915_irq.c | 4 ++++ drivers/gpu/drm/i915/intel_display.c | 25 ++++++++++++++----------- 3 files changed, 27 insertions(+), 18 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 4a5b0f80e059..f27daf70ba72 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2209,13 +2209,15 @@ static void intel_connector_info(struct seq_file *m, seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev); } - if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || - intel_encoder->type == INTEL_OUTPUT_EDP) - intel_dp_info(m, intel_connector); - else if (intel_encoder->type == INTEL_OUTPUT_HDMI) - intel_hdmi_info(m, intel_connector); - else if (intel_encoder->type == INTEL_OUTPUT_LVDS) - intel_lvds_info(m, intel_connector); + if (intel_encoder) { + if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || + intel_encoder->type == INTEL_OUTPUT_EDP) + intel_dp_info(m, intel_connector); + else if (intel_encoder->type == INTEL_OUTPUT_HDMI) + intel_hdmi_info(m, intel_connector); + else if (intel_encoder->type == INTEL_OUTPUT_LVDS) + intel_lvds_info(m, intel_connector); + } seq_printf(m, "\tmodes:\n"); list_for_each_entry(mode, &connector->modes, head) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 026f0a3f3b90..12d77b5368c4 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1165,6 +1165,8 @@ static void i915_hotplug_work_func(struct work_struct *work) dev_priv->hpd_event_bits = 0; list_for_each_entry(connector, &mode_config->connector_list, head) { intel_connector = to_intel_connector(connector); + if (!intel_connector->encoder) + continue; intel_encoder = intel_connector->encoder; if (intel_encoder->hpd_pin > HPD_NONE && dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED && @@ -1195,6 +1197,8 @@ static void i915_hotplug_work_func(struct work_struct *work) list_for_each_entry(connector, &mode_config->connector_list, head) { intel_connector = to_intel_connector(connector); + if (!intel_connector->encoder) + continue; intel_encoder = intel_connector->encoder; if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { if (intel_encoder->hot_plug) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e07e6b5dee35..008bb3dc88cb 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5003,20 +5003,23 @@ static void intel_connector_check_state(struct intel_connector *connector) "wrong connector dpms state\n"); WARN(connector->base.encoder != &encoder->base, "active connector not linked to encoder\n"); - WARN(!encoder->connectors_active, - "encoder->connectors_active not set\n"); - encoder_enabled = encoder->get_hw_state(encoder, &pipe); - WARN(!encoder_enabled, "encoder not enabled\n"); - if (WARN_ON(!encoder->base.crtc)) - return; + if (encoder) { + WARN(!encoder->connectors_active, + "encoder->connectors_active not set\n"); + + encoder_enabled = encoder->get_hw_state(encoder, &pipe); + WARN(!encoder_enabled, "encoder not enabled\n"); + if (WARN_ON(!encoder->base.crtc)) + return; - crtc = encoder->base.crtc; + crtc = encoder->base.crtc; - WARN(!crtc->enabled, "crtc not enabled\n"); - WARN(!to_intel_crtc(crtc)->active, "crtc not active\n"); - WARN(pipe != 
to_intel_crtc(crtc)->pipe, - "encoder active on the wrong pipe\n"); + WARN(!crtc->enabled, "crtc not enabled\n"); + WARN(!to_intel_crtc(crtc)->active, "crtc not active\n"); + WARN(pipe != to_intel_crtc(crtc)->pipe, + "encoder active on the wrong pipe\n"); + } } } -- cgit v1.2.3 From 0e32b39ceed665bfa4a77a4bc307b6652b991632 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Fri, 2 May 2014 14:02:48 +1000 Subject: drm/i915: add DP 1.2 MST support (v0.7) This adds DP 1.2 MST support on Haswell systems. Notes: a) this reworks irq handling for DP MST ports, so that we can avoid the mode config locking in the current hpd handlers, as we need to process up/down msgs at a better time. Changes since v0.1: use PORT_PCH_HOTPLUG to detect short vs long pulses add a workqueue to deal with digital events as they can get blocked on the main workqueue beyong mode_config mutex fix a bunch of modeset checker warnings acks irqs in the driver cleanup the MST encoders Changes since v0.2: check irq status again in work handler move around bring up and tear down to fix DPMS on/off use path properties. Changes since v0.3: updates for mst apis more state checker fixes irq handling improvements fbcon handling support improved reference counting of link - fixes redocking. Changes since v0.4: handle gpu reset hpd reinit without oopsing check link status on HPD irqs fix suspend/resume Changes since v0.5: use proper functions to get max link/lane counts fix another checker backtrace - due to connectors disappearing. set output type in more places fro, unknown->displayport don't talk to devices if no HPD asserted check mst on short irqs only check link status properly rebase onto prepping irq changes. drop unsued force_act Changes since v0.6: cleanup unused struct entry. [airlied: fix some sparse warnings]. 
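One piece of the flow worth calling out before the diff is the sink-side capability probe: MST is only attempted when the sink reports DPCD revision 1.2 or later and sets the MST_CAP bit, as intel_dp_probe_mst() below does. A self-contained plain-C sketch of that check follows; dpcd_read() is a made-up stub standing in for the AUX-channel read helper, while the register offsets match the DisplayPort DPCD definitions used via drm_dp_helper.h.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DP_DPCD_REV   0x000
#define DP_MSTM_CAP   0x021
#define DP_MST_CAP    (1 << 0)

/* Fake AUX read: pretends the sink is a DPCD 1.2 device with MST_CAP set,
 * so the sketch runs standalone. Returns the number of bytes "read". */
static int dpcd_read(uint32_t addr, uint8_t *buf)
{
    if (addr == DP_DPCD_REV)
        *buf = 0x12;
    else if (addr == DP_MSTM_CAP)
        *buf = DP_MST_CAP;
    else
        return 0;
    return 1;
}

/* The sink must report DPCD 1.2+ and set MST_CAP before MST mode is used. */
static bool sink_is_mst_capable(void)
{
    uint8_t rev, cap;

    if (dpcd_read(DP_DPCD_REV, &rev) != 1 || rev < 0x12)
        return false;
    if (dpcd_read(DP_MSTM_CAP, &cap) != 1)
        return false;
    return (cap & DP_MST_CAP) != 0;
}

int main(void)
{
    printf("MST capable: %s\n", sink_is_mst_capable() ? "yes" : "no");
    return 0;
}

In the patch, a long HPD pulse re-runs this probe and enables or tears down the topology manager accordingly, while short pulses go through the ESI path in intel_dp_check_mst_status() shown below.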
Reviewed-by: Todd Previte Signed-off-by: Dave Airlie --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/i915_dma.c | 10 + drivers/gpu/drm/i915/i915_drv.c | 13 +- drivers/gpu/drm/i915/i915_drv.h | 9 + drivers/gpu/drm/i915/i915_irq.c | 6 +- drivers/gpu/drm/i915/intel_ddi.c | 75 ++++- drivers/gpu/drm/i915/intel_display.c | 40 ++- drivers/gpu/drm/i915/intel_dp.c | 234 ++++++++++++++- drivers/gpu/drm/i915/intel_dp_mst.c | 534 ++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/intel_drv.h | 44 ++- drivers/gpu/drm/i915/intel_fbdev.c | 5 + drivers/gpu/drm/i915/intel_opregion.c | 1 + 12 files changed, 934 insertions(+), 38 deletions(-) create mode 100644 drivers/gpu/drm/i915/intel_dp_mst.c (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index cad1683d8bb5..91bd167e1cb7 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -59,6 +59,7 @@ i915-y += dvo_ch7017.o \ intel_crt.o \ intel_ddi.o \ intel_dp.o \ + intel_dp_mst.o \ intel_dsi_cmd.o \ intel_dsi.o \ intel_dsi_pll.o \ diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 6df6506db919..d335c46ec6bc 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1717,6 +1717,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto out_mtrrfree; } + dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0); + if (dev_priv->dp_wq == NULL) { + DRM_ERROR("Failed to create our dp workqueue.\n"); + ret = -ENOMEM; + goto out_freewq; + } + intel_irq_init(dev); intel_uncore_sanitize(dev); @@ -1792,6 +1799,8 @@ out_gem_unload: intel_teardown_gmbus(dev); intel_teardown_mchbar(dev); pm_qos_remove_request(&dev_priv->pm_qos); + destroy_workqueue(dev_priv->dp_wq); +out_freewq: destroy_workqueue(dev_priv->wq); out_mtrrfree: arch_phys_wc_del(dev_priv->gtt.mtrr); @@ -1892,6 +1901,7 @@ int i915_driver_unload(struct drm_device *dev) intel_teardown_gmbus(dev); intel_teardown_mchbar(dev); + destroy_workqueue(dev_priv->dp_wq); destroy_workqueue(dev_priv->wq); pm_qos_remove_request(&dev_priv->pm_qos); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 83cb43a24768..a361bb9bc243 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -518,7 +518,6 @@ static int i915_drm_freeze(struct drm_device *dev) flush_delayed_work(&dev_priv->rps.delayed_resume_work); - intel_runtime_pm_disable_interrupts(dev); intel_suspend_gt_powersave(dev); @@ -532,6 +531,9 @@ static int i915_drm_freeze(struct drm_device *dev) } drm_modeset_unlock_all(dev); + intel_dp_mst_suspend(dev); + intel_runtime_pm_disable_interrupts(dev); + intel_modeset_suspend_hw(dev); } @@ -646,6 +648,15 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) intel_modeset_init_hw(dev); + { + unsigned long irqflags; + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + if (dev_priv->display.hpd_irq_setup) + dev_priv->display.hpd_irq_setup(dev); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + } + + intel_dp_mst_resume(dev); drm_modeset_lock_all(dev); intel_modeset_setup_hw_state(dev, true); drm_modeset_unlock_all(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2dc3a922a3c8..7db16bee2997 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1595,6 +1595,15 @@ struct drm_i915_private { u32 short_hpd_port_mask; struct work_struct dig_port_work; + /* + * if we get a HPD 
irq from DP and a HPD irq from non-DP + * the non-DP HPD could block the workqueue on a mode config + * mutex getting, that userspace may have taken. However + * userspace is waiting on the DP workqueue to run which is + * blocked behind the non-DP one. + */ + struct workqueue_struct *dp_wq; + /* Old dri1 support infrastructure, beware the dragons ya fools entering * here! */ struct i915_dri1_state dri1; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 12d77b5368c4..07e1a409e488 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1846,7 +1846,7 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, * deadlock. */ if (queue_dig) - schedule_work(&dev_priv->dig_port_work); + queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work); if (queue_hp) schedule_work(&dev_priv->hotplug_work); } @@ -4739,7 +4739,9 @@ void intel_hpd_init(struct drm_device *dev) list_for_each_entry(connector, &mode_config->connector_list, head) { struct intel_connector *intel_connector = to_intel_connector(connector); connector->polled = intel_connector->polled; - if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) + if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) + connector->polled = DRM_CONNECTOR_POLL_HPD; + if (intel_connector->mst_port) connector->polled = DRM_CONNECTOR_POLL_HPD; } diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 1aec4257e296..9b1542f1cf01 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -116,7 +116,10 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) struct drm_encoder *encoder = &intel_encoder->base; int type = intel_encoder->type; - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || + if (type == INTEL_OUTPUT_DP_MST) { + struct intel_digital_port *intel_dig_port = enc_to_mst(encoder)->primary; + return intel_dig_port->port; + } else if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) { struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); @@ -584,8 +587,8 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, return (refclk * n * 100) / (p * r); } -static void intel_ddi_clock_get(struct intel_encoder *encoder, - struct intel_crtc_config *pipe_config) +void intel_ddi_clock_get(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) { struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; int link_clock = 0; @@ -755,8 +758,7 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) int type = intel_encoder->type; uint32_t temp; - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { - + if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) { temp = TRANS_MSA_SYNC_CLK; switch (intel_crtc->config.pipe_bpp) { case 18: @@ -778,6 +780,21 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) } } +void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; + uint32_t temp; + temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + if (state == true) + temp |= 
TRANS_DDI_DP_VC_PAYLOAD_ALLOC; + else + temp &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC; + I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); +} + void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -857,7 +874,19 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - temp |= TRANS_DDI_MODE_SELECT_DP_SST; + if (intel_dp->is_mst) { + temp |= TRANS_DDI_MODE_SELECT_DP_MST; + } else + temp |= TRANS_DDI_MODE_SELECT_DP_SST; + + temp |= DDI_PORT_WIDTH(intel_dp->lane_count); + } else if (type == INTEL_OUTPUT_DP_MST) { + struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp; + + if (intel_dp->is_mst) { + temp |= TRANS_DDI_MODE_SELECT_DP_MST; + } else + temp |= TRANS_DDI_MODE_SELECT_DP_SST; temp |= DDI_PORT_WIDTH(intel_dp->lane_count); } else { @@ -874,7 +903,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); uint32_t val = I915_READ(reg); - val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK); + val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC); val |= TRANS_DDI_PORT_NONE; I915_WRITE(reg, val); } @@ -913,8 +942,11 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) case TRANS_DDI_MODE_SELECT_DP_SST: if (type == DRM_MODE_CONNECTOR_eDP) return true; - case TRANS_DDI_MODE_SELECT_DP_MST: return (type == DRM_MODE_CONNECTOR_DisplayPort); + case TRANS_DDI_MODE_SELECT_DP_MST: + /* if the transcoder is in MST state then + * connector isn't connected */ + return false; case TRANS_DDI_MODE_SELECT_FDI: return (type == DRM_MODE_CONNECTOR_VGA); @@ -966,6 +998,9 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) { + if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST) + return false; + *pipe = i; return true; } @@ -1272,10 +1307,15 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder) intel_wait_ddi_buf_idle(dev_priv, port); } - val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST | + val = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE; - if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) - val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; + if (intel_dp->is_mst) + val |= DP_TP_CTL_MODE_MST; + else { + val |= DP_TP_CTL_MODE_SST; + if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) + val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; + } I915_WRITE(DP_TP_CTL(port), val); POSTING_READ(DP_TP_CTL(port)); @@ -1314,11 +1354,16 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc) static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); - int type = intel_encoder->type; + struct intel_digital_port *intel_dig_port = enc_to_dig_port(&intel_encoder->base); + int type = intel_dig_port->base.type; + + if (type != INTEL_OUTPUT_DISPLAYPORT && + type != INTEL_OUTPUT_EDP && + type != INTEL_OUTPUT_UNKNOWN) { + return; + } - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) - intel_dp_check_link_status(intel_dp); + intel_dp_hot_plug(intel_encoder); } void intel_ddi_get_config(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 421ea71b2e58..7b542b477a4e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -101,6 +101,14 @@ 
static void haswell_set_pipeconf(struct drm_crtc *crtc); static void intel_set_pipe_csc(struct drm_crtc *crtc); static void vlv_prepare_pll(struct intel_crtc *crtc); +static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe) +{ + if (!connector->mst_port) + return connector->encoder; + else + return &connector->mst_port->mst_encoders[pipe]->base; +} + typedef struct { int min, max; } intel_range_t; @@ -4130,6 +4138,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) if (intel_crtc->config.has_pch_encoder) lpt_pch_enable(crtc); + if (intel_crtc->config.dp_encoder_is_mst) + intel_ddi_set_vc_payload_alloc(crtc, true); + for_each_encoder_on_crtc(dev, crtc, encoder) { encoder->enable(encoder); intel_opregion_notify_encoder(encoder, true); @@ -4178,6 +4189,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) intel_disable_pipe(dev_priv, pipe); + if (intel_crtc->config.dp_encoder_is_mst) + intel_ddi_set_vc_payload_alloc(crtc, false); + ironlake_pfit_disable(intel_crtc); for_each_encoder_on_crtc(dev, crtc, encoder) @@ -4336,6 +4350,9 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder) case INTEL_OUTPUT_EDP: intel_dig_port = enc_to_dig_port(&intel_encoder->base); return port_to_power_domain(intel_dig_port->port); + case INTEL_OUTPUT_DP_MST: + intel_dig_port = enc_to_mst(&intel_encoder->base)->primary; + return port_to_power_domain(intel_dig_port->port); case INTEL_OUTPUT_ANALOG: return POWER_DOMAIN_PORT_CRT; case INTEL_OUTPUT_DSI: @@ -5004,6 +5021,10 @@ static void intel_connector_check_state(struct intel_connector *connector) connector->base.base.id, connector->base.name); + /* there is no real hw state for MST connectors */ + if (connector->mst_port) + return; + WARN(connector->base.dpms == DRM_MODE_DPMS_OFF, "wrong connector dpms state\n"); WARN(connector->base.encoder != &encoder->base, @@ -10524,6 +10545,14 @@ check_encoder_state(struct drm_device *dev) if (connector->base.dpms != DRM_MODE_DPMS_OFF) active = true; } + /* + * for MST connectors if we unplug the connector is gone + * away but the encoder is still connected to a crtc + * until a modeset happens in response to the hotplug. + */ + if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST) + continue; + WARN(!!encoder->base.crtc != enabled, "encoder's enabled state mismatch " "(expected %i, found %i)\n", @@ -11069,7 +11098,7 @@ intel_modeset_stage_output_state(struct drm_device *dev, * for them. */ for (ro = 0; ro < set->num_connectors; ro++) { if (set->connectors[ro] == &connector->base) { - connector->new_encoder = connector->encoder; + connector->new_encoder = intel_find_encoder(connector, to_intel_crtc(set->crtc)->pipe); break; } } @@ -11115,7 +11144,7 @@ intel_modeset_stage_output_state(struct drm_device *dev, new_crtc)) { return -EINVAL; } - connector->encoder->new_crtc = to_intel_crtc(new_crtc); + connector->new_encoder->new_crtc = to_intel_crtc(new_crtc); DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", connector->base.base.id, @@ -11149,7 +11178,12 @@ intel_modeset_stage_output_state(struct drm_device *dev, } } /* Now we've also updated encoder->new_crtc for all encoders. 
*/ - + list_for_each_entry(connector, &dev->mode_config.connector_list, + base.head) { + if (connector->new_encoder) + if (connector->new_encoder != connector->encoder) + connector->encoder = connector->new_encoder; + } for_each_intel_crtc(dev, crtc) { crtc->new_enabled = false; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 30816b3b0742..e7a7953da6d1 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -112,7 +112,7 @@ static void intel_dp_link_down(struct intel_dp *intel_dp); static bool _edp_panel_vdd_on(struct intel_dp *intel_dp); static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); -static int +int intel_dp_max_link_bw(struct intel_dp *intel_dp) { int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; @@ -740,8 +740,9 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector) { struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base); - sysfs_remove_link(&intel_connector->base.kdev->kobj, - intel_dp->aux.ddc.dev.kobj.name); + if (!intel_connector->mst_port) + sysfs_remove_link(&intel_connector->base.kdev->kobj, + intel_dp->aux.ddc.dev.kobj.name); intel_connector_unregister(intel_connector); } @@ -3309,6 +3310,33 @@ intel_dp_probe_oui(struct intel_dp *intel_dp) edp_panel_vdd_off(intel_dp, false); } +static bool +intel_dp_probe_mst(struct intel_dp *intel_dp) +{ + u8 buf[1]; + + if (!intel_dp->can_mst) + return false; + + if (intel_dp->dpcd[DP_DPCD_REV] < 0x12) + return false; + + _edp_panel_vdd_on(intel_dp); + if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) { + if (buf[0] & DP_MST_CAP) { + DRM_DEBUG_KMS("Sink is MST capable\n"); + intel_dp->is_mst = true; + } else { + DRM_DEBUG_KMS("Sink is not MST capable\n"); + intel_dp->is_mst = false; + } + } + edp_panel_vdd_off(intel_dp, false); + + drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); + return intel_dp->is_mst; +} + int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); @@ -3346,6 +3374,20 @@ intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) sink_irq_vector, 1) == 1; } +static bool +intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) +{ + int ret; + + ret = intel_dp_dpcd_read_wake(&intel_dp->aux, + DP_SINK_COUNT_ESI, + sink_irq_vector, 14); + if (ret != 14) + return false; + + return true; +} + static void intel_dp_handle_test_request(struct intel_dp *intel_dp) { @@ -3353,6 +3395,63 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp) drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK); } +static int +intel_dp_check_mst_status(struct intel_dp *intel_dp) +{ + bool bret; + + if (intel_dp->is_mst) { + u8 esi[16] = { 0 }; + int ret = 0; + int retry; + bool handled; + bret = intel_dp_get_sink_irq_esi(intel_dp, esi); +go_again: + if (bret == true) { + + /* check link status - esi[10] = 0x200c */ + if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { + DRM_DEBUG_KMS("channel EQ not ok, retraining\n"); + intel_dp_start_link_train(intel_dp); + intel_dp_complete_link_train(intel_dp); + intel_dp_stop_link_train(intel_dp); + } + + DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]); + ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled); + + if (handled) { + for (retry = 0; retry < 3; retry++) { + int wret; + wret = drm_dp_dpcd_write(&intel_dp->aux, + DP_SINK_COUNT_ESI+1, + &esi[1], 3); + if (wret == 3) { 
+ break; + } + } + + bret = intel_dp_get_sink_irq_esi(intel_dp, esi); + if (bret == true) { + DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]); + goto go_again; + } + } else + ret = 0; + + return ret; + } else { + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + DRM_DEBUG_KMS("failed to get ESI - device may have failed\n"); + intel_dp->is_mst = false; + drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); + /* send a hotplug event */ + drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev); + } + } + return -EINVAL; +} + /* * According to DP spec * 5.1.2: @@ -3361,7 +3460,6 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp) * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 * 4. Check link status on receipt of hot-plug interrupt */ - void intel_dp_check_link_status(struct intel_dp *intel_dp) { @@ -3581,6 +3679,7 @@ intel_dp_detect(struct drm_connector *connector, bool force) enum drm_connector_status status; enum intel_display_power_domain power_domain; struct edid *edid = NULL; + bool ret; intel_runtime_pm_get(dev_priv); @@ -3590,6 +3689,14 @@ intel_dp_detect(struct drm_connector *connector, bool force) DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); + if (intel_dp->is_mst) { + /* MST devices are disconnected from a monitor POV */ + if (intel_encoder->type != INTEL_OUTPUT_EDP) + intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + status = connector_status_disconnected; + goto out; + } + intel_dp->has_audio = false; if (HAS_PCH_SPLIT(dev)) @@ -3602,6 +3709,16 @@ intel_dp_detect(struct drm_connector *connector, bool force) intel_dp_probe_oui(intel_dp); + ret = intel_dp_probe_mst(intel_dp); + if (ret) { + /* if we are in MST mode then this connector + won't appear connected or have anything with EDID on it */ + if (intel_encoder->type != INTEL_OUTPUT_EDP) + intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + status = connector_status_disconnected; + goto out; + } + if (intel_dp->force_audio != HDMI_AUDIO_AUTO) { intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON); } else { @@ -3797,6 +3914,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) struct drm_device *dev = intel_dp_to_dev(intel_dp); drm_dp_aux_unregister(&intel_dp->aux); + intel_dp_mst_encoder_cleanup(intel_dig_port); drm_encoder_cleanup(encoder); if (is_edp(intel_dp)) { cancel_delayed_work_sync(&intel_dp->panel_vdd_work); @@ -3825,28 +3943,62 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = { .destroy = intel_dp_encoder_destroy, }; -static void +void intel_dp_hot_plug(struct intel_encoder *intel_encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); - - intel_dp_check_link_status(intel_dp); + return; } bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) { struct intel_dp *intel_dp = &intel_dig_port->dp; + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + if (intel_dig_port->base.type != INTEL_OUTPUT_EDP) + intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; - if (long_hpd) - return true; + DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port, + long_hpd ? 
"long" : "short"); - /* - * we'll check the link status via the normal hot plug path later - - * but for short hpds we should check it now - */ - intel_dp_check_link_status(intel_dp); + if (long_hpd) { + if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) + goto mst_fail; + + if (!intel_dp_get_dpcd(intel_dp)) { + goto mst_fail; + } + + intel_dp_probe_oui(intel_dp); + + if (!intel_dp_probe_mst(intel_dp)) + goto mst_fail; + + } else { + if (intel_dp->is_mst) { + ret = intel_dp_check_mst_status(intel_dp); + if (ret == -EINVAL) + goto mst_fail; + } + + if (!intel_dp->is_mst) { + /* + * we'll check the link status via the normal hot plug path later - + * but for short hpds we should check it now + */ + intel_dp_check_link_status(intel_dp); + } + } return false; +mst_fail: + /* if we were in MST mode, and device is not there get out of MST mode */ + if (intel_dp->is_mst) { + DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state); + intel_dp->is_mst = false; + drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); + } + return true; } /* Return which DP Port should be selected for Transcoder DP control */ @@ -3897,7 +4049,7 @@ bool intel_dp_is_edp(struct drm_device *dev, enum port port) return false; } -static void +void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); @@ -4391,6 +4543,13 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_dp_aux_init(intel_dp, intel_connector); + /* init MST on ports that can support it */ + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { + if (port == PORT_B || port == PORT_C || port == PORT_D) { + intel_dp_mst_encoder_init(intel_dig_port, intel_connector->base.base.id); + } + } + if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) { drm_dp_aux_unregister(&intel_dp->aux); if (is_edp(intel_dp)) { @@ -4487,3 +4646,46 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) kfree(intel_connector); } } + +void intel_dp_mst_suspend(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + /* disable MST */ + for (i = 0; i < I915_MAX_PORTS; i++) { + struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i]; + if (!intel_dig_port) + continue; + + if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) { + if (!intel_dig_port->dp.can_mst) + continue; + if (intel_dig_port->dp.is_mst) + drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr); + } + } +} + +void intel_dp_mst_resume(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + for (i = 0; i < I915_MAX_PORTS; i++) { + struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i]; + if (!intel_dig_port) + continue; + if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) { + int ret; + + if (!intel_dig_port->dp.can_mst) + continue; + + ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr); + if (ret != 0) { + intel_dp_check_mst_status(&intel_dig_port->dp); + } + } + } +} diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c new file mode 100644 index 000000000000..ec48d562c7fc --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -0,0 +1,534 @@ +/* + * Copyright © 2008 Intel Corporation + * 2014 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include +#include "i915_drv.h" +#include "intel_drv.h" +#include +#include + +static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + struct drm_device *dev = encoder->base.dev; + int bpp; + int lane_count, slots; + struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; + struct intel_connector *found = NULL, *intel_connector; + int mst_pbn; + + pipe_config->dp_encoder_is_mst = true; + pipe_config->has_pch_encoder = false; + pipe_config->has_dp_encoder = true; + bpp = 24; + /* + * for MST we always configure max link bw - the spec doesn't + * seem to suggest we should do otherwise. 
+ */ + lane_count = drm_dp_max_lane_count(intel_dp->dpcd); + intel_dp->link_bw = intel_dp_max_link_bw(intel_dp); + intel_dp->lane_count = lane_count; + + pipe_config->pipe_bpp = 24; + pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); + + list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) { + if (intel_connector->new_encoder == encoder) { + found = intel_connector; + break; + } + } + + if (!found) { + DRM_ERROR("can't find connector\n"); + return false; + } + + mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp); + + pipe_config->pbn = mst_pbn; + slots = drm_dp_find_vcpi_slots(&intel_dp->mst_mgr, mst_pbn); + + intel_link_compute_m_n(bpp, lane_count, + adjusted_mode->crtc_clock, + pipe_config->port_clock, + &pipe_config->dp_m_n); + + pipe_config->dp_m_n.tu = slots; + return true; + +} + +static void intel_mst_disable_dp(struct intel_encoder *encoder) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + int ret; + + DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); + + drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->port); + + ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); + if (ret) { + DRM_ERROR("failed to update payload %d\n", ret); + } +} + +static void intel_mst_post_disable_dp(struct intel_encoder *encoder) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + + DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); + + /* this can fail */ + drm_dp_check_act_status(&intel_dp->mst_mgr); + /* and this can also fail */ + drm_dp_update_payload_part2(&intel_dp->mst_mgr); + + drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->port); + + intel_dp->active_mst_links--; + intel_mst->port = NULL; + if (intel_dp->active_mst_links == 0) { + intel_dig_port->base.post_disable(&intel_dig_port->base); + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); + } +} + +static void intel_mst_pre_enable_dp(struct intel_encoder *encoder) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum port port = intel_dig_port->port; + int ret; + uint32_t temp; + struct intel_connector *found = NULL, *intel_connector; + int slots; + struct drm_crtc *crtc = encoder->base.crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) { + if (intel_connector->new_encoder == encoder) { + found = intel_connector; + break; + } + } + + if (!found) { + DRM_ERROR("can't find connector\n"); + return; + } + + DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); + intel_mst->port = found->port; + + if (intel_dp->active_mst_links == 0) { + enum port port = intel_ddi_get_encoder_port(encoder); + + I915_WRITE(PORT_CLK_SEL(port), intel_crtc->config.ddi_pll_sel); + + intel_ddi_init_dp_buf_reg(&intel_dig_port->base); + + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + + + intel_dp_start_link_train(intel_dp); + intel_dp_complete_link_train(intel_dp); + intel_dp_stop_link_train(intel_dp); + } + + ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr, + 
intel_mst->port, intel_crtc->config.pbn, &slots); + if (ret == false) { + DRM_ERROR("failed to allocate vcpi\n"); + return; + } + + + intel_dp->active_mst_links++; + temp = I915_READ(DP_TP_STATUS(port)); + I915_WRITE(DP_TP_STATUS(port), temp); + + ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); +} + +static void intel_mst_enable_dp(struct intel_encoder *encoder) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum port port = intel_dig_port->port; + int ret; + + DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); + + if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_ACT_SENT), + 1)) + DRM_ERROR("Timed out waiting for ACT sent\n"); + + ret = drm_dp_check_act_status(&intel_dp->mst_mgr); + + ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr); +} + +static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + *pipe = intel_mst->pipe; + if (intel_mst->port) + return true; + return false; +} + +static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum transcoder cpu_transcoder = crtc->config.cpu_transcoder; + u32 temp, flags = 0; + + pipe_config->has_dp_encoder = true; + + temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + if (temp & TRANS_DDI_PHSYNC) + flags |= DRM_MODE_FLAG_PHSYNC; + else + flags |= DRM_MODE_FLAG_NHSYNC; + if (temp & TRANS_DDI_PVSYNC) + flags |= DRM_MODE_FLAG_PVSYNC; + else + flags |= DRM_MODE_FLAG_NVSYNC; + + switch (temp & TRANS_DDI_BPC_MASK) { + case TRANS_DDI_BPC_6: + pipe_config->pipe_bpp = 18; + break; + case TRANS_DDI_BPC_8: + pipe_config->pipe_bpp = 24; + break; + case TRANS_DDI_BPC_10: + pipe_config->pipe_bpp = 30; + break; + case TRANS_DDI_BPC_12: + pipe_config->pipe_bpp = 36; + break; + default: + break; + } + pipe_config->adjusted_mode.flags |= flags; + intel_dp_get_m_n(crtc, pipe_config); + + intel_ddi_clock_get(&intel_dig_port->base, pipe_config); +} + +static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dp *intel_dp = intel_connector->mst_port; + struct edid *edid; + int ret; + + edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port); + if (!edid) + return 0; + + ret = intel_connector_update_modes(connector, edid); + kfree(edid); + + return ret; +} + +static enum drm_connector_status +intel_mst_port_dp_detect(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dp *intel_dp = intel_connector->mst_port; + + return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port); +} + +static enum drm_connector_status +intel_dp_mst_detect(struct drm_connector *connector, bool force) +{ + enum drm_connector_status status; + status = intel_mst_port_dp_detect(connector); + return status; +} + +static int 
+intel_dp_mst_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t val) +{ + return 0; +} + +static void +intel_dp_mst_connector_destroy(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + + if (!IS_ERR_OR_NULL(intel_connector->edid)) + kfree(intel_connector->edid); + + drm_connector_cleanup(connector); + kfree(connector); +} + +static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { + .dpms = intel_connector_dpms, + .detect = intel_dp_mst_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = intel_dp_mst_set_property, + .destroy = intel_dp_mst_connector_destroy, +}; + +static int intel_dp_mst_get_modes(struct drm_connector *connector) +{ + return intel_dp_mst_get_ddc_modes(connector); +} + +static enum drm_mode_status +intel_dp_mst_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + /* TODO - validate mode against available PBN for link */ + if (mode->clock < 10000) + return MODE_CLOCK_LOW; + + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + return MODE_H_ILLEGAL; + + return MODE_OK; +} + +static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dp *intel_dp = intel_connector->mst_port; + return &intel_dp->mst_encoders[0]->base.base; +} + +static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = { + .get_modes = intel_dp_mst_get_modes, + .mode_valid = intel_dp_mst_mode_valid, + .best_encoder = intel_mst_best_encoder, +}; + +static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); + + drm_encoder_cleanup(encoder); + kfree(intel_mst); +} + +static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = { + .destroy = intel_dp_mst_encoder_destroy, +}; + +static bool intel_dp_mst_get_hw_state(struct intel_connector *connector) +{ + if (connector->encoder) { + enum pipe pipe; + if (!connector->encoder->get_hw_state(connector->encoder, &pipe)) + return false; + return true; + } + return false; +} + +static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *pathprop) +{ + struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_connector *intel_connector; + struct drm_connector *connector; + int i; + + intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL); + if (!intel_connector) + return NULL; + + connector = &intel_connector->base; + drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort); + drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs); + + intel_connector->unregister = intel_connector_unregister; + intel_connector->get_hw_state = intel_dp_mst_get_hw_state; + intel_connector->mst_port = intel_dp; + intel_connector->port = port; + + for (i = PIPE_A; i <= PIPE_C; i++) { + drm_mode_connector_attach_encoder(&intel_connector->base, + &intel_dp->mst_encoders[i]->base.base); + } + intel_dp_add_properties(intel_dp, connector); + + drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); + 
drm_mode_connector_set_path_property(connector, pathprop); + drm_reinit_primary_mode_group(dev); + mutex_lock(&dev->mode_config.mutex); + drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, connector); + mutex_unlock(&dev->mode_config.mutex); + drm_connector_register(&intel_connector->base); + return connector; +} + +static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, + struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + /* need to nuke the connector */ + mutex_lock(&dev->mode_config.mutex); + intel_connector_dpms(connector, DRM_MODE_DPMS_OFF); + mutex_unlock(&dev->mode_config.mutex); + + intel_connector->unregister(intel_connector); + + mutex_lock(&dev->mode_config.mutex); + drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, connector); + drm_connector_cleanup(connector); + mutex_unlock(&dev->mode_config.mutex); + + drm_reinit_primary_mode_group(dev); + + kfree(intel_connector); + DRM_DEBUG_KMS("\n"); +} + +static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) +{ + struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + + drm_kms_helper_hotplug_event(dev); +} + +static struct drm_dp_mst_topology_cbs mst_cbs = { + .add_connector = intel_dp_add_mst_connector, + .destroy_connector = intel_dp_destroy_mst_connector, + .hotplug = intel_dp_mst_hotplug, +}; + +static struct intel_dp_mst_encoder * +intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum pipe pipe) +{ + struct intel_dp_mst_encoder *intel_mst; + struct intel_encoder *intel_encoder; + struct drm_device *dev = intel_dig_port->base.base.dev; + + intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL); + + if (!intel_mst) + return NULL; + + intel_mst->pipe = pipe; + intel_encoder = &intel_mst->base; + intel_mst->primary = intel_dig_port; + + drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs, + DRM_MODE_ENCODER_DPMST); + + intel_encoder->type = INTEL_OUTPUT_DP_MST; + intel_encoder->crtc_mask = 0x7; + intel_encoder->cloneable = 0; + + intel_encoder->compute_config = intel_dp_mst_compute_config; + intel_encoder->disable = intel_mst_disable_dp; + intel_encoder->post_disable = intel_mst_post_disable_dp; + intel_encoder->pre_enable = intel_mst_pre_enable_dp; + intel_encoder->enable = intel_mst_enable_dp; + intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state; + intel_encoder->get_config = intel_dp_mst_enc_get_config; + + return intel_mst; + +} + +static bool +intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port) +{ + int i; + struct intel_dp *intel_dp = &intel_dig_port->dp; + + for (i = PIPE_A; i <= PIPE_C; i++) + intel_dp->mst_encoders[i] = intel_dp_create_fake_mst_encoder(intel_dig_port, i); + return true; +} + +int +intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id) +{ + struct intel_dp *intel_dp = &intel_dig_port->dp; + struct drm_device *dev = intel_dig_port->base.base.dev; + int ret; + + intel_dp->can_mst = true; + intel_dp->mst_mgr.cbs = &mst_cbs; + + /* create encoders */ + intel_dp_create_fake_mst_encoders(intel_dig_port); + ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev->dev, &intel_dp->aux, 16, 3, conn_base_id); + if (ret) { + intel_dp->can_mst = false; + 
return ret; + } + return 0; +} + +void +intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port) +{ + struct intel_dp *intel_dp = &intel_dig_port->dp; + + if (!intel_dp->can_mst) + return; + + drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr); + /* encoders will get killed by normal cleanup */ +} diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 40086e1a4ee3..1dfd1e518551 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -32,7 +32,7 @@ #include #include #include -#include +#include /** * _wait_for - magic (register) wait macro @@ -100,6 +100,7 @@ #define INTEL_OUTPUT_EDP 8 #define INTEL_OUTPUT_DSI 9 #define INTEL_OUTPUT_UNKNOWN 10 +#define INTEL_OUTPUT_DP_MST 11 #define INTEL_DVO_CHIP_NONE 0 #define INTEL_DVO_CHIP_LVDS 1 @@ -207,6 +208,10 @@ struct intel_connector { /* since POLL and HPD connectors may use the same HPD line keep the native state of connector->polled in case hotplug storm detection changes it */ u8 polled; + + void *port; /* store this opaque as its illegal to dereference it */ + + struct intel_dp *mst_port; }; typedef struct dpll { @@ -351,6 +356,9 @@ struct intel_crtc_config { bool ips_enabled; bool double_wide; + + bool dp_encoder_is_mst; + int pbn; }; struct intel_pipe_wm { @@ -506,6 +514,7 @@ struct intel_hdmi { struct drm_display_mode *adjusted_mode); }; +struct intel_dp_mst_encoder; #define DP_MAX_DOWNSTREAM_PORTS 0x10 /** @@ -545,8 +554,16 @@ struct intel_dp { unsigned long last_power_on; unsigned long last_backlight_off; bool use_tps3; + bool can_mst; /* this port supports mst */ + bool is_mst; + int active_mst_links; + /* connector directly attached - won't be use for modeset in mst world */ struct intel_connector *attached_connector; + /* mst connector list */ + struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES]; + struct drm_dp_mst_topology_mgr mst_mgr; + uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index); /* * This function returns the value we have to program the AUX_CTL @@ -573,6 +590,13 @@ struct intel_digital_port { bool (*hpd_pulse)(struct intel_digital_port *, bool); }; +struct intel_dp_mst_encoder { + struct intel_encoder base; + enum pipe pipe; + struct intel_digital_port *primary; + void *port; /* store this opaque as its illegal to dereference it */ +}; + static inline int vlv_dport_to_channel(struct intel_digital_port *dport) { @@ -657,6 +681,12 @@ enc_to_dig_port(struct drm_encoder *encoder) return container_of(encoder, struct intel_digital_port, base.base); } +static inline struct intel_dp_mst_encoder * +enc_to_mst(struct drm_encoder *encoder) +{ + return container_of(encoder, struct intel_dp_mst_encoder, base.base); +} + static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) { return &enc_to_dig_port(encoder)->dp; @@ -719,6 +749,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config); void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder); +void intel_ddi_clock_get(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config); +void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state); /* intel_display.c */ const char *intel_output_name(int output); @@ -871,6 +904,15 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate); void intel_edp_psr_exit(struct drm_device *dev); void intel_edp_psr_init(struct drm_device *dev); +int intel_dp_handle_hpd_irq(struct intel_digital_port *digport, bool long_hpd); +void 
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector); +void intel_dp_mst_suspend(struct drm_device *dev); +void intel_dp_mst_resume(struct drm_device *dev); +int intel_dp_max_link_bw(struct intel_dp *intel_dp); +void intel_dp_hot_plug(struct intel_encoder *intel_encoder); +/* intel_dp_mst.c */ +int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); +void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); /* intel_dsi.c */ void intel_dsi_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index f475414671d8..7cdb8861bb7c 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -375,6 +375,11 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, } encoder = connector->encoder; + if (!encoder) { + struct drm_connector_helper_funcs *connector_funcs; + connector_funcs = connector->helper_private; + encoder = connector_funcs->best_encoder(connector); + } if (!encoder || WARN_ON(!encoder->crtc)) { DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n", connector->name); diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 2e2c71fcc9ed..d37934b6338e 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -352,6 +352,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, case INTEL_OUTPUT_UNKNOWN: case INTEL_OUTPUT_DISPLAYPORT: case INTEL_OUTPUT_HDMI: + case INTEL_OUTPUT_DP_MST: type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL; break; case INTEL_OUTPUT_EDP: -- cgit v1.2.3 From 6af257cde0c536f69872cd335f186c3fdaa5f26a Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Tue, 15 Jul 2014 09:17:41 +0200 Subject: drm/i915: PM irq enabling is generic on gen8, too No need to list all the platforms explicitly. The prefix is a bit inconsistent since we usually pick gen8_ for GT related functions. But this anti-pattern is already established with snb, so material for a different patch. Cc: Damien Lespiau Reviewed-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 188fe04c4f8d..7d61ca2a01df 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1407,7 +1407,7 @@ static void gen6_pm_rps_work(struct work_struct *work) spin_lock_irq(&dev_priv->irq_lock); pm_iir = dev_priv->rps.pm_iir; dev_priv->rps.pm_iir = 0; - if (IS_BROADWELL(dev_priv->dev) || IS_CHERRYVIEW(dev_priv->dev)) + if (INTEL_INFO(dev_priv->dev)->gen >= 8) bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); else { /* Make sure not to corrupt PMIMR state used by ringbuffer */ -- cgit v1.2.3 From 480c80338618867851659710d1a27c9cc85833d2 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 16 Jul 2014 09:49:40 +0200 Subject: drm/i915: Use genX_ prefix for gt irq enable/disable functions Traditionally we use genX_ for GT/render stuff and the codenames for display stuff. But the gt and pm interrupt handling functions on gen5/6+ stuck out as exceptions, so convert them. Looking at the diff this nicely realigns our ducks since almost all the callers are already platform-specific functions following the genX_ pattern. Spotted while reviewing some internal rps patches. No function change in this patch. 
Acked-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 24 ++++++++++++------------ drivers/gpu/drm/i915/intel_drv.h | 12 ++++++------ drivers/gpu/drm/i915/intel_pm.c | 4 ++-- drivers/gpu/drm/i915/intel_ringbuffer.c | 12 ++++++------ 4 files changed, 26 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 7d61ca2a01df..dfe923a3cb92 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -182,12 +182,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, POSTING_READ(GTIMR); } -void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) +void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) { ilk_update_gt_irq(dev_priv, mask, mask); } -void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) +void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) { ilk_update_gt_irq(dev_priv, mask, 0); } @@ -220,12 +220,12 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv, } } -void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) +void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) { snb_update_pm_irq(dev_priv, mask, mask); } -void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) +void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) { snb_update_pm_irq(dev_priv, mask, 0); } @@ -278,12 +278,12 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv, } } -void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) +void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) { bdw_update_pm_irq(dev_priv, mask, mask); } -void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) +void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) { bdw_update_pm_irq(dev_priv, mask, 0); } @@ -1408,10 +1408,10 @@ static void gen6_pm_rps_work(struct work_struct *work) pm_iir = dev_priv->rps.pm_iir; dev_priv->rps.pm_iir = 0; if (INTEL_INFO(dev_priv->dev)->gen >= 8) - bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); + gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); else { /* Make sure not to corrupt PMIMR state used by ringbuffer */ - snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); + gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); } spin_unlock_irq(&dev_priv->irq_lock); @@ -1553,7 +1553,7 @@ static void ivybridge_parity_work(struct work_struct *work) out: WARN_ON(dev_priv->l3_parity.which_slice); spin_lock_irqsave(&dev_priv->irq_lock, flags); - ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); + gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); mutex_unlock(&dev_priv->dev->struct_mutex); @@ -1567,7 +1567,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir) return; spin_lock(&dev_priv->irq_lock); - ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev)); + gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev)); spin_unlock(&dev_priv->irq_lock); iir &= GT_PARITY_ERROR(dev); @@ -1622,7 +1622,7 @@ static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) spin_lock(&dev_priv->irq_lock); dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; - bdw_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); + gen8_disable_pm_irq(dev_priv, pm_iir & 
dev_priv->pm_rps_events); spin_unlock(&dev_priv->irq_lock); queue_work(dev_priv->wq, &dev_priv->rps.work); @@ -1969,7 +1969,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) if (pm_iir & dev_priv->pm_rps_events) { spin_lock(&dev_priv->irq_lock); dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; - snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); + gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); spin_unlock(&dev_priv->irq_lock); queue_work(dev_priv->wq, &dev_priv->rps.work); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 6093ebdeb7cf..9d97a50cae4b 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -683,12 +683,12 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, enum transcoder pch_transcoder, bool enable); -void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); -void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); -void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); -void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); -void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); -void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); +void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); +void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); +void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); +void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); +void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); +void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); void intel_runtime_pm_disable_interrupts(struct drm_device *dev); void intel_runtime_pm_restore_interrupts(struct drm_device *dev); int intel_get_crtc_scanline(struct intel_crtc *crtc); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 6e03851a4fa4..25ae4e6d3dd6 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3474,7 +3474,7 @@ static void gen8_enable_rps_interrupts(struct drm_device *dev) spin_lock_irq(&dev_priv->irq_lock); WARN_ON(dev_priv->rps.pm_iir); - bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); + gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events); spin_unlock_irq(&dev_priv->irq_lock); } @@ -3485,7 +3485,7 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev) spin_lock_irq(&dev_priv->irq_lock); WARN_ON(dev_priv->rps.pm_iir); - snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); + gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events); spin_unlock_irq(&dev_priv->irq_lock); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 599709e80a16..b3d8f766fa7f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1004,7 +1004,7 @@ gen5_ring_get_irq(struct intel_engine_cs *ring) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (ring->irq_refcount++ == 0) - ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); + gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); return true; @@ -1019,7 +1019,7 @@ gen5_ring_put_irq(struct 
intel_engine_cs *ring) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (--ring->irq_refcount == 0) - ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); + gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); } @@ -1212,7 +1212,7 @@ gen6_ring_get_irq(struct intel_engine_cs *ring) GT_PARITY_ERROR(dev))); else I915_WRITE_IMR(ring, ~ring->irq_enable_mask); - ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); + gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); @@ -1232,7 +1232,7 @@ gen6_ring_put_irq(struct intel_engine_cs *ring) I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev)); else I915_WRITE_IMR(ring, ~0); - ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); + gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); } @@ -1250,7 +1250,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (ring->irq_refcount++ == 0) { I915_WRITE_IMR(ring, ~ring->irq_enable_mask); - snb_enable_pm_irq(dev_priv, ring->irq_enable_mask); + gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); @@ -1270,7 +1270,7 @@ hsw_vebox_put_irq(struct intel_engine_cs *ring) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (--ring->irq_refcount == 0) { I915_WRITE_IMR(ring, ~0); - snb_disable_pm_irq(dev_priv, ring->irq_enable_mask); + gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); } -- cgit v1.2.3 From 813bde438c575e2c84eed9702143d915702847e3 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 4 Jul 2014 11:50:29 -0300 Subject: drm/i915: don't write powered down IRQ registers on Gen 8 If we enable unclaimed register reporting on Gen 8, we will discover that the IRQ registers for pipes B and C are also on the power well, so writes to them when the power well is disabled result in unclaimed register errors. Also, hsw_power_well_post_enable() already takes care of re-enabling them once the power well is enabled. 
Testcase: igt/pm_rpm/rte Signed-off-by: Paulo Zanoni Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index dfe923a3cb92..6f19420cc1bf 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3467,7 +3467,9 @@ static void gen8_irq_reset(struct drm_device *dev) gen8_gt_irq_reset(dev_priv); for_each_pipe(pipe) - GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); + if (intel_display_power_enabled(dev_priv, + POWER_DOMAIN_PIPE(pipe))) + GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); GEN5_IRQ_RESET(GEN8_DE_PORT_); GEN5_IRQ_RESET(GEN8_DE_MISC_); @@ -3800,8 +3802,11 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; for_each_pipe(pipe) - GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe], - de_pipe_enables); + if (intel_display_power_enabled(dev_priv, + POWER_DOMAIN_PIPE(pipe))) + GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, + dev_priv->de_irq_mask[pipe], + de_pipe_enables); GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A); } -- cgit v1.2.3 From d49bdb0e1054d022cc6f88fcecf9c79bae66eab0 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 4 Jul 2014 11:50:31 -0300 Subject: drm/i915: extract and improve gen8_irq_power_well_post_enable Move it from hsw_power_well_post_enable() (intel_pm.c) to i915_irq.c so we can reuse the nice IRQ macros we have there. The main difference is that now we're going to check if the IIR register is non-zero when we try to re-enable the interrupts. Signed-off-by: Paulo Zanoni Reviewed-by: Rodrigo Vivi Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 12 ++++++++++++ drivers/gpu/drm/i915/intel_drv.h | 1 + drivers/gpu/drm/i915/intel_pm.c | 18 ++---------------- 3 files changed, 15 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 6f19420cc1bf..e2e9bb8f4fe7 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3478,6 +3478,18 @@ static void gen8_irq_reset(struct drm_device *dev) ibx_irq_reset(dev); } +void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv) +{ + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B], + ~dev_priv->de_irq_mask[PIPE_B]); + GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C], + ~dev_priv->de_irq_mask[PIPE_C]); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + static void cherryview_irq_preinstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 9d97a50cae4b..bf415df11389 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -693,6 +693,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_device *dev); void intel_runtime_pm_restore_interrupts(struct drm_device *dev); int intel_get_crtc_scanline(struct intel_crtc *crtc); void i9xx_check_fifo_underruns(struct drm_device *dev); +void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv); /* intel_crt.c */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 25ae4e6d3dd6..d23ba37e6ab9 100644 
--- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5998,7 +5998,6 @@ bool intel_display_power_enabled(struct drm_i915_private *dev_priv, static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; - unsigned long irqflags; /* * After we re-enable the power well, if we touch VGA register 0x3d5 @@ -6014,21 +6013,8 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv) outb(inb(VGA_MSR_READ), VGA_MSR_WRITE); vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); - if (IS_BROADWELL(dev)) { - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B), - dev_priv->de_irq_mask[PIPE_B]); - I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B), - ~dev_priv->de_irq_mask[PIPE_B] | - GEN8_PIPE_VBLANK); - I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C), - dev_priv->de_irq_mask[PIPE_C]); - I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C), - ~dev_priv->de_irq_mask[PIPE_C] | - GEN8_PIPE_VBLANK); - POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C)); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); - } + if (IS_BROADWELL(dev)) + gen8_irq_power_well_post_enable(dev_priv); } static void hsw_set_power_well(struct drm_i915_private *dev_priv, -- cgit v1.2.3 From 9a76e4956b719ec32acdd37c80944423e4825bbf Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 20 Jun 2014 09:29:19 -0700 Subject: drm/i915: don't warn if IRQs are disabled when shutting down display IRQs This was always the case on our suspend path, but it was recently exposed by the change to use our runtime IRQ disable routine rather than the full DRM IRQ disable. Keep the warning on the enable side, as that really would indicate a bug. Signed-off-by: Jesse Barnes Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e2e9bb8f4fe7..3ae33e7e9d09 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -151,7 +151,7 @@ ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask) { assert_spin_locked(&dev_priv->irq_lock); - if (WARN_ON(dev_priv->pm.irqs_disabled)) + if (dev_priv->pm.irqs_disabled) return; if ((dev_priv->irq_mask & mask) != mask) { -- cgit v1.2.3 From 9df7575f1c751a333b1989d1c7e2b9fc884d9105 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 20 Jun 2014 09:29:20 -0700 Subject: drm/i915: add helper for checking whether IRQs are enabled Now that we use the runtime IRQ enable/disable functions in our suspend path, we can simply check the pm._irqs_disabled flag everywhere. So rename it to catch the users, and add an inline for it to make the checks clear everywhere. 
Signed-off-by: Jesse Barnes Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/i915_irq.c | 16 ++++++++-------- drivers/gpu/drm/i915/intel_display.c | 2 +- drivers/gpu/drm/i915/intel_drv.h | 10 +++++++++- drivers/gpu/drm/i915/intel_pm.c | 6 +++--- 7 files changed, 24 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index fc39610fe12d..7e72800c5d17 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1996,7 +1996,7 @@ static int i915_pc8_status(struct seq_file *m, void *unused) seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy)); seq_printf(m, "IRQs disabled: %s\n", - yesno(dev_priv->pm.irqs_disabled)); + yesno(!intel_irqs_enabled(dev_priv))); return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7f4f2b745925..8b781f8ad3a9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1339,7 +1339,7 @@ struct ilk_wm_values { */ struct i915_runtime_pm { bool suspended; - bool irqs_disabled; + bool _irqs_disabled; }; enum intel_pipe_crc_source { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ef047bce008d..ed0b5fc4b6b0 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1161,7 +1161,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno, unsigned long timeout_expire; int ret; - WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n"); + WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled"); if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) return 0; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 3ae33e7e9d09..6774f88479ec 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -136,7 +136,7 @@ ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask) { assert_spin_locked(&dev_priv->irq_lock); - if (WARN_ON(dev_priv->pm.irqs_disabled)) + if (WARN_ON(!intel_irqs_enabled(dev_priv))) return; if ((dev_priv->irq_mask & mask) != 0) { @@ -151,7 +151,7 @@ ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask) { assert_spin_locked(&dev_priv->irq_lock); - if (dev_priv->pm.irqs_disabled) + if (!intel_irqs_enabled(dev_priv)) return; if ((dev_priv->irq_mask & mask) != mask) { @@ -173,7 +173,7 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, { assert_spin_locked(&dev_priv->irq_lock); - if (WARN_ON(dev_priv->pm.irqs_disabled)) + if (WARN_ON(!intel_irqs_enabled(dev_priv))) return; dev_priv->gt_irq_mask &= ~interrupt_mask; @@ -206,7 +206,7 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv, assert_spin_locked(&dev_priv->irq_lock); - if (WARN_ON(dev_priv->pm.irqs_disabled)) + if (WARN_ON(!intel_irqs_enabled(dev_priv))) return; new_val = dev_priv->pm_irq_mask; @@ -264,7 +264,7 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv, assert_spin_locked(&dev_priv->irq_lock); - if (WARN_ON(dev_priv->pm.irqs_disabled)) + if (WARN_ON(!intel_irqs_enabled(dev_priv))) return; new_val = dev_priv->pm_irq_mask; @@ -420,7 +420,7 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, assert_spin_locked(&dev_priv->irq_lock); - if (WARN_ON(dev_priv->pm.irqs_disabled)) + if 
(WARN_ON(!intel_irqs_enabled(dev_priv))) return; I915_WRITE(SDEIMR, sdeimr); @@ -4774,7 +4774,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; dev->driver->irq_uninstall(dev); - dev_priv->pm.irqs_disabled = true; + dev_priv->pm._irqs_disabled = true; } /* Restore interrupts so we can recover from runtime PM. */ @@ -4782,7 +4782,7 @@ void intel_runtime_pm_restore_interrupts(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - dev_priv->pm.irqs_disabled = false; + dev_priv->pm._irqs_disabled = false; dev->driver->irq_preinstall(dev); dev->driver->irq_postinstall(dev); } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 06566d6c328f..1cdd1c16d983 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -7341,7 +7341,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) * gen-specific and since we only disable LCPLL after we fully disable * the interrupts, the check below should be enough. */ - WARN(!dev_priv->pm.irqs_disabled, "IRQs enabled\n"); + WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); } static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index bf415df11389..8fc68c783228 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -691,11 +691,19 @@ void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); void intel_runtime_pm_disable_interrupts(struct drm_device *dev); void intel_runtime_pm_restore_interrupts(struct drm_device *dev); +static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv) +{ + /* + * We only use drm_irq_uninstall() at unload and VT switch, so + * this is the only thing we need to check. + */ + return !dev_priv->pm._irqs_disabled; +} + int intel_get_crtc_scanline(struct intel_crtc *crtc); void i9xx_check_fifo_underruns(struct drm_device *dev); void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv); - /* intel_crt.c */ void intel_crt_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d23ba37e6ab9..3f88f29a98c0 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4976,7 +4976,7 @@ void intel_suspend_gt_powersave(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; /* Interrupts should be disabled already to avoid re-arming. */ - WARN_ON(dev->irq_enabled && !dev_priv->pm.irqs_disabled); + WARN_ON(intel_irqs_enabled(dev_priv)); flush_delayed_work(&dev_priv->rps.delayed_resume_work); @@ -4991,7 +4991,7 @@ void intel_disable_gt_powersave(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; /* Interrupts should be disabled already to avoid re-arming. 
*/ - WARN_ON(dev->irq_enabled && !dev_priv->pm.irqs_disabled); + WARN_ON(intel_irqs_enabled(dev_priv)); if (IS_IRONLAKE_M(dev)) { ironlake_disable_drps(dev); @@ -7069,5 +7069,5 @@ void intel_pm_setup(struct drm_device *dev) intel_gen6_powersave_work); dev_priv->pm.suspended = false; - dev_priv->pm.irqs_disabled = false; + dev_priv->pm._irqs_disabled = false; } -- cgit v1.2.3 From 95f25beddba2ec9510b249740bacc11eca70cf75 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 20 Jun 2014 09:29:22 -0700 Subject: drm/i915: set pm._irqs_disabled at IRQ init time Before we've installed the handler, we can set this and avoid confusing init code that then thinks IRQs are enabled and spews complaints everywhere. Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 6774f88479ec..6cdaecf0990c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -4669,6 +4669,9 @@ void intel_irq_init(struct drm_device *dev) pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); + /* Haven't installed the IRQ handler yet */ + dev_priv->pm._irqs_disabled = true; + if (IS_GEN2(dev)) { dev->max_vblank_count = 0; dev->driver->get_vblank_counter = i8xx_get_vblank_counter; -- cgit v1.2.3 From f260fe7b2f8c32e2fb02a36d3b76ef64b61dbdc5 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Tue, 5 Aug 2014 17:16:26 +0300 Subject: drm/i915: Don't accumulate hangcheck score on forward progress If the actual head has progressed forward inside a batch (request), don't accumulate hangcheck score. As the hangcheck score is increased only by acthd jumping backwards, the result is that we only declare an active batch as stuck if it is trapped inside a loop, or if the looping dominates the batch's progression so much that it overcomes the bonus that forward progress gives. v2: Improved commit message (Chris Wilson) Signed-off-by: Mika Kuoppala [danvet: s/active_loop/active (loop)/ as requested by Chris.]
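To make the rule above concrete, here is a tiny stand-alone model of the scoring decision (plain C, illustrative only: the names and the surrounding bookkeeping are simplified stand-ins, not the driver code, whose real version follows in the diff below):

#include <assert.h>
#include <stdint.h>

/* Simplified model of the rule described above: the hangcheck score may only
 * accumulate when ACTHD moves without setting a new high-water mark, i.e.
 * when the batch appears to be trapped in a loop. Names are illustrative and
 * only loosely correspond to HANGCHECK_ACTIVE / HANGCHECK_ACTIVE_LOOP. */
enum action { ACTIVE, ACTIVE_LOOP, NO_PROGRESS };

struct hangcheck { uint64_t acthd, max_acthd; };

static enum action classify(struct hangcheck *hc, uint64_t acthd)
{
	if (acthd != hc->acthd) {
		if (acthd > hc->max_acthd) {
			hc->max_acthd = acthd;
			return ACTIVE;       /* forward progress: no score */
		}
		return ACTIVE_LOOP;          /* moving, but looping: score it */
	}
	return NO_PROGRESS;                  /* head did not move; the driver
					      * then does deeper hung/kick checks */
}

int main(void)
{
	struct hangcheck hc = { 0, 0 };

	assert(classify(&hc, 0x100) == ACTIVE);      /* new high-water mark */
	hc.acthd = 0x100;
	assert(classify(&hc, 0x080) == ACTIVE_LOOP); /* jumped backwards */
	hc.acthd = 0x080;
	assert(classify(&hc, 0x080) == NO_PROGRESS); /* stuck on one address */
	return 0;
}

In the driver itself the same comparison lives in ring_stuck(), and i915_hangcheck_elapsed() additionally clears acthd/max_acthd in the branch where the score is allowed to decay, as the hunks below show.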
Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gpu_error.c | 2 ++ drivers/gpu/drm/i915/i915_irq.c | 15 ++++++++++++--- drivers/gpu/drm/i915/intel_ringbuffer.h | 2 ++ 3 files changed, 16 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 0c945e91d542..7c478e6cedae 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -229,6 +229,8 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a) return "wait"; case HANGCHECK_ACTIVE: return "active"; + case HANGCHECK_ACTIVE_LOOP: + return "active (loop)"; case HANGCHECK_KICK: return "kick"; case HANGCHECK_HUNG: diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 6ef9d6fabf80..390ccc2a3096 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3189,8 +3189,14 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd) struct drm_i915_private *dev_priv = dev->dev_private; u32 tmp; - if (ring->hangcheck.acthd != acthd) - return HANGCHECK_ACTIVE; + if (acthd != ring->hangcheck.acthd) { + if (acthd > ring->hangcheck.max_acthd) { + ring->hangcheck.max_acthd = acthd; + return HANGCHECK_ACTIVE; + } + + return HANGCHECK_ACTIVE_LOOP; + } if (IS_GEN2(dev)) return HANGCHECK_HUNG; @@ -3301,8 +3307,9 @@ static void i915_hangcheck_elapsed(unsigned long data) switch (ring->hangcheck.action) { case HANGCHECK_IDLE: case HANGCHECK_WAIT: - break; case HANGCHECK_ACTIVE: + break; + case HANGCHECK_ACTIVE_LOOP: ring->hangcheck.score += BUSY; break; case HANGCHECK_KICK: @@ -3322,6 +3329,8 @@ static void i915_hangcheck_elapsed(unsigned long data) */ if (ring->hangcheck.score > 0) ring->hangcheck.score--; + + ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0; } ring->hangcheck.seqno = seqno; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index ed5941078f92..70525d0c2c74 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -70,6 +70,7 @@ enum intel_ring_hangcheck_action { HANGCHECK_IDLE = 0, HANGCHECK_WAIT, HANGCHECK_ACTIVE, + HANGCHECK_ACTIVE_LOOP, HANGCHECK_KICK, HANGCHECK_HUNG, }; @@ -78,6 +79,7 @@ enum intel_ring_hangcheck_action { struct intel_ring_hangcheck { u64 acthd; + u64 max_acthd; u32 seqno; int score; enum intel_ring_hangcheck_action action; -- cgit v1.2.3 From 6323751d28b0cc785dab972eff6b7fca1a165a40 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 18 Aug 2014 15:37:02 +0300 Subject: drm/i915: fix HPD IRQ reenable work cancelation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Atm, the HPD IRQ reenable timer can get rearmed right after it's canceled. Also to access the HPD IRQ mask registers we need to wake up the HW. Solve both issues by converting the reenable timer to a delayed work and grabbing a runtime PM reference in the work. By this we can also forgo canceling the timer during runtime suspend, since the only important thing there is that the HW is awake when we write the registers and that's ensured by the RPM ref. So do the cancelation only during driver unload time; this is also a requirement for an upcoming patch where we want to cancel all HPD related works only during system suspend and driver unload time, but not during runtime suspend. 
Note that there is still a race between the HPD IRQ reenable work and drm_irq_uninstall() during driver unload, where the work can reenable the HPD IRQs disabled by drm_irq_uninstall(). This isn't a problem since the HPD IRQs will still be effectively masked by the first level interrupt mask. v2-3: - unchanged v4: - use proper API for changing the expiration time for an already pending delayed work (Jani) Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä (v2) Cc: stable@vger.kernel.org (3.16+) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_irq.c | 33 ++++++++++++--------------------- drivers/gpu/drm/i915/intel_display.c | 1 + 3 files changed, 14 insertions(+), 22 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_irq.c') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4412f6a4383b..1263aacbe025 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1458,7 +1458,7 @@ struct drm_i915_private { } hpd_mark; } hpd_stats[HPD_NUM_PINS]; u32 hpd_event_bits; - struct timer_list hotplug_reenable_timer; + struct delayed_work hotplug_reenable_work; struct i915_fbc fbc; struct i915_drrs drrs; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 390ccc2a3096..0050ee9470f1 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1189,8 +1189,8 @@ static void i915_hotplug_work_func(struct work_struct *work) * some connectors */ if (hpd_disabled) { drm_kms_helper_poll_enable(dev); - mod_timer(&dev_priv->hotplug_reenable_timer, - jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); + mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work, + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); } spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); @@ -1213,11 +1213,6 @@ static void i915_hotplug_work_func(struct work_struct *work) drm_kms_helper_hotplug_event(dev); } -static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv) -{ - del_timer_sync(&dev_priv->hotplug_reenable_timer); -} - static void ironlake_rps_change_irq_handler(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -3892,8 +3887,6 @@ static void gen8_irq_uninstall(struct drm_device *dev) if (!dev_priv) return; - intel_hpd_irq_uninstall(dev_priv); - gen8_irq_reset(dev); } @@ -3908,8 +3901,6 @@ static void valleyview_irq_uninstall(struct drm_device *dev) I915_WRITE(VLV_MASTER_IER, 0); - intel_hpd_irq_uninstall(dev_priv); - for_each_pipe(pipe) I915_WRITE(PIPESTAT(pipe), 0xffff); @@ -3988,8 +3979,6 @@ static void ironlake_irq_uninstall(struct drm_device *dev) if (!dev_priv) return; - intel_hpd_irq_uninstall(dev_priv); - ironlake_irq_reset(dev); } @@ -4360,8 +4349,6 @@ static void i915_irq_uninstall(struct drm_device * dev) struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - intel_hpd_irq_uninstall(dev_priv); - if (I915_HAS_HOTPLUG(dev)) { I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); @@ -4598,8 +4585,6 @@ static void i965_irq_uninstall(struct drm_device * dev) if (!dev_priv) return; - intel_hpd_irq_uninstall(dev_priv); - I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); @@ -4615,14 +4600,18 @@ static void i965_irq_uninstall(struct drm_device * dev) I915_WRITE(IIR, I915_READ(IIR)); } -static void intel_hpd_irq_reenable(unsigned long data) +static void intel_hpd_irq_reenable(struct work_struct *work) { - struct 
drm_i915_private *dev_priv = (struct drm_i915_private *)data; + struct drm_i915_private *dev_priv = + container_of(work, typeof(*dev_priv), + hotplug_reenable_work.work); struct drm_device *dev = dev_priv->dev; struct drm_mode_config *mode_config = &dev->mode_config; unsigned long irqflags; int i; + intel_runtime_pm_get(dev_priv); + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { struct drm_connector *connector; @@ -4648,6 +4637,8 @@ static void intel_hpd_irq_reenable(unsigned long data) if (dev_priv->display.hpd_irq_setup) dev_priv->display.hpd_irq_setup(dev); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + intel_runtime_pm_put(dev_priv); } void intel_irq_init(struct drm_device *dev) @@ -4670,8 +4661,8 @@ void intel_irq_init(struct drm_device *dev) setup_timer(&dev_priv->gpu_error.hangcheck_timer, i915_hangcheck_elapsed, (unsigned long) dev); - setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable, - (unsigned long) dev_priv); + INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work, + intel_hpd_irq_reenable); pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 283830045ee2..bcf8d18ffbb1 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13104,6 +13104,7 @@ void intel_modeset_cleanup(struct drm_device *dev) */ drm_irq_uninstall(dev); cancel_work_sync(&dev_priv->hotplug_work); + cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work); dev_priv->pm._irqs_disabled = true; /* -- cgit v1.2.3
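The HPD reenable fix above boils down to one invariant: the deferred work must hold a runtime PM reference so the hardware is awake while the mask registers are written. Below is a minimal stand-alone model of that shape, in plain C with made-up names (pm_refcount, hw_awake, write_hpd_mask and friends are illustrative stand-ins, not the i915 or runtime-PM API):

#include <assert.h>
#include <stdbool.h>

/* Toy model of the pattern used by intel_hpd_irq_reenable() above: the
 * deferred work takes a runtime-PM style reference before touching the
 * "registers", so the device is guaranteed to be awake for the write. */

static int pm_refcount;   /* models the runtime PM reference count */
static bool hw_awake;     /* models the device power state */

static void rpm_get(void) { pm_refcount++; hw_awake = true; }
static void rpm_put(void) { if (--pm_refcount == 0) hw_awake = false; }

static void write_hpd_mask(void)
{
	/* In the driver this is an MMIO write; it must only happen while
	 * the hardware is awake. */
	assert(hw_awake);
}

/* Runs later from a workqueue in the real code. */
static void hpd_reenable_work(void)
{
	rpm_get();
	write_hpd_mask();
	rpm_put();
}

int main(void)
{
	hw_awake = false;        /* device may be runtime suspended */
	hpd_reenable_work();     /* safe: the work wakes the HW first */
	assert(pm_refcount == 0 && !hw_awake);
	return 0;
}

The timer-to-delayed-work conversion in the patch is what makes this possible at all: a timer callback runs in atomic context and cannot block on runtime resume, whereas a work item runs in process context and can.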