Diffstat (limited to 'drivers/gpu/drm/i915/i915_irq.c')
 drivers/gpu/drm/i915/i915_irq.c | 390 ++++++++++++++++++++----------------
 1 file changed, 209 insertions(+), 181 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e6ffef2f707a..fd97fe00cd0d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -180,7 +180,7 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
 {
 	uint32_t val;
 
-	assert_spin_locked(&dev_priv->irq_lock);
+	lockdep_assert_held(&dev_priv->irq_lock);
 	WARN_ON(bits & ~mask);
 
 	val = I915_READ(PORT_HOTPLUG_EN);
@@ -222,7 +222,7 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
 {
 	uint32_t new_val;
 
-	assert_spin_locked(&dev_priv->irq_lock);
+	lockdep_assert_held(&dev_priv->irq_lock);
 
 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
@@ -250,7 +250,7 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
 			      uint32_t interrupt_mask,
 			      uint32_t enabled_irq_mask)
 {
-	assert_spin_locked(&dev_priv->irq_lock);
+	lockdep_assert_held(&dev_priv->irq_lock);
 
 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
@@ -302,7 +302,7 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
 
 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
 
-	assert_spin_locked(&dev_priv->irq_lock);
+	lockdep_assert_held(&dev_priv->irq_lock);
 
 	new_val = dev_priv->pm_imr;
 	new_val &= ~interrupt_mask;
@@ -340,7 +340,7 @@ void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
 {
 	i915_reg_t reg = gen6_pm_iir(dev_priv);
 
-	assert_spin_locked(&dev_priv->irq_lock);
+	lockdep_assert_held(&dev_priv->irq_lock);
 
 	I915_WRITE(reg, reset_mask);
 	I915_WRITE(reg, reset_mask);
@@ -349,7 +349,7 @@ void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
 
 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
 {
-	assert_spin_locked(&dev_priv->irq_lock);
+	lockdep_assert_held(&dev_priv->irq_lock);
 
 	dev_priv->pm_ier |= enable_mask;
 	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
@@ -359,7 +359,7 @@ void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
 
 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
 {
-	assert_spin_locked(&dev_priv->irq_lock);
+	lockdep_assert_held(&dev_priv->irq_lock);
 
 	dev_priv->pm_ier &= ~disable_mask;
 	__gen6_mask_pm_irq(dev_priv, disable_mask);
@@ -389,11 +389,6 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
-{
-	return (mask & ~dev_priv->rps.pm_intr_keep);
-}
-
 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
 	if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
@@ -463,7 +458,7 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
 	uint32_t new_val;
 	uint32_t old_val;
 
-	assert_spin_locked(&dev_priv->irq_lock);
+	lockdep_assert_held(&dev_priv->irq_lock);
 
 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
@@ -496,7 +491,7 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
 {
 	uint32_t new_val;
 
-	assert_spin_locked(&dev_priv->irq_lock);
+	lockdep_assert_held(&dev_priv->irq_lock);
 
 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
@@ -530,7 +525,7 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
 
 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
 
-	assert_spin_locked(&dev_priv->irq_lock);
+	lockdep_assert_held(&dev_priv->irq_lock);
 
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 		return;
@@ -546,7 +541,7 @@ __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
 	i915_reg_t reg = PIPESTAT(pipe);
 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
 
-	assert_spin_locked(&dev_priv->irq_lock);
+	lockdep_assert_held(&dev_priv->irq_lock);
 	WARN_ON(!intel_irqs_enabled(dev_priv));
 
 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
@@ -573,7 +568,7 @@ __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
 	i915_reg_t reg = PIPESTAT(pipe);
 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
 
-	assert_spin_locked(&dev_priv->irq_lock);
+	lockdep_assert_held(&dev_priv->irq_lock);
 	WARN_ON(!intel_irqs_enabled(dev_priv));
 
 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
@@ -728,6 +723,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
 								pipe);
 	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
+	unsigned long irqflags;
 
 	htotal = mode->crtc_htotal;
 	hsync_start = mode->crtc_hsync_start;
@@ -744,17 +740,21 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 	high_frame = PIPEFRAME(pipe);
 	low_frame = PIPEFRAMEPIXEL(pipe);
 
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
 	/*
 	 * High & low register fields aren't synchronized, so make sure
 	 * we get a low value that's stable across two reads of the high
 	 * register.
 	 */
 	do {
-		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
-		low   = I915_READ(low_frame);
-		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
+		low   = I915_READ_FW(low_frame);
+		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
 	} while (high1 != high2);
 
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
 	pixel = low & PIPE_PIXEL_MASK;
 	low >>= PIPE_FRAME_LOW_SHIFT;
@@ -783,6 +783,9 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 	enum pipe pipe = crtc->pipe;
 	int position, vtotal;
 
+	if (!crtc->active)
+		return -1;
+
 	vtotal = mode->crtc_vtotal;
 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
 		vtotal /= 2;
@@ -809,8 +812,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 
 	for (i = 0; i < 100; i++) {
 		udelay(1);
-		temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
-			DSL_LINEMASK_GEN3;
+		temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 		if (temp != position) {
 			position = temp;
 			break;
@@ -1033,81 +1035,95 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
 
 static void notify_ring(struct intel_engine_cs *engine)
 {
-	smp_store_mb(engine->breadcrumbs.irq_posted, true);
-	if (intel_engine_wakeup(engine))
-		trace_i915_gem_request_notify(engine);
+	struct drm_i915_gem_request *rq = NULL;
+	struct intel_wait *wait;
+
+	atomic_inc(&engine->irq_count);
+	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
+
+	spin_lock(&engine->breadcrumbs.irq_lock);
+	wait = engine->breadcrumbs.irq_wait;
+	if (wait) {
+		/* We use a callback from the dma-fence to submit
+		 * requests after waiting on our own requests. To
+		 * ensure minimum delay in queuing the next request to
+		 * hardware, signal the fence now rather than wait for
+		 * the signaler to be woken up. We still wake up the
+		 * waiter in order to handle the irq-seqno coherency
+		 * issues (we may receive the interrupt before the
+		 * seqno is written, see __i915_request_irq_complete())
+		 * and to handle coalescing of multiple seqno updates
+		 * and many waiters.
+		 */
+		if (i915_seqno_passed(intel_engine_get_seqno(engine),
+				      wait->seqno) &&
+		    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+			      &wait->request->fence.flags))
+			rq = i915_gem_request_get(wait->request);
+
+		wake_up_process(wait->tsk);
+	} else {
+		__intel_engine_disarm_breadcrumbs(engine);
+	}
+	spin_unlock(&engine->breadcrumbs.irq_lock);
+
+	if (rq) {
+		dma_fence_signal(&rq->fence);
+		i915_gem_request_put(rq);
+	}
+
+	trace_intel_engine_notify(engine, wait);
 }
 
 static void vlv_c0_read(struct drm_i915_private *dev_priv,
 			struct intel_rps_ei *ei)
 {
-	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
+	ei->ktime = ktime_get_raw();
 	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
 	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
 }
 
-static bool vlv_c0_above(struct drm_i915_private *dev_priv,
-			 const struct intel_rps_ei *old,
-			 const struct intel_rps_ei *now,
-			 int threshold)
-{
-	u64 time, c0;
-	unsigned int mul = 100;
-
-	if (old->cz_clock == 0)
-		return false;
-
-	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
-		mul <<= 8;
-
-	time = now->cz_clock - old->cz_clock;
-	time *= threshold * dev_priv->czclk_freq;
-
-	/* Workload can be split between render + media, e.g. SwapBuffers
-	 * being blitted in X after being rendered in mesa. To account for
-	 * this we need to combine both engines into our activity counter.
-	 */
-	c0 = now->render_c0 - old->render_c0;
-	c0 += now->media_c0 - old->media_c0;
-	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
-
-	return c0 >= time;
-}
-
 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
 {
-	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
-	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
+	memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
 }
 
 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
+	const struct intel_rps_ei *prev = &dev_priv->rps.ei;
 	struct intel_rps_ei now;
 	u32 events = 0;
 
-	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
+	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
 		return 0;
 
 	vlv_c0_read(dev_priv, &now);
-	if (now.cz_clock == 0)
-		return 0;
 
-	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
-		if (!vlv_c0_above(dev_priv,
-				  &dev_priv->rps.down_ei, &now,
-				  dev_priv->rps.down_threshold))
-			events |= GEN6_PM_RP_DOWN_THRESHOLD;
-		dev_priv->rps.down_ei = now;
-	}
+	if (prev->ktime) {
+		u64 time, c0;
+		u32 render, media;
+
+		time = ktime_us_delta(now.ktime, prev->ktime);
+
+		time *= dev_priv->czclk_freq;
 
-	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
-		if (vlv_c0_above(dev_priv,
-				 &dev_priv->rps.up_ei, &now,
-				 dev_priv->rps.up_threshold))
-			events |= GEN6_PM_RP_UP_THRESHOLD;
-		dev_priv->rps.up_ei = now;
+		/* Workload can be split between render + media,
+		 * e.g. SwapBuffers being blitted in X after being rendered in
+		 * mesa. To account for this we need to combine both engines
+		 * into our activity counter.
+		 */
+		render = now.render_c0 - prev->render_c0;
+		media = now.media_c0 - prev->media_c0;
+		c0 = max(render, media);
+		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
+
+		if (c0 > time * dev_priv->rps.up_threshold)
+			events = GEN6_PM_RP_UP_THRESHOLD;
+		else if (c0 < time * dev_priv->rps.down_threshold)
+			events = GEN6_PM_RP_DOWN_THRESHOLD;
 	}
 
+	dev_priv->rps.ei = now;
 	return events;
 }
@@ -1127,30 +1143,21 @@ static void gen6_pm_rps_work(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(work, struct drm_i915_private, rps.work);
-	bool client_boost;
+	bool client_boost = false;
 	int new_delay, adj, min, max;
-	u32 pm_iir;
+	u32 pm_iir = 0;
 
 	spin_lock_irq(&dev_priv->irq_lock);
-	/* Speed up work cancelation during disabling rps interrupts. */
-	if (!dev_priv->rps.interrupts_enabled) {
-		spin_unlock_irq(&dev_priv->irq_lock);
-		return;
+	if (dev_priv->rps.interrupts_enabled) {
+		pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir);
+		client_boost = fetch_and_zero(&dev_priv->rps.client_boost);
 	}
-
-	pm_iir = dev_priv->rps.pm_iir;
-	dev_priv->rps.pm_iir = 0;
-	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
-	gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
-	client_boost = dev_priv->rps.client_boost;
-	dev_priv->rps.client_boost = false;
 	spin_unlock_irq(&dev_priv->irq_lock);
 
 	/* Make sure we didn't queue anything we're not going to process. */
 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
-
 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
-		return;
+		goto out;
 
 	mutex_lock(&dev_priv->rps.hw_lock);
@@ -1173,20 +1180,12 @@ static void gen6_pm_rps_work(struct work_struct *work)
 
 		if (new_delay >= dev_priv->rps.max_freq_softlimit)
 			adj = 0;
-		/*
-		 * For better performance, jump directly
-		 * to RPe if we're below it.
-		 */
-		if (new_delay < dev_priv->rps.efficient_freq - adj) {
-			new_delay = dev_priv->rps.efficient_freq;
-			adj = 0;
-		}
 	} else if (client_boost || any_waiters(dev_priv)) {
 		adj = 0;
 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
 		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
 			new_delay = dev_priv->rps.efficient_freq;
-		else
+		else if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
 			new_delay = dev_priv->rps.min_freq_softlimit;
 		adj = 0;
 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
@@ -1209,9 +1208,19 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	new_delay += adj;
 	new_delay = clamp_t(int, new_delay, min, max);
 
-	intel_set_rps(dev_priv, new_delay);
+	if (intel_set_rps(dev_priv, new_delay)) {
+		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
+		dev_priv->rps.last_adj = 0;
+	}
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
+
+out:
+	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (dev_priv->rps.interrupts_enabled)
+		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
+	spin_unlock_irq(&dev_priv->irq_lock);
 }
@@ -1347,10 +1356,20 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
 static __always_inline void
 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
 {
-	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
+	bool tasklet = false;
+
+	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
+		set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+		tasklet = true;
+	}
+
+	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
 		notify_ring(engine);
-	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
-		tasklet_schedule(&engine->irq_tasklet);
+		tasklet |= i915.enable_guc_submission;
+	}
+
+	if (tasklet)
+		tasklet_hi_schedule(&engine->irq_tasklet);
 }
@@ -1723,8 +1742,8 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
 			I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
 
 			/* Handle flush interrupt in bottom half */
-			queue_work(dev_priv->guc.log.flush_wq,
-				   &dev_priv->guc.log.flush_work);
+			queue_work(dev_priv->guc.log.runtime.flush_wq,
+				   &dev_priv->guc.log.runtime.flush_work);
 
 			dev_priv->guc.log.flush_interrupt_count++;
 		} else {
@@ -2613,22 +2632,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
 	return ret;
 }
 
-static void i915_error_wake_up(struct drm_i915_private *dev_priv)
-{
-	/*
-	 * Notify all waiters for GPU completion events that reset state has
-	 * been changed, and that they need to restart their wait after
-	 * checking for potential errors (and bail out to drop locks if there is
-	 * a gpu reset pending so that i915_error_work_func can acquire them).
-	 */
-
-	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
-	wake_up_all(&dev_priv->gpu_error.wait_queue);
-
-	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
-	wake_up_all(&dev_priv->pending_flip_queue);
-}
-
 /**
  * i915_reset_and_wakeup - do process context error handling work
  * @dev_priv: i915 device private
@@ -2648,16 +2651,11 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
 	DRM_DEBUG_DRIVER("resetting chip\n");
 	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
 
-	/*
-	 * In most cases it's guaranteed that we get here with an RPM
-	 * reference held, for example because there is a pending GPU
-	 * request that won't finish until the reset is done. This
-	 * isn't the case at least when we get here by doing a
-	 * simulated reset via debugfs, so get an RPM reference.
-	 */
-	intel_runtime_pm_get(dev_priv);
 	intel_prepare_reset(dev_priv);
 
+	set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
+	wake_up_all(&dev_priv->gpu_error.wait_queue);
+
 	do {
 		/*
 		 * All state reset _must_ be completed before we update the
@@ -2672,12 +2670,11 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
 
 		/* We need to wait for anyone holding the lock to wakeup */
 	} while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
-				     I915_RESET_IN_PROGRESS,
+				     I915_RESET_HANDOFF,
 				     TASK_UNINTERRUPTIBLE,
 				     HZ));
 
 	intel_finish_reset(dev_priv);
-	intel_runtime_pm_put(dev_priv);
 
 	if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
 		kobject_uevent_env(kobj,
@@ -2687,6 +2684,7 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
 	 * Note: The wake_up also serves as a memory barrier so that
 	 * waiters see the updated value of the dev_priv->gpu_error.
 	 */
+	clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
 	wake_up_all(&dev_priv->gpu_error.reset_queue);
 }
@@ -2764,31 +2762,29 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
 	va_end(args);
 
+	/*
+	 * In most cases it's guaranteed that we get here with an RPM
+	 * reference held, for example because there is a pending GPU
+	 * request that won't finish until the reset is done. This
+	 * isn't the case at least when we get here by doing a
+	 * simulated reset via debugfs, so get an RPM reference.
+	 */
+	intel_runtime_pm_get(dev_priv);
+
 	i915_capture_error_state(dev_priv, engine_mask, error_msg);
 	i915_clear_error_registers(dev_priv);
 
 	if (!engine_mask)
-		return;
+		goto out;
 
-	if (test_and_set_bit(I915_RESET_IN_PROGRESS,
+	if (test_and_set_bit(I915_RESET_BACKOFF,
 			     &dev_priv->gpu_error.flags))
-		return;
-
-	/*
-	 * Wakeup waiting processes so that the reset function
-	 * i915_reset_and_wakeup doesn't deadlock trying to grab
-	 * various locks. By bumping the reset counter first, the woken
-	 * processes will see a reset in progress and back off,
-	 * releasing their locks and then wait for the reset completion.
-	 * We must do this for _all_ gpu waiters that might hold locks
-	 * that the reset work needs to acquire.
-	 *
-	 * Note: The wake_up also provides a memory barrier to ensure that the
-	 * waiters see the updated value of the reset flags.
-	 */
-	i915_error_wake_up(dev_priv);
+		goto out;
 
 	i915_reset_and_wakeup(dev_priv);
+
+out:
+	intel_runtime_pm_put(dev_priv);
 }
@@ -3106,19 +3102,9 @@ static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
 	return enabled_irqs;
 }
 
-static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
 {
-	u32 hotplug_irqs, hotplug, enabled_irqs;
-
-	if (HAS_PCH_IBX(dev_priv)) {
-		hotplug_irqs = SDE_HOTPLUG_MASK;
-		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
-	} else {
-		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
-		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
-	}
-
-	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+	u32 hotplug;
 
 	/*
 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
@@ -3126,10 +3112,12 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
 	 * The pulse duration bits are reserved on LPT+.
 	 */
 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
-	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
-	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
-	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
+	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
+		     PORTC_PULSE_DURATION_MASK |
+		     PORTD_PULSE_DURATION_MASK);
 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
+	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
+	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
 	/*
 	 * When CPU and PCH are on the same package, port A
 	 * HPD must be enabled in both north and south.
@@ -3139,6 +3127,23 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
 }
 
+static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+	u32 hotplug_irqs, enabled_irqs;
+
+	if (HAS_PCH_IBX(dev_priv)) {
+		hotplug_irqs = SDE_HOTPLUG_MASK;
+		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
+	} else {
+		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
+		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
+	}
+
+	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+
+	ibx_hpd_detection_setup(dev_priv);
+}
+
 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
 {
 	u32 hotplug;
@@ -3168,9 +3173,25 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
 	spt_hpd_detection_setup(dev_priv);
 }
 
+static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
+{
+	u32 hotplug;
+
+	/*
+	 * Enable digital hotplug on the CPU, and configure the DP short pulse
+	 * duration to 2ms (which is the minimum in the Display Port spec)
+	 * The pulse duration bits are reserved on HSW+.
+	 */
+	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
+	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
+	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
+		   DIGITAL_PORTA_PULSE_DURATION_2ms;
+	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
+}
+
 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
-	u32 hotplug_irqs, hotplug, enabled_irqs;
+	u32 hotplug_irqs, enabled_irqs;
 
 	if (INTEL_GEN(dev_priv) >= 8) {
 		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
@@ -3189,15 +3210,7 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
 	}
 
-	/*
-	 * Enable digital hotplug on the CPU, and configure the DP short pulse
-	 * duration to 2ms (which is the minimum in the Display Port spec)
-	 * The pulse duration bits are reserved on HSW+.
-	 */
-	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
-	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
-	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
-	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
+	ilk_hpd_detection_setup(dev_priv);
 
 	ibx_hpd_irq_setup(dev_priv);
 }
@@ -3268,7 +3281,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
 
 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
 	    HAS_PCH_LPT(dev_priv))
-		; /* TODO: Enable HPD detection on older PCH platforms too */
+		ibx_hpd_detection_setup(dev_priv);
 	else
 		spt_hpd_detection_setup(dev_priv);
 }
@@ -3345,6 +3358,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 
 	gen5_gt_irq_postinstall(dev);
 
+	ilk_hpd_detection_setup(dev_priv);
+
 	ibx_irq_postinstall(dev);
 
 	if (IS_IRONLAKE_M(dev_priv)) {
@@ -3363,7 +3378,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 
 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
 {
-	assert_spin_locked(&dev_priv->irq_lock);
+	lockdep_assert_held(&dev_priv->irq_lock);
 
 	if (dev_priv->display_irqs_enabled)
 		return;
@@ -3378,7 +3393,7 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
 
 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
 {
-	assert_spin_locked(&dev_priv->irq_lock);
+	lockdep_assert_held(&dev_priv->irq_lock);
 
 	if (!dev_priv->display_irqs_enabled)
 		return;
@@ -3485,6 +3500,8 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 
 	if (IS_GEN9_LP(dev_priv))
 		bxt_hpd_detection_setup(dev_priv);
+	else if (IS_BROADWELL(dev_priv))
+		ilk_hpd_detection_setup(dev_priv);
 }
 
 static int gen8_irq_postinstall(struct drm_device *dev)
@@ -4052,7 +4069,7 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
 	u32 hotplug_en;
 
-	assert_spin_locked(&dev_priv->irq_lock);
+	lockdep_assert_held(&dev_priv->irq_lock);
 
 	/* Note HDMI and DP share hotplug bits */
 	/* enable bits are the same for all generations */
@@ -4228,28 +4245,27 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 	/* Let's track the enabled rps events */
 	if (IS_VALLEYVIEW(dev_priv))
 		/* WaGsvRC0ResidencyMethod:vlv */
-		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
+		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
 	else
 		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
 
-	dev_priv->rps.pm_intr_keep = 0;
+	dev_priv->rps.pm_intrmsk_mbz = 0;
 
 	/*
-	 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
+	 * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
 	 * if GEN6_PM_UP_EI_EXPIRED is masked.
 	 *
 	 * TODO: verify if this can be reproduced on VLV,CHV.
 	 */
-	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
-		dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
+	if (INTEL_INFO(dev_priv)->gen <= 7)
+		dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
 
 	if (INTEL_INFO(dev_priv)->gen >= 8)
-		dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC;
+		dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
 
 	if (IS_GEN2(dev_priv)) {
 		/* Gen2 doesn't have a hardware frame counter */
 		dev->max_vblank_count = 0;
-		dev->driver->get_vblank_counter = drm_vblank_no_hw_counter;
 	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
 		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
@@ -4266,6 +4282,18 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 	if (!IS_GEN2(dev_priv))
 		dev->vblank_disable_immediate = true;
 
+	/* Most platforms treat the display irq block as an always-on
+	 * power domain. vlv/chv can disable it at runtime and need
+	 * special care to avoid writing any of the display block registers
+	 * outside of the power domain. We defer setting up the display irqs
+	 * in this case to the runtime pm.
+	 */
+	dev_priv->display_irqs_enabled = true;
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		dev_priv->display_irqs_enabled = false;
+
+	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+
 	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
 	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
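
A note on the many assert_spin_locked() -> lockdep_assert_held() conversions above: assert_spin_locked() inspects the lock word at runtime on every kernel build and only proves that *some* context holds the lock, whereas lockdep_assert_held() compiles away entirely without CONFIG_LOCKDEP and, when lockdep is enabled, verifies that the *current* context is the holder. A minimal kernel-style sketch of the contract this expresses (demo_state and the demo_* functions are illustrative, not part of the patch):

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct demo_state {
	spinlock_t irq_lock;
	u32 irq_mask;
};

/* Contract: the caller must already hold st->irq_lock. */
static void demo_update_mask_locked(struct demo_state *st, u32 bits)
{
	lockdep_assert_held(&st->irq_lock);	/* zero cost unless CONFIG_LOCKDEP */
	st->irq_mask |= bits;
}

static void demo_update_mask(struct demo_state *st, u32 bits)
{
	spin_lock_irq(&st->irq_lock);
	demo_update_mask_locked(st, bits);
	spin_unlock_irq(&st->irq_lock);
}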
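The i915_get_vblank_counter() hunk keeps the classic stable-read loop while switching to I915_READ_FW() under uncore.lock. The pattern itself is generic: when a wide counter is exposed as independently latching high/low registers, re-read the high half until two reads agree, which proves the low read did not straddle a carry. A self-contained userspace sketch under that assumption (the mmio_read() simulation and its advancing counter are invented for illustration):

#include <stdint.h>
#include <stdio.h>

static uint64_t hw_counter;	/* simulated 64-bit hardware counter */

/* Simulated register file: each access advances the counter, standing in
 * for time passing between MMIO reads, so a read sequence can straddle a
 * carry out of the low 32 bits. */
static uint32_t mmio_read(int high)
{
	hw_counter += 0x40000000ull;
	return high ? (uint32_t)(hw_counter >> 32) : (uint32_t)hw_counter;
}

static uint64_t read_counter_stable(void)
{
	uint32_t high1, high2, low;

	do {
		high1 = mmio_read(1);
		low = mmio_read(0);
		high2 = mmio_read(1);
	} while (high1 != high2);	/* retry if a carry slipped in between */

	return ((uint64_t)high1 << 32) | low;
}

int main(void)
{
	printf("counter = 0x%llx\n",
	       (unsigned long long)read_counter_stable());
	return 0;
}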
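On the vlv_wa_c0_ei() rewrite: the decision now reduces to checking whether the busier of the render/media C0 residency deltas exceeds up_threshold (or falls below down_threshold) percent of the elapsed EI window, with both sides brought to a common scale by the single multiply. My reading of "c0 *= 1000 * 100 << 8" is that << 8 undoes the 2^8 counter-range divider, * 1000 matches the kHz-times-microseconds units of the time product, and * 100 expresses the percent thresholds; treat that interpretation, and all the sample numbers below, as assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Invented, roughly VLV-shaped sample values. */
	uint64_t time_us = 10000;		/* ~10 ms EI window */
	uint64_t czclk_freq = 320000;		/* czclk in kHz */
	uint32_t render = 11000, media = 4000;	/* C0 counter deltas */
	unsigned int up = 85, down = 60;	/* thresholds, percent */

	uint64_t time = time_us * czclk_freq;
	uint64_t c0 = render > media ? render : media;

	c0 *= 1000 * 100 << 8;	/* to usecs and scale to threshold% */

	/* With this scaling, c0 / time is the busyness in percent. */
	printf("busy ~= %llu%%\n", (unsigned long long)(c0 / time));
	if (c0 > time * up)
		puts("-> GEN6_PM_RP_UP_THRESHOLD");
	else if (c0 < time * down)
		puts("-> GEN6_PM_RP_DOWN_THRESHOLD");
	else
		puts("-> hold current frequency");
	return 0;
}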