author    | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2023-08-31 02:06:38 +0300
committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2023-08-31 02:06:38 +0300
commit    | 1ac731c529cd4d6adbce134754b51ff7d822b145 (patch)
tree      | 143ab3f35ca5f3b69f583c84e6964b17139c2ec1 /drivers/gpu/drm/i915/display/intel_psr.c
parent    | 07b4c950f27bef0362dc6ad7ee713aab61d58149 (diff)
parent    | 54116d442e001e1b6bd482122043b1870998a1f3 (diff)
download  | linux-1ac731c529cd4d6adbce134754b51ff7d822b145.tar.xz
Merge branch 'next' into for-linus
Prepare input updates for 6.6 merge window.
Diffstat (limited to 'drivers/gpu/drm/i915/display/intel_psr.c')
-rw-r--r-- | drivers/gpu/drm/i915/display/intel_psr.c | 587
1 file changed, 468 insertions(+), 119 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 7a72e15e6836..6badfff2b4a2 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -34,6 +34,7 @@
 #include "intel_dp_aux.h"
 #include "intel_hdmi.h"
 #include "intel_psr.h"
+#include "intel_psr_regs.h"
 #include "intel_snps_phy.h"
 #include "skl_universal_plane.h"
 
@@ -152,7 +153,7 @@ static void psr_irq_control(struct intel_dp *intel_dp)
 {
     struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
     i915_reg_t imr_reg;
-    u32 mask, val;
+    u32 mask;
 
     if (DISPLAY_VER(dev_priv) >= 12)
         imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
@@ -164,10 +165,7 @@ static void psr_irq_control(struct intel_dp *intel_dp)
         mask |= psr_irq_post_exit_bit_get(intel_dp) |
             psr_irq_pre_entry_bit_get(intel_dp);
 
-    val = intel_de_read(dev_priv, imr_reg);
-    val &= ~psr_irq_mask_get(intel_dp);
-    val |= ~mask;
-    intel_de_write(dev_priv, imr_reg, val);
+    intel_de_rmw(dev_priv, imr_reg, psr_irq_mask_get(intel_dp), ~mask);
 }
 
 static void psr_event_print(struct drm_i915_private *i915,
@@ -245,8 +243,6 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
     }
 
     if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
-        u32 val;
-
         drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
              transcoder_name(cpu_transcoder));
 
@@ -260,9 +256,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
          * again so we don't care about unmask the interruption
          * or unset irq_aux_error.
          */
-        val = intel_de_read(dev_priv, imr_reg);
-        val |= psr_irq_psr_error_bit_get(intel_dp);
-        intel_de_write(dev_priv, imr_reg, val);
+        intel_de_rmw(dev_priv, imr_reg, 0, psr_irq_psr_error_bit_get(intel_dp));
 
         schedule_work(&intel_dp->psr.work);
     }
@@ -526,6 +520,17 @@ static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
     return val;
 }
 
+static int psr2_block_count_lines(struct intel_dp *intel_dp)
+{
+    return intel_dp->psr.io_wake_lines < 9 &&
+        intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
+}
+
+static int psr2_block_count(struct intel_dp *intel_dp)
+{
+    return psr2_block_count_lines(intel_dp) / 4;
+}
+
 static void hsw_activate_psr2(struct intel_dp *intel_dp)
 {
     struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -542,6 +547,13 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
     val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
     val |= intel_psr2_get_tp_time(intel_dp);
 
+    if (DISPLAY_VER(dev_priv) >= 12) {
+        if (psr2_block_count(intel_dp) > 2)
+            val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
+        else
+            val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+    }
+
     /* Wa_22012278275:adl-p */
     if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
         static const u8 map[] = {
@@ -558,31 +570,21 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
          * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
          * comments bellow for more information
          */
-        u32 tmp, lines = 7;
-
-        val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+        u32 tmp;
 
-        tmp = map[lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
+        tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
         tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
         val |= tmp;
 
-        tmp = map[lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
+        tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
         tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
         val |= tmp;
     } else if (DISPLAY_VER(dev_priv) >= 12) {
-        /*
-         * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default
-         * values from BSpec. In order to setting an optimal power
-         * consumption, lower than 4k resolution mode needs to decrease
-         * IO_BUFFER_WAKE and FAST_WAKE. And higher than 4K resolution
-         * mode needs to increase IO_BUFFER_WAKE and FAST_WAKE.
-         */
-        val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
-        val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
-        val |= TGL_EDP_PSR2_FAST_WAKE(7);
+        val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+        val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
     } else if (DISPLAY_VER(dev_priv) >= 9) {
-        val |= EDP_PSR2_IO_BUFFER_WAKE(7);
-        val |= EDP_PSR2_FAST_WAKE(7);
+        val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+        val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
     }
 
     if (intel_dp->psr.req_psr2_sdp_prior_scanline)
@@ -591,12 +593,6 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
     if (intel_dp->psr.psr2_sel_fetch_enabled) {
         u32 tmp;
 
-        /* Wa_1408330847 */
-        if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
-            intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
-                     DIS_RAM_BYPASS_PSR2_MAN_TRACK,
-                     DIS_RAM_BYPASS_PSR2_MAN_TRACK);
-
         tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
         drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
     } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
@@ -637,13 +633,10 @@ static void psr2_program_idle_frames(struct intel_dp *intel_dp,
                       u32 idle_frames)
 {
     struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-    u32 val;
 
     idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
-    val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
-    val &= ~EDP_PSR2_IDLE_FRAME_MASK;
-    val |= idle_frames;
-    intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
+    intel_de_rmw(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder),
+             EDP_PSR2_IDLE_FRAME_MASK, idle_frames);
 }
 
 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
@@ -708,6 +701,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
 {
     const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
     struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+    struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
     u32 exit_scanlines;
 
     /*
@@ -724,7 +718,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
     if (crtc_state->enable_psr2_sel_fetch)
         return;
 
-    if (!(dev_priv->display.dmc.allowed_dc_mask & DC_STATE_EN_DC3CO))
+    if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
         return;
 
     if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
         return;
@@ -765,13 +759,6 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
         return false;
     }
 
-    /* Wa_14010254185 Wa_14010103792 */
-    if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
-        drm_dbg_kms(&dev_priv->drm,
-                "PSR2 sel fetch not enabled, missing the implementation of WAs\n");
-        return false;
-    }
-
     return crtc_state->enable_psr2_sel_fetch = true;
 }
 
@@ -842,6 +829,46 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_d
     return true;
 }
 
+static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
+                     struct intel_crtc_state *crtc_state)
+{
+    struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+    int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
+    u8 max_wake_lines;
+
+    if (DISPLAY_VER(i915) >= 12) {
+        io_wake_time = 42;
+        /*
+         * According to Bspec it's 42us, but based on testing
+         * it is not enough -> use 45 us.
+         */
+        fast_wake_time = 45;
+        max_wake_lines = 12;
+    } else {
+        io_wake_time = 50;
+        fast_wake_time = 32;
+        max_wake_lines = 8;
+    }
+
+    io_wake_lines = intel_usecs_to_scanlines(
+        &crtc_state->uapi.adjusted_mode, io_wake_time);
+    fast_wake_lines = intel_usecs_to_scanlines(
+        &crtc_state->uapi.adjusted_mode, fast_wake_time);
+
+    if (io_wake_lines > max_wake_lines ||
+        fast_wake_lines > max_wake_lines)
+        return false;
+
+    if (i915->params.psr_safest_params)
+        io_wake_lines = fast_wake_lines = max_wake_lines;
+
+    /* According to Bspec lower limit should be set as 7 lines. */
+    intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
+    intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
+
+    return true;
+}
+
 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
                     struct intel_crtc_state *crtc_state)
 {
@@ -936,6 +963,21 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
         return false;
     }
 
+    if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
+        drm_dbg_kms(&dev_priv->drm,
+                "PSR2 not enabled, Unable to use long enough wake times\n");
+        return false;
+    }
+
+    /* Vblank >= PSR2_CTL Block Count Number maximum line count */
+    if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
+        crtc_state->hw.adjusted_mode.crtc_vblank_start <
+        psr2_block_count_lines(intel_dp)) {
+        drm_dbg_kms(&dev_priv->drm,
+                "PSR2 not enabled, too short vblank time\n");
+        return false;
+    }
+
     if (HAS_PSR2_SEL_FETCH(dev_priv)) {
         if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
             !HAS_PSR_HW_TRACKING(dev_priv)) {
@@ -945,13 +987,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
         }
     }
 
-    /* Wa_2209313811 */
-    if (!crtc_state->enable_psr2_sel_fetch &&
-        IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
-        drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n");
-        goto unsupported;
-    }
-
     if (!psr2_granularity_check(intel_dp, crtc_state)) {
         drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
         goto unsupported;
@@ -1071,7 +1106,7 @@ void intel_psr_get_config(struct intel_encoder *encoder,
     }
 
     if (DISPLAY_VER(dev_priv) >= 12) {
-        val = intel_de_read(dev_priv, EXITLINE(intel_dp->psr.transcoder));
+        val = intel_de_read(dev_priv, TRANS_EXITLINE(intel_dp->psr.transcoder));
         val &= EXITLINE_MASK;
         pipe_config->dc3co_exitline = val;
     }
@@ -1119,6 +1154,34 @@ static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
     }
 }
 
+/*
+ * Wa_16013835468
+ * Wa_14015648006
+ */
+static void wm_optimization_wa(struct intel_dp *intel_dp,
+                   const struct intel_crtc_state *crtc_state)
+{
+    struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+    bool set_wa_bit = false;
+
+    /* Wa_14015648006 */
+    if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
+        IS_DISPLAY_VER(dev_priv, 11, 13))
+        set_wa_bit |= crtc_state->wm_level_disabled;
+
+    /* Wa_16013835468 */
+    if (DISPLAY_VER(dev_priv) == 12)
+        set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
+                  crtc_state->hw.adjusted_mode.crtc_vdisplay;
+
+    if (set_wa_bit)
+        intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+                 0, wa_16013835468_bit_get(intel_dp));
+    else
+        intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+                 wa_16013835468_bit_get(intel_dp), 0);
+}
+
 static void intel_psr_enable_source(struct intel_dp *intel_dp,
                     const struct intel_crtc_state *crtc_state)
 {
@@ -1145,19 +1208,13 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
     psr_irq_control(intel_dp);
 
-    if (intel_dp->psr.dc3co_exitline) {
-        u32 val;
-
-        /*
-         * TODO: if future platforms supports DC3CO in more than one
-         * transcoder, EXITLINE will need to be unset when disabling PSR
-         */
-        val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
-        val &= ~EXITLINE_MASK;
-        val |= intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT;
-        val |= EXITLINE_ENABLE;
-        intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
-    }
+    /*
+     * TODO: if future platforms supports DC3CO in more than one
+     * transcoder, EXITLINE will need to be unset when disabling PSR
+     */
+    if (intel_dp->psr.dc3co_exitline)
+        intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
+                 intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
 
     if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
         intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
@@ -1168,18 +1225,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
      * Wa_16013835468
      * Wa_14015648006
      */
-    if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
-        IS_DISPLAY_VER(dev_priv, 12, 13)) {
-        u16 vtotal, vblank;
-
-        vtotal = crtc_state->uapi.adjusted_mode.crtc_vtotal -
-             crtc_state->uapi.adjusted_mode.crtc_vdisplay;
-        vblank = crtc_state->uapi.adjusted_mode.crtc_vblank_end -
-             crtc_state->uapi.adjusted_mode.crtc_vblank_start;
-        if (vblank > vtotal)
-            intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0,
-                     wa_16013835468_bit_get(intel_dp));
-    }
+    wm_optimization_wa(intel_dp, crtc_state);
 
     if (intel_dp->psr.psr2_enabled) {
         if (DISPLAY_VER(dev_priv) == 9)
@@ -1199,13 +1245,6 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
         intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
                  ADLP_1_BASED_X_GRANULARITY);
 
-        /* Wa_16011168373:adl-p */
-        if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
-            intel_de_rmw(dev_priv,
-                     TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
-                     TRANS_SET_CONTEXT_LATENCY_MASK,
-                     TRANS_SET_CONTEXT_LATENCY_VALUE(1));
-
         /* Wa_16012604467:adlp,mtl[a0,b0] */
         if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
             intel_de_rmw(dev_priv,
@@ -1360,28 +1399,15 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
     intel_psr_exit(intel_dp);
     intel_psr_wait_exit_locked(intel_dp);
 
-    /* Wa_1408330847 */
-    if (intel_dp->psr.psr2_sel_fetch_enabled &&
-        IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
-        intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
-                 DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
-
     /*
      * Wa_16013835468
     * Wa_14015648006
     */
-    if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
-        IS_DISPLAY_VER(dev_priv, 12, 13))
+    if (DISPLAY_VER(dev_priv) >= 11)
         intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
                  wa_16013835468_bit_get(intel_dp), 0);
 
     if (intel_dp->psr.psr2_enabled) {
-        /* Wa_16011168373:adl-p */
-        if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
-            intel_de_rmw(dev_priv,
-                     TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
-                     TRANS_SET_CONTEXT_LATENCY_MASK, 0);
-
         /* Wa_16012604467:adlp,mtl[a0,b0] */
         if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
             intel_de_rmw(dev_priv,
@@ -1547,8 +1573,8 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
         intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
 }
 
-void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
-                    const struct intel_crtc_state *crtc_state)
+void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
+                        const struct intel_crtc_state *crtc_state)
 {
     struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
     enum pipe pipe = plane->pipe;
@@ -1559,10 +1585,28 @@ void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
     intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
 }
 
-void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
-                    const struct intel_crtc_state *crtc_state,
-                    const struct intel_plane_state *plane_state,
-                    int color_plane)
+void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
+                        const struct intel_crtc_state *crtc_state,
+                        const struct intel_plane_state *plane_state)
+{
+    struct drm_i915_private *i915 = to_i915(plane->base.dev);
+    enum pipe pipe = plane->pipe;
+
+    if (!crtc_state->enable_psr2_sel_fetch)
+        return;
+
+    if (plane->id == PLANE_CURSOR)
+        intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+                  plane_state->ctl);
+    else
+        intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+                  PLANE_SEL_FETCH_CTL_ENABLE);
+}
+
+void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
+                          const struct intel_crtc_state *crtc_state,
+                          const struct intel_plane_state *plane_state,
+                          int color_plane)
 {
     struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
     enum pipe pipe = plane->pipe;
@@ -1573,11 +1617,8 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
     if (!crtc_state->enable_psr2_sel_fetch)
         return;
 
-    if (plane->id == PLANE_CURSOR) {
-        intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
-                  plane_state->ctl);
+    if (plane->id == PLANE_CURSOR)
         return;
-    }
 
     clip = &plane_state->psr2_sel_fetch_area;
@@ -1605,9 +1646,6 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
     val = (drm_rect_height(clip) - 1) << 16;
     val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
     intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
-
-    intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
-              PLANE_SEL_FETCH_CTL_ENABLE);
 }
 
 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
@@ -1931,14 +1969,20 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
          * - PSR disabled in new state
          * - All planes will go inactive
          * - Changing between PSR versions
+         * - Display WA #1136: skl, bxt
          */
        needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
        needs_to_disable |= !new_crtc_state->has_psr;
        needs_to_disable |= !new_crtc_state->active_planes;
        needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
+       needs_to_disable |= DISPLAY_VER(i915) < 11 &&
+           new_crtc_state->wm_level_disabled;
 
        if (psr->enabled && needs_to_disable)
            intel_psr_disable_locked(intel_dp);
+       else if (psr->enabled && new_crtc_state->wm_level_disabled)
+           /* Wa_14015648006 */
+           wm_optimization_wa(intel_dp, new_crtc_state);
 
        mutex_unlock(&psr->lock);
     }
@@ -1957,23 +2001,29 @@ static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
                          crtc_state->uapi.encoder_mask) {
         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
         struct intel_psr *psr = &intel_dp->psr;
+        bool keep_disabled = false;
 
         mutex_lock(&psr->lock);
 
-        if (psr->sink_not_reliable)
-            goto exit;
-
         drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
 
-        /* Only enable if there is active planes */
-        if (!psr->enabled && crtc_state->active_planes)
+        keep_disabled |= psr->sink_not_reliable;
+        keep_disabled |= !crtc_state->active_planes;
+
+        /* Display WA #1136: skl, bxt */
+        keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
+            crtc_state->wm_level_disabled;
+
+        if (!psr->enabled && !keep_disabled)
             intel_psr_enable_locked(intel_dp, crtc_state);
+        else if (psr->enabled && !crtc_state->wm_level_disabled)
+            /* Wa_14015648006 */
+            wm_optimization_wa(intel_dp, crtc_state);
 
         /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
         if (crtc_state->crc_enabled && psr->enabled)
             psr_force_hw_tracking_exit(intel_dp);
 
-exit:
         mutex_unlock(&psr->lock);
     }
 }
@@ -2647,3 +2697,302 @@ void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
         break;
     }
 }
+
+static void
+psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
+{
+    struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+    const char *status = "unknown";
+    u32 val, status_val;
+
+    if (intel_dp->psr.psr2_enabled) {
+        static const char * const live_status[] = {
+            "IDLE",
+            "CAPTURE",
+            "CAPTURE_FS",
+            "SLEEP",
+            "BUFON_FW",
+            "ML_UP",
+            "SU_STANDBY",
+            "FAST_SLEEP",
+            "DEEP_SLEEP",
+            "BUF_ON",
+            "TG_ON"
+        };
+        val = intel_de_read(dev_priv,
+                    EDP_PSR2_STATUS(intel_dp->psr.transcoder));
+        status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
+        if (status_val < ARRAY_SIZE(live_status))
+            status = live_status[status_val];
+    } else {
+        static const char * const live_status[] = {
+            "IDLE",
+            "SRDONACK",
+            "SRDENT",
+            "BUFOFF",
+            "BUFON",
+            "AUXACK",
+            "SRDOFFACK",
+            "SRDENT_ON",
+        };
+        val = intel_de_read(dev_priv,
+                    EDP_PSR_STATUS(intel_dp->psr.transcoder));
+        status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
+                  EDP_PSR_STATUS_STATE_SHIFT;
+        if (status_val < ARRAY_SIZE(live_status))
+            status = live_status[status_val];
+    }
+
+    seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
+}
+
+static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
+{
+    struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+    struct intel_psr *psr = &intel_dp->psr;
+    intel_wakeref_t wakeref;
+    const char *status;
+    bool enabled;
+    u32 val;
+
+    seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
+    if (psr->sink_support)
+        seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
+    seq_puts(m, "\n");
+
+    if (!psr->sink_support)
+        return 0;
+
+    wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+    mutex_lock(&psr->lock);
+
+    if (psr->enabled)
+        status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
+    else
+        status = "disabled";
+    seq_printf(m, "PSR mode: %s\n", status);
+
+    if (!psr->enabled) {
+        seq_printf(m, "PSR sink not reliable: %s\n",
+               str_yes_no(psr->sink_not_reliable));
+
+        goto unlock;
+    }
+
+    if (psr->psr2_enabled) {
+        val = intel_de_read(dev_priv,
+                    EDP_PSR2_CTL(intel_dp->psr.transcoder));
+        enabled = val & EDP_PSR2_ENABLE;
+    } else {
+        val = intel_de_read(dev_priv,
+                    EDP_PSR_CTL(intel_dp->psr.transcoder));
+        enabled = val & EDP_PSR_ENABLE;
+    }
+    seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
+           str_enabled_disabled(enabled), val);
+    psr_source_status(intel_dp, m);
+    seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
+           psr->busy_frontbuffer_bits);
+
+    /*
+     * SKL+ Perf counter is reset to 0 everytime DC state is entered
+     */
+    if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+        val = intel_de_read(dev_priv,
+                    EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
+        val &= EDP_PSR_PERF_CNT_MASK;
+        seq_printf(m, "Performance counter: %u\n", val);
+    }
+
+    if (psr->debug & I915_PSR_DEBUG_IRQ) {
+        seq_printf(m, "Last attempted entry at: %lld\n",
+               psr->last_entry_attempt);
+        seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
+    }
+
+    if (psr->psr2_enabled) {
+        u32 su_frames_val[3];
+        int frame;
+
+        /*
+         * Reading all 3 registers before hand to minimize crossing a
+         * frame boundary between register reads
+         */
+        for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
+            val = intel_de_read(dev_priv,
+                        PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
+            su_frames_val[frame / 3] = val;
+        }
+
+        seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
+
+        for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
+            u32 su_blocks;
+
+            su_blocks = su_frames_val[frame / 3] &
+                    PSR2_SU_STATUS_MASK(frame);
+            su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
+            seq_printf(m, "%d\t%d\n", frame, su_blocks);
+        }
+
+        seq_printf(m, "PSR2 selective fetch: %s\n",
+               str_enabled_disabled(psr->psr2_sel_fetch_enabled));
+    }
+
+unlock:
+    mutex_unlock(&psr->lock);
+    intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+    return 0;
+}
+
+static int i915_edp_psr_status_show(struct seq_file *m, void *data)
+{
+    struct drm_i915_private *dev_priv = m->private;
+    struct intel_dp *intel_dp = NULL;
+    struct intel_encoder *encoder;
+
+    if (!HAS_PSR(dev_priv))
+        return -ENODEV;
+
+    /* Find the first EDP which supports PSR */
+    for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+        intel_dp = enc_to_intel_dp(encoder);
+        break;
+    }
+
+    if (!intel_dp)
+        return -ENODEV;
+
+    return intel_psr_status(m, intel_dp);
+}
+DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
+
+static int
+i915_edp_psr_debug_set(void *data, u64 val)
+{
+    struct drm_i915_private *dev_priv = data;
+    struct intel_encoder *encoder;
+    intel_wakeref_t wakeref;
+    int ret = -ENODEV;
+
+    if (!HAS_PSR(dev_priv))
+        return ret;
+
+    for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+        drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
+
+        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+        // TODO: split to each transcoder's PSR debug state
+        ret = intel_psr_debug_set(intel_dp, val);
+
+        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+    }
+
+    return ret;
+}
+
+static int
+i915_edp_psr_debug_get(void *data, u64 *val)
+{
+    struct drm_i915_private *dev_priv = data;
+    struct intel_encoder *encoder;
+
+    if (!HAS_PSR(dev_priv))
+        return -ENODEV;
+
+    for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+        // TODO: split to each transcoder's PSR debug state
+        *val = READ_ONCE(intel_dp->psr.debug);
+        return 0;
+    }
+
+    return -ENODEV;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
+            i915_edp_psr_debug_get, i915_edp_psr_debug_set,
+            "%llu\n");
+
+void intel_psr_debugfs_register(struct drm_i915_private *i915)
+{
+    struct drm_minor *minor = i915->drm.primary;
+
+    debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
+                i915, &i915_edp_psr_debug_fops);
+
+    debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
+                i915, &i915_edp_psr_status_fops);
+}
+
+static int i915_psr_sink_status_show(struct seq_file *m, void *data)
+{
+    struct intel_connector *connector = m->private;
+    struct intel_dp *intel_dp = intel_attached_dp(connector);
+    static const char * const sink_status[] = {
+        "inactive",
+        "transition to active, capture and display",
+        "active, display from RFB",
+        "active, capture and display on sink device timings",
+        "transition to inactive, capture and display, timing re-sync",
+        "reserved",
+        "reserved",
+        "sink internal error",
+    };
+    const char *str;
+    int ret;
+    u8 val;
+
+    if (!CAN_PSR(intel_dp)) {
+        seq_puts(m, "PSR Unsupported\n");
+        return -ENODEV;
+    }
+
+    if (connector->base.status != connector_status_connected)
+        return -ENODEV;
+
+    ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
+    if (ret != 1)
+        return ret < 0 ? ret : -EIO;
+
+    val &= DP_PSR_SINK_STATE_MASK;
+    if (val < ARRAY_SIZE(sink_status))
+        str = sink_status[val];
+    else
+        str = "unknown";
+
+    seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
+
+    return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
+
+static int i915_psr_status_show(struct seq_file *m, void *data)
+{
+    struct intel_connector *connector = m->private;
+    struct intel_dp *intel_dp = intel_attached_dp(connector);
+
+    return intel_psr_status(m, intel_dp);
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
+
+void intel_psr_connector_debugfs_add(struct intel_connector *connector)
+{
+    struct drm_i915_private *i915 = to_i915(connector->base.dev);
+    struct dentry *root = connector->base.debugfs_entry;
+
+    if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
+        return;
+
+    debugfs_create_file("i915_psr_sink_status", 0444, root,
+                connector, &i915_psr_sink_status_fops);
+
+    if (HAS_PSR(i915))
+        debugfs_create_file("i915_psr_status", 0444, root,
+                    connector, &i915_psr_status_fops);
+}
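
Editor's note: as a rough, standalone illustration of the wake-time bookkeeping that the new _compute_psr2_wake_times() helper in this diff performs, the sketch below converts the per-platform wake-time budgets into scanlines and clamps them the same way the patch does. It is not code from the patch: usecs_to_scanlines() and compute_psr2_wake_lines() are hypothetical stand-ins for the driver's intel_usecs_to_scanlines() and _compute_psr2_wake_times(), and the 4k-ish example timing in main() is made up.

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for the driver's intel_usecs_to_scanlines():
 * scanlines needed to cover 'usecs', given the mode's horizontal total
 * (pixels per line) and pixel clock in kHz.
 * line time [us] = htotal * 1000 / clock_khz, so
 * lines = ceil(usecs * clock_khz / (htotal * 1000)).
 */
static int usecs_to_scanlines(int htotal, int clock_khz, int usecs)
{
    return (usecs * clock_khz + htotal * 1000 - 1) / (htotal * 1000);
}

/*
 * Mirrors the checks in the patch's _compute_psr2_wake_times():
 * display version 12+ budgets 42us (IO) / 45us (fast) and allows up to
 * 12 lines, older platforms budget 50us / 32us with an 8-line cap, and
 * the result is clamped to a lower bound of 7 lines (the Bspec note in
 * the patch). Returns false when the mode's line time is too short,
 * which in the driver means PSR2 is not enabled for that mode.
 */
static bool compute_psr2_wake_lines(int display_ver, int htotal, int clock_khz,
                                    int *io_wake_lines, int *fast_wake_lines)
{
    int io_wake_time = display_ver >= 12 ? 42 : 50;
    int fast_wake_time = display_ver >= 12 ? 45 : 32;
    int max_wake_lines = display_ver >= 12 ? 12 : 8;
    int io = usecs_to_scanlines(htotal, clock_khz, io_wake_time);
    int fast = usecs_to_scanlines(htotal, clock_khz, fast_wake_time);

    if (io > max_wake_lines || fast > max_wake_lines)
        return false; /* mode rejected: wake times do not fit */

    *io_wake_lines = io < 7 ? 7 : io;
    *fast_wake_lines = fast < 7 ? 7 : fast;
    return true;
}

int main(void)
{
    int io, fast;

    /* Example timing roughly like 4k60: htotal 4400, clock 594000 kHz */
    if (compute_psr2_wake_lines(13, 4400, 594000, &io, &fast))
        printf("io_wake_lines=%d fast_wake_lines=%d\n", io, fast);

    return 0;
}

The computed line counts are what feed psr2_block_count_lines() in the diff (fewer than 9 lines means an 8-line block count, otherwise 12), which is also why the patch adds a check that the mode's vblank is at least that many lines before PSR2 is allowed.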