Diffstat (limited to 'drivers/gpu/drm/i915/display/intel_cx0_phy.c')
-rw-r--r-- | drivers/gpu/drm/i915/display/intel_cx0_phy.c | 797
1 file changed, 486 insertions(+), 311 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c index 4a6c3040ca15..a82b93cbc81d 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c @@ -5,7 +5,11 @@ #include <linux/log2.h> #include <linux/math64.h> + +#include <drm/drm_print.h> + #include "i915_reg.h" +#include "i915_utils.h" #include "intel_cx0_phy.h" #include "intel_cx0_phy_regs.h" #include "intel_ddi.h" @@ -16,6 +20,7 @@ #include "intel_hdmi.h" #include "intel_panel.h" #include "intel_psr.h" +#include "intel_snps_hdmi_pll.h" #include "intel_tc.h" #define MB_WRITE_COMMITTED true @@ -31,10 +36,13 @@ bool intel_encoder_is_c10phy(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); - if ((IS_LUNARLAKE(i915) || IS_METEORLAKE(i915)) && phy < PHY_C) + if (display->platform.pantherlake && phy == PHY_A) + return true; + + if ((display->platform.lunarlake || display->platform.meteorlake) && phy < PHY_C) return true; return false; @@ -65,22 +73,22 @@ static u8 intel_cx0_get_owned_lane_mask(struct intel_encoder *encoder) } static void -assert_dc_off(struct drm_i915_private *i915) +assert_dc_off(struct intel_display *display) { bool enabled; - enabled = intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF); - drm_WARN_ON(&i915->drm, !enabled); + enabled = intel_display_power_is_enabled(display, POWER_DOMAIN_DC_OFF); + drm_WARN_ON(display->drm, !enabled); } static void intel_cx0_program_msgbus_timer(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); int lane; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); for_each_cx0_lane_in_mask(INTEL_CX0_BOTH_LANES, lane) - intel_de_rmw(i915, - XELPDP_PORT_MSGBUS_TIMER(i915, encoder->port, lane), + intel_de_rmw(display, + XELPDP_PORT_MSGBUS_TIMER(display, encoder->port, lane), XELPDP_PORT_MSGBUS_TIMER_VAL_MASK, XELPDP_PORT_MSGBUS_TIMER_VAL); } @@ -96,12 +104,12 @@ static void intel_cx0_program_msgbus_timer(struct intel_encoder *encoder) */ static intel_wakeref_t intel_cx0_phy_transaction_begin(struct intel_encoder *encoder) { - intel_wakeref_t wakeref; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + intel_wakeref_t wakeref; intel_psr_pause(intel_dp); - wakeref = intel_display_power_get(i915, POWER_DOMAIN_DC_OFF); + wakeref = intel_display_power_get(display, POWER_DOMAIN_DC_OFF); intel_cx0_program_msgbus_timer(encoder); return wakeref; @@ -109,35 +117,38 @@ static intel_wakeref_t intel_cx0_phy_transaction_begin(struct intel_encoder *enc static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_wakeref_t wakeref) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_psr_resume(intel_dp); - intel_display_power_put(i915, POWER_DOMAIN_DC_OFF, wakeref); + intel_display_power_put(display, POWER_DOMAIN_DC_OFF, wakeref); } static void intel_clear_response_ready_flag(struct intel_encoder *encoder, int lane) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, encoder->port, lane), + 
intel_de_rmw(display, + XELPDP_PORT_P2M_MSGBUS_STATUS(display, encoder->port, lane), 0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET); } static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum phy phy = intel_encoder_to_phy(encoder); - intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_RESET); - if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_RESET, XELPDP_MSGBUS_TIMEOUT_SLOW)) { - drm_err_once(&i915->drm, "Failed to bring PHY %c to idle.\n", phy_name(phy)); + drm_err_once(display->drm, + "Failed to bring PHY %c to idle.\n", + phy_name(phy)); return; } @@ -147,22 +158,23 @@ static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane) static int intel_cx0_wait_for_ack(struct intel_encoder *encoder, int command, int lane, u32 *val) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum phy phy = intel_encoder_to_phy(encoder); - if (intel_de_wait_custom(i915, - XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane), + if (intel_de_wait_custom(display, + XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane), XELPDP_PORT_P2M_RESPONSE_READY, XELPDP_PORT_P2M_RESPONSE_READY, XELPDP_MSGBUS_TIMEOUT_FAST_US, XELPDP_MSGBUS_TIMEOUT_SLOW, val)) { - drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for message ACK. Status: 0x%x\n", + drm_dbg_kms(display->drm, + "PHY %c Timeout waiting for message ACK. Status: 0x%x\n", phy_name(phy), *val); - if (!(intel_de_read(i915, XELPDP_PORT_MSGBUS_TIMER(i915, port, lane)) & + if (!(intel_de_read(display, XELPDP_PORT_MSGBUS_TIMER(display, port, lane)) & XELPDP_PORT_MSGBUS_TIMER_TIMED_OUT)) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PHY %c Hardware did not detect a timeout\n", phy_name(phy)); @@ -171,14 +183,18 @@ static int intel_cx0_wait_for_ack(struct intel_encoder *encoder, } if (*val & XELPDP_PORT_P2M_ERROR_SET) { - drm_dbg_kms(&i915->drm, "PHY %c Error occurred during %s command. Status: 0x%x\n", phy_name(phy), + drm_dbg_kms(display->drm, + "PHY %c Error occurred during %s command. Status: 0x%x\n", + phy_name(phy), command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val); intel_cx0_bus_reset(encoder, lane); return -EINVAL; } if (REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, *val) != command) { - drm_dbg_kms(&i915->drm, "PHY %c Not a %s response. MSGBUS Status: 0x%x.\n", phy_name(phy), + drm_dbg_kms(display->drm, + "PHY %c Not a %s response. MSGBUS Status: 0x%x.\n", + phy_name(phy), command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? 
"read" : "write", *val); intel_cx0_bus_reset(encoder, lane); return -EINVAL; @@ -190,22 +206,22 @@ static int intel_cx0_wait_for_ack(struct intel_encoder *encoder, static int __intel_cx0_read_once(struct intel_encoder *encoder, int lane, u16 addr) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum phy phy = intel_encoder_to_phy(encoder); int ack; u32 val; - if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING, XELPDP_MSGBUS_TIMEOUT_SLOW)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy)); intel_cx0_bus_reset(encoder, lane); return -ETIMEDOUT; } - intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING | XELPDP_PORT_M2P_COMMAND_READ | XELPDP_PORT_M2P_ADDRESS(addr)); @@ -221,7 +237,8 @@ static int __intel_cx0_read_once(struct intel_encoder *encoder, * down and let the message bus to end up * in a known state */ - intel_cx0_bus_reset(encoder, lane); + if (DISPLAY_VER(display) < 30) + intel_cx0_bus_reset(encoder, lane); return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val); } @@ -229,11 +246,11 @@ static int __intel_cx0_read_once(struct intel_encoder *encoder, static u8 __intel_cx0_read(struct intel_encoder *encoder, int lane, u16 addr) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); int i, status; - assert_dc_off(i915); + assert_dc_off(display); /* 3 tries is assumed to be enough to read successfully */ for (i = 0; i < 3; i++) { @@ -243,7 +260,8 @@ static u8 __intel_cx0_read(struct intel_encoder *encoder, return status; } - drm_err_once(&i915->drm, "PHY %c Read %04x failed after %d retries.\n", + drm_err_once(display->drm, + "PHY %c Read %04x failed after %d retries.\n", phy_name(phy), addr, i); return 0; @@ -260,32 +278,32 @@ static u8 intel_cx0_read(struct intel_encoder *encoder, static int __intel_cx0_write_once(struct intel_encoder *encoder, int lane, u16 addr, u8 data, bool committed) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum phy phy = intel_encoder_to_phy(encoder); int ack; u32 val; - if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING, XELPDP_MSGBUS_TIMEOUT_SLOW)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PHY %c Timeout waiting for previous transaction to complete. Resetting the bus.\n", phy_name(phy)); intel_cx0_bus_reset(encoder, lane); return -ETIMEDOUT; } - intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING | (committed ? 
XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED : XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED) | XELPDP_PORT_M2P_DATA(data) | XELPDP_PORT_M2P_ADDRESS(addr)); - if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING, XELPDP_MSGBUS_TIMEOUT_SLOW)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PHY %c Timeout waiting for write to complete. Resetting the bus.\n", phy_name(phy)); intel_cx0_bus_reset(encoder, lane); return -ETIMEDOUT; @@ -295,9 +313,9 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder, ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val); if (ack < 0) return ack; - } else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane)) & + } else if ((intel_de_read(display, XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane)) & XELPDP_PORT_P2M_ERROR_SET)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PHY %c Error occurred during write command.\n", phy_name(phy)); intel_cx0_bus_reset(encoder, lane); return -EINVAL; @@ -310,7 +328,8 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder, * down and let the message bus to end up * in a known state */ - intel_cx0_bus_reset(encoder, lane); + if (DISPLAY_VER(display) < 30) + intel_cx0_bus_reset(encoder, lane); return 0; } @@ -318,11 +337,11 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder, static void __intel_cx0_write(struct intel_encoder *encoder, int lane, u16 addr, u8 data, bool committed) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); int i, status; - assert_dc_off(i915); + assert_dc_off(display); /* 3 tries is assumed to be enough to write successfully */ for (i = 0; i < 3; i++) { @@ -332,7 +351,7 @@ static void __intel_cx0_write(struct intel_encoder *encoder, return; } - drm_err_once(&i915->drm, + drm_err_once(display->drm, "PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, i); } @@ -348,9 +367,9 @@ static void intel_cx0_write(struct intel_encoder *encoder, static void intel_c20_sram_write(struct intel_encoder *encoder, int lane, u16 addr, u16 data) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - assert_dc_off(i915); + assert_dc_off(display); intel_cx0_write(encoder, lane, PHY_C20_WR_ADDRESS_H, addr >> 8, 0); intel_cx0_write(encoder, lane, PHY_C20_WR_ADDRESS_L, addr & 0xff, 0); @@ -362,10 +381,10 @@ static void intel_c20_sram_write(struct intel_encoder *encoder, static u16 intel_c20_sram_read(struct intel_encoder *encoder, int lane, u16 addr) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); u16 val; - assert_dc_off(i915); + assert_dc_off(display); intel_cx0_write(encoder, lane, PHY_C20_RD_ADDRESS_H, addr >> 8, 0); intel_cx0_write(encoder, lane, PHY_C20_RD_ADDRESS_L, addr & 0xff, 1); @@ -429,7 +448,7 @@ static u8 intel_c10_get_tx_term_ctl(const struct intel_crtc_state *crtc_state) void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); const struct intel_ddi_buf_trans *trans; u8 owned_lane_mask; intel_wakeref_t wakeref; 
@@ -444,7 +463,7 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder, wakeref = intel_cx0_phy_transaction_begin(encoder); trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); - if (drm_WARN_ON_ONCE(&i915->drm, !trans)) { + if (drm_WARN_ON_ONCE(display->drm, !trans)) { intel_cx0_phy_transaction_end(encoder, wakeref); return; } @@ -923,10 +942,10 @@ static const struct intel_c20pll_state mtl_c20_dp_uhbr20 = { }, .mplla = { 0x3104, /* mplla cfg0 */ 0xd105, /* mplla cfg1 */ - 0xc025, /* mplla cfg2 */ - 0xc025, /* mplla cfg3 */ - 0xa6ab, /* mplla cfg4 */ - 0x8c00, /* mplla cfg5 */ + 0x9217, /* mplla cfg2 */ + 0x9217, /* mplla cfg3 */ + 0x8c00, /* mplla cfg4 */ + 0x759a, /* mplla cfg5 */ 0x4000, /* mplla cfg6 */ 0x0003, /* mplla cfg7 */ 0x3555, /* mplla cfg8 */ @@ -1122,6 +1141,22 @@ static const struct intel_c20pll_state * const xe2hpd_c20_dp_tables[] = { NULL, }; +static const struct intel_c20pll_state * const xe3lpd_c20_dp_edp_tables[] = { + &mtl_c20_dp_rbr, + &xe2hpd_c20_edp_r216, + &xe2hpd_c20_edp_r243, + &mtl_c20_dp_hbr1, + &xe2hpd_c20_edp_r324, + &xe2hpd_c20_edp_r432, + &mtl_c20_dp_hbr2, + &xe2hpd_c20_edp_r675, + &mtl_c20_dp_hbr3, + &mtl_c20_dp_uhbr10, + &xe2hpd_c20_dp_uhbr13_5, + &mtl_c20_dp_uhbr20, + NULL, +}; + /* * HDMI link rates with 38.4 MHz reference clock. */ @@ -1970,19 +2005,6 @@ static const struct intel_c20pll_state * const mtl_c20_hdmi_tables[] = { NULL, }; -static int intel_c10_phy_check_hdmi_link_rate(int clock) -{ - const struct intel_c10pll_state * const *tables = mtl_c10_hdmi_tables; - int i; - - for (i = 0; tables[i]; i++) { - if (clock == tables[i]->clock) - return MODE_OK; - } - - return MODE_CLOCK_RANGE; -} - static const struct intel_c10pll_state * const * intel_c10pll_tables_get(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) @@ -2000,51 +2022,81 @@ intel_c10pll_tables_get(struct intel_crtc_state *crtc_state, return NULL; } -static void intel_c10pll_update_pll(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) +static void intel_cx0pll_update_ssc(struct intel_encoder *encoder, + struct intel_cx0pll_state *pll_state, bool is_dp) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - struct intel_cx0pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll; - int i; + struct intel_display *display = to_intel_display(encoder); - if (intel_crtc_has_dp_encoder(crtc_state)) { - if (intel_panel_use_ssc(i915)) { + if (is_dp) { + if (intel_panel_use_ssc(display)) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - pll_state->ssc_enabled = (intel_dp->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5); } } +} + +static void intel_c10pll_update_pll(struct intel_encoder *encoder, + struct intel_cx0pll_state *pll_state) +{ + struct intel_display *display = to_intel_display(encoder); + int i; if (pll_state->ssc_enabled) return; - drm_WARN_ON(&i915->drm, ARRAY_SIZE(pll_state->c10.pll) < 9); + drm_WARN_ON(display->drm, ARRAY_SIZE(pll_state->c10.pll) < 9); for (i = 4; i < 9; i++) pll_state->c10.pll[i] = 0; } +static int intel_c10pll_calc_state_from_table(struct intel_encoder *encoder, + const struct intel_c10pll_state * const *tables, + bool is_dp, int port_clock, + struct intel_cx0pll_state *pll_state) +{ + int i; + + for (i = 0; tables[i]; i++) { + if (port_clock == tables[i]->clock) { + pll_state->c10 = *tables[i]; + intel_cx0pll_update_ssc(encoder, pll_state, is_dp); + intel_c10pll_update_pll(encoder, pll_state); + pll_state->use_c10 = true; + + return 0; + } + } + + return -EINVAL; +} + static 
int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { const struct intel_c10pll_state * const *tables; - int i; + int err; tables = intel_c10pll_tables_get(crtc_state, encoder); if (!tables) return -EINVAL; - for (i = 0; tables[i]; i++) { - if (crtc_state->port_clock == tables[i]->clock) { - crtc_state->dpll_hw_state.cx0pll.c10 = *tables[i]; - intel_c10pll_update_pll(crtc_state, encoder); - crtc_state->dpll_hw_state.cx0pll.use_c10 = true; + err = intel_c10pll_calc_state_from_table(encoder, tables, + intel_crtc_has_dp_encoder(crtc_state), + crtc_state->port_clock, + &crtc_state->dpll_hw_state.cx0pll); - return 0; - } - } + if (err == 0 || !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return err; - return -EINVAL; + /* For HDMI PLLs try SNPS PHY algorithm, if there are no precomputed tables */ + intel_snps_hdmi_pll_compute_c10pll(&crtc_state->dpll_hw_state.cx0pll.c10, + crtc_state->port_clock); + intel_c10pll_update_pll(encoder, + &crtc_state->dpll_hw_state.cx0pll); + crtc_state->dpll_hw_state.cx0pll.use_c10 = true; + + return 0; } static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder, @@ -2073,25 +2125,16 @@ static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder, intel_cx0_phy_transaction_end(encoder, wakeref); } -static void intel_c10_pll_program(struct drm_i915_private *i915, - const struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) +static void intel_c10_pll_program(struct intel_display *display, + struct intel_encoder *encoder, + const struct intel_c10pll_state *pll_state) { - const struct intel_c10pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll.c10; int i; intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1), 0, C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED); - /* Custom width needs to be programmed to 0 for both the phy lanes */ - intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH, - C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10, - MB_WRITE_COMMITTED); - intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1), - 0, C10_VDR_CTRL_UPDATE_CFG, - MB_WRITE_COMMITTED); - /* Program the pll values only for the master lane */ for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++) intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i), @@ -2101,12 +2144,16 @@ static void intel_c10_pll_program(struct drm_i915_private *i915, intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED); intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED); + /* Custom width needs to be programmed to 0 for both the phy lanes */ + intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH, + C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10, + MB_WRITE_COMMITTED); intel_cx0_rmw(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1), 0, C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED); } -static void intel_c10pll_dump_hw_state(struct drm_i915_private *i915, +static void intel_c10pll_dump_hw_state(struct intel_display *display, const struct intel_c10pll_state *hw_state) { bool fracen; @@ -2115,35 +2162,77 @@ static void intel_c10pll_dump_hw_state(struct drm_i915_private *i915, unsigned int multiplier, tx_clk_div; fracen = hw_state->pll[0] & C10_PLL0_FRACEN; - drm_dbg_kms(&i915->drm, "c10pll_hw_state: fracen: %s, ", + drm_dbg_kms(display->drm, "c10pll_hw_state: fracen: %s, ", str_yes_no(fracen)); if (fracen) { frac_quot = hw_state->pll[12] << 
8 | hw_state->pll[11]; frac_rem = hw_state->pll[14] << 8 | hw_state->pll[13]; frac_den = hw_state->pll[10] << 8 | hw_state->pll[9]; - drm_dbg_kms(&i915->drm, "quot: %u, rem: %u, den: %u,\n", + drm_dbg_kms(display->drm, "quot: %u, rem: %u, den: %u,\n", frac_quot, frac_rem, frac_den); } multiplier = (REG_FIELD_GET8(C10_PLL3_MULTIPLIERH_MASK, hw_state->pll[3]) << 8 | hw_state->pll[2]) / 2 + 16; tx_clk_div = REG_FIELD_GET8(C10_PLL15_TXCLKDIV_MASK, hw_state->pll[15]); - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "multiplier: %u, tx_clk_div: %u.\n", multiplier, tx_clk_div); - drm_dbg_kms(&i915->drm, "c10pll_rawhw_state:"); - drm_dbg_kms(&i915->drm, "tx: 0x%x, cmn: 0x%x\n", hw_state->tx, hw_state->cmn); + drm_dbg_kms(display->drm, "c10pll_rawhw_state:"); + drm_dbg_kms(display->drm, "tx: 0x%x, cmn: 0x%x\n", hw_state->tx, + hw_state->cmn); BUILD_BUG_ON(ARRAY_SIZE(hw_state->pll) % 4); for (i = 0; i < ARRAY_SIZE(hw_state->pll); i = i + 4) - drm_dbg_kms(&i915->drm, "pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x\n", + drm_dbg_kms(display->drm, + "pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x\n", i, hw_state->pll[i], i + 1, hw_state->pll[i + 1], i + 2, hw_state->pll[i + 2], i + 3, hw_state->pll[i + 3]); } -static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_state *pll_state) +/* + * Some ARLs SoCs have the same drm PCI IDs, so need a helper to differentiate based + * on the host bridge device ID to get the correct txx_mics value. + */ +static bool is_arrowlake_s_by_host_bridge(void) +{ + struct pci_dev *pdev = NULL; + u16 host_bridge_pci_dev_id; + + while ((pdev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, pdev))) + host_bridge_pci_dev_id = pdev->device; + + return pdev && IS_ARROWLAKE_S_BY_HOST_BRIDGE_ID(host_bridge_pci_dev_id); +} + +static u16 intel_c20_hdmi_tmds_tx_cgf_1(struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); + u16 tx_misc; + u16 tx_dcc_cal_dac_ctrl_range = 8; + u16 tx_term_ctrl = 2; + + if (DISPLAY_VER(display) >= 20) { + tx_misc = 5; + tx_term_ctrl = 4; + } else if (display->platform.battlemage) { + tx_misc = 0; + } else if (display->platform.meteorlake_u || + is_arrowlake_s_by_host_bridge()) { + tx_misc = 3; + } else { + tx_misc = 7; + } + + return (C20_PHY_TX_MISC(tx_misc) | + C20_PHY_TX_DCC_CAL_RANGE(tx_dcc_cal_dac_ctrl_range) | + C20_PHY_TX_DCC_BYPASS | C20_PHY_TX_TERM_CTL(tx_term_ctrl)); +} + +static int intel_c20_compute_hdmi_tmds_pll(struct intel_crtc_state *crtc_state) +{ + struct intel_c20pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll.c20; u64 datarate; u64 mpll_tx_clk_div; u64 vco_freq_shift; @@ -2155,10 +2244,10 @@ static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_ u8 mpllb_ana_freq_vco; u8 mpll_div_multiplier; - if (pixel_clock < 25175 || pixel_clock > 600000) + if (crtc_state->port_clock < 25175 || crtc_state->port_clock > 600000) return -EINVAL; - datarate = ((u64)pixel_clock * 1000) * 10; + datarate = ((u64)crtc_state->port_clock * 1000) * 10; mpll_tx_clk_div = ilog2(div64_u64((u64)CLOCK_9999MHZ, (u64)datarate)); vco_freq_shift = ilog2(div64_u64((u64)CLOCK_4999MHZ * (u64)256, (u64)datarate)); vco_freq = (datarate << vco_freq_shift) >> 8; @@ -2180,9 +2269,9 @@ static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_ else mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_0; - pll_state->clock = pixel_clock; + pll_state->clock = crtc_state->port_clock; pll_state->tx[0] = 0xbe88; - pll_state->tx[1] = 
0x9800; + pll_state->tx[1] = intel_c20_hdmi_tmds_tx_cgf_1(crtc_state); pll_state->tx[2] = 0x0000; pll_state->cmn[0] = 0x0500; pll_state->cmn[1] = 0x0005; @@ -2210,42 +2299,23 @@ static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_ return 0; } -static int intel_c20_phy_check_hdmi_link_rate(int clock) -{ - const struct intel_c20pll_state * const *tables = mtl_c20_hdmi_tables; - int i; - - for (i = 0; tables[i]; i++) { - if (clock == tables[i]->clock) - return MODE_OK; - } - - if (clock >= 25175 && clock <= 594000) - return MODE_OK; - - return MODE_CLOCK_RANGE; -} - -int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock) -{ - struct intel_digital_port *dig_port = hdmi_to_dig_port(hdmi); - - if (intel_encoder_is_c10phy(&dig_port->base)) - return intel_c10_phy_check_hdmi_link_rate(clock); - return intel_c20_phy_check_hdmi_link_rate(clock); -} - static const struct intel_c20pll_state * const * intel_c20_pll_tables_get(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(crtc_state); if (intel_crtc_has_dp_encoder(crtc_state)) { - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) - return xe2hpd_c20_edp_tables; + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) { + if (DISPLAY_RUNTIME_INFO(display)->edp_typec_support) + return xe3lpd_c20_dp_edp_tables; + if (DISPLAY_VERx100(display) == 1401) + return xe2hpd_c20_edp_tables; + } - if (DISPLAY_VER_FULL(i915) == IP_VER(14, 1)) + if (DISPLAY_VER(display) >= 30) + return xe3lpd_c20_dp_edp_tables; + else if (DISPLAY_VERx100(display) == 1401) return xe2hpd_c20_dp_tables; else return mtl_c20_dp_tables; @@ -2266,8 +2336,7 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state, /* try computed C20 HDMI tables before using consolidated tables */ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { - if (intel_c20_compute_hdmi_tmds_pll(crtc_state->port_clock, - &crtc_state->dpll_hw_state.cx0pll.c20) == 0) + if (intel_c20_compute_hdmi_tmds_pll(crtc_state) == 0) return 0; } @@ -2278,6 +2347,9 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state, for (i = 0; tables[i]; i++) { if (crtc_state->port_clock == tables[i]->clock) { crtc_state->dpll_hw_state.cx0pll.c20 = *tables[i]; + intel_cx0pll_update_ssc(encoder, + &crtc_state->dpll_hw_state.cx0pll, + intel_crtc_has_dp_encoder(crtc_state)); crtc_state->dpll_hw_state.cx0pll.use_c10 = false; return 0; } @@ -2347,10 +2419,10 @@ static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder, static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, struct intel_c20pll_state *pll_state) { + struct intel_display *display = to_intel_display(encoder); bool cntx; intel_wakeref_t wakeref; int i; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); wakeref = intel_cx0_phy_transaction_begin(encoder); @@ -2362,11 +2434,11 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, if (cntx) pll_state->tx[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_B_TX_CNTX_CFG(i915, i)); + PHY_C20_B_TX_CNTX_CFG(display, i)); else pll_state->tx[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_A_TX_CNTX_CFG(i915, i)); + PHY_C20_A_TX_CNTX_CFG(display, i)); } /* Read common configuration */ @@ -2374,11 +2446,11 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, if (cntx) pll_state->cmn[i] = intel_c20_sram_read(encoder, 
INTEL_CX0_LANE0, - PHY_C20_B_CMN_CNTX_CFG(i915, i)); + PHY_C20_B_CMN_CNTX_CFG(display, i)); else pll_state->cmn[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_A_CMN_CNTX_CFG(i915, i)); + PHY_C20_A_CMN_CNTX_CFG(display, i)); } if (intel_c20phy_use_mpllb(pll_state)) { @@ -2387,11 +2459,11 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, if (cntx) pll_state->mpllb[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_B_MPLLB_CNTX_CFG(i915, i)); + PHY_C20_B_MPLLB_CNTX_CFG(display, i)); else pll_state->mpllb[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_A_MPLLB_CNTX_CFG(i915, i)); + PHY_C20_A_MPLLB_CNTX_CFG(display, i)); } } else { /* MPLLA configuration */ @@ -2399,11 +2471,11 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, if (cntx) pll_state->mplla[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_B_MPLLA_CNTX_CFG(i915, i)); + PHY_C20_B_MPLLA_CNTX_CFG(display, i)); else pll_state->mplla[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_A_MPLLA_CNTX_CFG(i915, i)); + PHY_C20_A_MPLLA_CNTX_CFG(display, i)); } } @@ -2412,33 +2484,37 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, intel_cx0_phy_transaction_end(encoder, wakeref); } -static void intel_c20pll_dump_hw_state(struct drm_i915_private *i915, +static void intel_c20pll_dump_hw_state(struct intel_display *display, const struct intel_c20pll_state *hw_state) { int i; - drm_dbg_kms(&i915->drm, "c20pll_hw_state:\n"); - drm_dbg_kms(&i915->drm, "tx[0] = 0x%.4x, tx[1] = 0x%.4x, tx[2] = 0x%.4x\n", + drm_dbg_kms(display->drm, "c20pll_hw_state:\n"); + drm_dbg_kms(display->drm, + "tx[0] = 0x%.4x, tx[1] = 0x%.4x, tx[2] = 0x%.4x\n", hw_state->tx[0], hw_state->tx[1], hw_state->tx[2]); - drm_dbg_kms(&i915->drm, "cmn[0] = 0x%.4x, cmn[1] = 0x%.4x, cmn[2] = 0x%.4x, cmn[3] = 0x%.4x\n", + drm_dbg_kms(display->drm, + "cmn[0] = 0x%.4x, cmn[1] = 0x%.4x, cmn[2] = 0x%.4x, cmn[3] = 0x%.4x\n", hw_state->cmn[0], hw_state->cmn[1], hw_state->cmn[2], hw_state->cmn[3]); if (intel_c20phy_use_mpllb(hw_state)) { for (i = 0; i < ARRAY_SIZE(hw_state->mpllb); i++) - drm_dbg_kms(&i915->drm, "mpllb[%d] = 0x%.4x\n", i, hw_state->mpllb[i]); + drm_dbg_kms(display->drm, "mpllb[%d] = 0x%.4x\n", i, + hw_state->mpllb[i]); } else { for (i = 0; i < ARRAY_SIZE(hw_state->mplla); i++) - drm_dbg_kms(&i915->drm, "mplla[%d] = 0x%.4x\n", i, hw_state->mplla[i]); + drm_dbg_kms(display->drm, "mplla[%d] = 0x%.4x\n", i, + hw_state->mplla[i]); } } -void intel_cx0pll_dump_hw_state(struct drm_i915_private *i915, +void intel_cx0pll_dump_hw_state(struct intel_display *display, const struct intel_cx0pll_state *hw_state) { if (hw_state->use_c10) - intel_c10pll_dump_hw_state(i915, &hw_state->c10); + intel_c10pll_dump_hw_state(display, &hw_state->c10); else - intel_c20pll_dump_hw_state(i915, &hw_state->c20); + intel_c20pll_dump_hw_state(display, &hw_state->c20); } static u8 intel_c20_get_dp_rate(u32 clock) @@ -2538,20 +2614,15 @@ static int intel_get_c20_custom_width(u32 clock, bool dp) return 0; } -static void intel_c20_pll_program(struct drm_i915_private *i915, - const struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) +static void intel_c20_pll_program(struct intel_display *display, + struct intel_encoder *encoder, + const struct intel_c20pll_state *pll_state, + bool is_dp, int port_clock) { - const struct intel_c20pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll.c20; - bool dp = false; u8 owned_lane_mask = 
intel_cx0_get_owned_lane_mask(encoder); - u32 clock = crtc_state->port_clock; bool cntx; int i; - if (intel_crtc_has_dp_encoder(crtc_state)) - dp = true; - /* 1. Read current context selection */ cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & BIT(0); @@ -2571,11 +2642,11 @@ static void intel_c20_pll_program(struct drm_i915_private *i915, for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) { if (cntx) intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_A_TX_CNTX_CFG(i915, i), + PHY_C20_A_TX_CNTX_CFG(display, i), pll_state->tx[i]); else intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_B_TX_CNTX_CFG(i915, i), + PHY_C20_B_TX_CNTX_CFG(display, i), pll_state->tx[i]); } @@ -2583,11 +2654,11 @@ static void intel_c20_pll_program(struct drm_i915_private *i915, for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) { if (cntx) intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_A_CMN_CNTX_CFG(i915, i), + PHY_C20_A_CMN_CNTX_CFG(display, i), pll_state->cmn[i]); else intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_B_CMN_CNTX_CFG(i915, i), + PHY_C20_B_CMN_CNTX_CFG(display, i), pll_state->cmn[i]); } @@ -2596,22 +2667,22 @@ static void intel_c20_pll_program(struct drm_i915_private *i915, for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) { if (cntx) intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_A_MPLLB_CNTX_CFG(i915, i), + PHY_C20_A_MPLLB_CNTX_CFG(display, i), pll_state->mpllb[i]); else intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_B_MPLLB_CNTX_CFG(i915, i), + PHY_C20_B_MPLLB_CNTX_CFG(display, i), pll_state->mpllb[i]); } } else { for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) { if (cntx) intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_A_MPLLA_CNTX_CFG(i915, i), + PHY_C20_A_MPLLA_CNTX_CFG(display, i), pll_state->mplla[i]); else intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_B_MPLLA_CNTX_CFG(i915, i), + PHY_C20_B_MPLLA_CNTX_CFG(display, i), pll_state->mplla[i]); } } @@ -2619,23 +2690,23 @@ static void intel_c20_pll_program(struct drm_i915_private *i915, /* 4. Program custom width to match the link protocol */ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_WIDTH, PHY_C20_CUSTOM_WIDTH_MASK, - PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(clock, dp)), + PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(port_clock, is_dp)), MB_WRITE_COMMITTED); /* 5. For DP or 6. For HDMI */ - if (dp) { + if (is_dp) { intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE, BIT(6) | PHY_C20_CUSTOM_SERDES_MASK, - BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(clock)), + BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(port_clock)), MB_WRITE_COMMITTED); } else { intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE, BIT(7) | PHY_C20_CUSTOM_SERDES_MASK, - is_hdmi_frl(clock) ? BIT(7) : 0, + is_hdmi_frl(port_clock) ? 
BIT(7) : 0, MB_WRITE_COMMITTED); intel_cx0_write(encoder, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE, - intel_c20_get_hdmi_rate(clock), + intel_c20_get_hdmi_rate(port_clock), MB_WRITE_COMMITTED); } @@ -2675,13 +2746,14 @@ static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder, } static void intel_program_port_clock_ctl(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, + const struct intel_cx0pll_state *pll_state, + bool is_dp, int port_clock, bool lane_reversal) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); u32 val = 0; - intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port), XELPDP_PORT_REVERSAL, lane_reversal ? XELPDP_PORT_REVERSAL : 0); @@ -2690,22 +2762,21 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder, val |= XELPDP_FORWARD_CLOCK_UNGATE; - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && - is_hdmi_frl(crtc_state->port_clock)) - val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_DIV18CLK); + if (!is_dp && is_hdmi_frl(port_clock)) + val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_DIV18CLK); else - val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_MAXPCLK); + val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_MAXPCLK); /* TODO: HDMI FRL */ /* DP2.0 10G and 20G rates enable MPLLA*/ - if (crtc_state->port_clock == 1000000 || crtc_state->port_clock == 2000000) - val |= crtc_state->dpll_hw_state.cx0pll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLA : 0; + if (port_clock == 1000000 || port_clock == 2000000) + val |= pll_state->ssc_enabled ? XELPDP_SSC_ENABLE_PLLA : 0; else - val |= crtc_state->dpll_hw_state.cx0pll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0; + val |= pll_state->ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0; - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE | - XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_SSC_ENABLE_PLLA | + XELPDP_DDI_CLOCK_SELECT_MASK(display) | XELPDP_SSC_ENABLE_PLLA | XELPDP_SSC_ENABLE_PLLB, val); } @@ -2734,48 +2805,49 @@ static u32 intel_cx0_get_powerdown_state(u8 lane_mask, u8 state) static void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder, u8 lane_mask, u8 state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum phy phy = intel_encoder_to_phy(encoder); - i915_reg_t buf_ctl2_reg = XELPDP_PORT_BUF_CTL2(i915, port); + i915_reg_t buf_ctl2_reg = XELPDP_PORT_BUF_CTL2(display, port); int lane; - intel_de_rmw(i915, buf_ctl2_reg, + intel_de_rmw(display, buf_ctl2_reg, intel_cx0_get_powerdown_state(INTEL_CX0_BOTH_LANES, XELPDP_LANE_POWERDOWN_NEW_STATE_MASK), intel_cx0_get_powerdown_state(lane_mask, state)); /* Wait for pending transactions.*/ for_each_cx0_lane_in_mask(lane_mask, lane) - if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING, XELPDP_MSGBUS_TIMEOUT_SLOW)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PHY %c Timeout waiting for previous transaction to complete. 
Reset the bus.\n", phy_name(phy)); intel_cx0_bus_reset(encoder, lane); } - intel_de_rmw(i915, buf_ctl2_reg, + intel_de_rmw(display, buf_ctl2_reg, intel_cx0_get_powerdown_update(INTEL_CX0_BOTH_LANES), intel_cx0_get_powerdown_update(lane_mask)); /* Update Timeout Value */ - if (intel_de_wait_custom(i915, buf_ctl2_reg, + if (intel_de_wait_custom(display, buf_ctl2_reg, intel_cx0_get_powerdown_update(lane_mask), 0, XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL)) - drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n", + drm_warn(display->drm, + "PHY %c failed to bring out of Lane reset after %dus.\n", phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US); } static void intel_cx0_setup_powerdown(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; - intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), + intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), XELPDP_POWER_STATE_READY_MASK, XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY)); - intel_de_rmw(i915, XELPDP_PORT_BUF_CTL3(i915, port), + intel_de_rmw(display, XELPDP_PORT_BUF_CTL3(display, port), XELPDP_POWER_STATE_ACTIVE_MASK | XELPDP_PLL_LANE_STAGGERING_DELAY_MASK, XELPDP_POWER_STATE_ACTIVE(CX0_P0_STATE_ACTIVE) | @@ -2807,7 +2879,7 @@ static u32 intel_cx0_get_pclk_refclk_ack(u8 lane_mask) static void intel_cx0_phy_lane_reset(struct intel_encoder *encoder, bool lane_reversal) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum phy phy = intel_encoder_to_phy(encoder); u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder); @@ -2820,48 +2892,51 @@ static void intel_cx0_phy_lane_reset(struct intel_encoder *encoder, XELPDP_LANE_PHY_CURRENT_STATUS(1)) : XELPDP_LANE_PHY_CURRENT_STATUS(0); - if (intel_de_wait_custom(i915, XELPDP_PORT_BUF_CTL1(i915, port), + if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL1(display, port), XELPDP_PORT_BUF_SOC_PHY_READY, XELPDP_PORT_BUF_SOC_PHY_READY, XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL)) - drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n", + drm_warn(display->drm, + "PHY %c failed to bring out of SOC reset after %dus.\n", phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US); - intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset, + intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset, lane_pipe_reset); - if (intel_de_wait_custom(i915, XELPDP_PORT_BUF_CTL2(i915, port), + if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port), lane_phy_current_status, lane_phy_current_status, XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL)) - drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n", + drm_warn(display->drm, + "PHY %c failed to bring out of Lane reset after %dus.\n", phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US); - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port), intel_cx0_get_pclk_refclk_request(owned_lane_mask), intel_cx0_get_pclk_refclk_request(lane_mask)); - if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, port), + if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port), intel_cx0_get_pclk_refclk_ack(owned_lane_mask), intel_cx0_get_pclk_refclk_ack(lane_mask), XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL)) - drm_warn(&i915->drm, "PHY %c failed 
to request refclk after %dus.\n", + drm_warn(display->drm, + "PHY %c failed to request refclk after %dus.\n", phy_name(phy), XELPDP_REFCLK_ENABLE_TIMEOUT_US); intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES, CX0_P2_STATE_RESET); intel_cx0_setup_powerdown(encoder); - intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset, 0); + intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset, 0); - if (intel_de_wait_for_clear(i915, XELPDP_PORT_BUF_CTL2(i915, port), + if (intel_de_wait_for_clear(display, XELPDP_PORT_BUF_CTL2(display, port), lane_phy_current_status, XELPDP_PORT_RESET_END_TIMEOUT)) - drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dms.\n", + drm_warn(display->drm, + "PHY %c failed to bring out of Lane reset after %dms.\n", phy_name(phy), XELPDP_PORT_RESET_END_TIMEOUT); } -static void intel_cx0_program_phy_lane(struct drm_i915_private *i915, - struct intel_encoder *encoder, int lane_count, +static void intel_cx0_program_phy_lane(struct intel_encoder *encoder, int lane_count, bool lane_reversal) { int i; @@ -2927,13 +3002,14 @@ static u32 intel_cx0_get_pclk_pll_ack(u8 lane_mask) return val; } -static void intel_cx0pll_enable(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +static void __intel_cx0pll_enable(struct intel_encoder *encoder, + const struct intel_cx0pll_state *pll_state, + bool is_dp, int port_clock, int lane_count) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); - bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; + bool lane_reversal = dig_port->lane_reversal; u8 maxpclk_lane = lane_reversal ? INTEL_CX0_LANE1 : INTEL_CX0_LANE0; intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder); @@ -2942,7 +3018,7 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder, * 1. Program PORT_CLOCK_CTL REGISTER to configure * clock muxes, gating and SSC */ - intel_program_port_clock_ctl(encoder, crtc_state, lane_reversal); + intel_program_port_clock_ctl(encoder, pll_state, is_dp, port_clock, lane_reversal); /* 2. Bring PHY out of reset. */ intel_cx0_phy_lane_reset(encoder, lane_reversal); @@ -2962,15 +3038,15 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder, /* 5. Program PHY internal PLL internal registers. */ if (intel_encoder_is_c10phy(encoder)) - intel_c10_pll_program(i915, crtc_state, encoder); + intel_c10_pll_program(display, encoder, &pll_state->c10); else - intel_c20_pll_program(i915, crtc_state, encoder); + intel_c20_pll_program(display, encoder, &pll_state->c20, is_dp, port_clock); /* * 6. Program the enabled and disabled owned PHY lane * transmitters over message bus */ - intel_cx0_program_phy_lane(i915, encoder, crtc_state->lane_count, lane_reversal); + intel_cx0_program_phy_lane(encoder, lane_count, lane_reversal); /* * 7. Follow the Display Voltage Frequency Switching - Sequence @@ -2981,23 +3057,22 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder, * 8. Program DDI_CLK_VALFREQ to match intended DDI * clock frequency. */ - intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), - crtc_state->port_clock); + intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), port_clock); /* * 9. Set PORT_CLOCK_CTL register PCLK PLL Request * LN<Lane for maxPCLK> to "1" to enable PLL. 
*/ - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES), intel_cx0_get_pclk_pll_request(maxpclk_lane)); /* 10. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */ - if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES), intel_cx0_get_pclk_pll_ack(maxpclk_lane), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL)) - drm_warn(&i915->drm, "Port %c PLL not locked after %dus.\n", + drm_warn(display->drm, "Port %c PLL not locked after %dus.\n", phy_name(phy), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US); /* @@ -3009,17 +3084,26 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder, intel_cx0_phy_transaction_end(encoder, wakeref); } +static void intel_cx0pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + __intel_cx0pll_enable(encoder, &crtc_state->dpll_hw_state.cx0pll, + intel_crtc_has_dp_encoder(crtc_state), + crtc_state->port_clock, crtc_state->lane_count); +} + int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - u32 clock; - u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port)); + struct intel_display *display = to_intel_display(encoder); + u32 clock, val; + + val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port)); - clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val); + clock = XELPDP_DDI_CLOCK_SELECT_GET(display, val); - drm_WARN_ON(&i915->drm, !(val & XELPDP_FORWARD_CLOCK_UNGATE)); - drm_WARN_ON(&i915->drm, !(val & XELPDP_TBT_CLOCK_REQUEST)); - drm_WARN_ON(&i915->drm, !(val & XELPDP_TBT_CLOCK_ACK)); + drm_WARN_ON(display->drm, !(val & XELPDP_FORWARD_CLOCK_UNGATE)); + drm_WARN_ON(display->drm, !(val & XELPDP_TBT_CLOCK_REQUEST)); + drm_WARN_ON(display->drm, !(val & XELPDP_TBT_CLOCK_ACK)); switch (clock) { case XELPDP_DDI_CLOCK_SELECT_TBT_162: @@ -3030,13 +3114,18 @@ int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder) return 540000; case XELPDP_DDI_CLOCK_SELECT_TBT_810: return 810000; + case XELPDP_DDI_CLOCK_SELECT_TBT_312_5: + return 1000000; + case XELPDP_DDI_CLOCK_SELECT_TBT_625: + return 2000000; default: MISSING_CASE(clock); return 162000; } } -static int intel_mtl_tbt_clock_select(struct drm_i915_private *i915, int clock) +static int intel_mtl_tbt_clock_select(struct intel_display *display, + int clock) { switch (clock) { case 162000: @@ -3047,6 +3136,18 @@ static int intel_mtl_tbt_clock_select(struct drm_i915_private *i915, int clock) return XELPDP_DDI_CLOCK_SELECT_TBT_540; case 810000: return XELPDP_DDI_CLOCK_SELECT_TBT_810; + case 1000000: + if (DISPLAY_VER(display) < 30) { + drm_WARN_ON(display->drm, "UHBR10 not supported for the platform\n"); + return XELPDP_DDI_CLOCK_SELECT_TBT_162; + } + return XELPDP_DDI_CLOCK_SELECT_TBT_312_5; + case 2000000: + if (DISPLAY_VER(display) < 30) { + drm_WARN_ON(display->drm, "UHBR20 not supported for the platform\n"); + return XELPDP_DDI_CLOCK_SELECT_TBT_162; + } + return XELPDP_DDI_CLOCK_SELECT_TBT_625; default: MISSING_CASE(clock); return XELPDP_DDI_CLOCK_SELECT_TBT_162; @@ -3056,21 +3157,28 @@ static int intel_mtl_tbt_clock_select(struct drm_i915_private *i915, int clock) static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state 
*crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); u32 val = 0; + u32 mask; /* * 1. Program PORT_CLOCK_CTL REGISTER to configure * clock muxes, gating and SSC */ - val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(i915, crtc_state->port_clock)); + + mask = XELPDP_DDI_CLOCK_SELECT_MASK(display); + val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, + intel_mtl_tbt_clock_select(display, crtc_state->port_clock)); + + mask |= XELPDP_FORWARD_CLOCK_UNGATE; val |= XELPDP_FORWARD_CLOCK_UNGATE; - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), - XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_FORWARD_CLOCK_UNGATE, val); + + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), + mask, val); /* 2. Read back PORT_CLOCK_CTL REGISTER */ - val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port)); + val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port)); /* * 3. Follow the Display Voltage Frequency Switching - Sequence @@ -3081,14 +3189,15 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder, * 4. Set PORT_CLOCK_CTL register TBT CLOCK Request to "1" to enable PLL. */ val |= XELPDP_TBT_CLOCK_REQUEST; - intel_de_write(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), val); + intel_de_write(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), val); /* 5. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "1". */ - if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), XELPDP_TBT_CLOCK_ACK, XELPDP_TBT_CLOCK_ACK, 100, 0, NULL)) - drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not locked after 100us.\n", + drm_warn(display->drm, + "[ENCODER:%d:%s][%c] PHY PLL not locked after 100us.\n", encoder->base.base.id, encoder->base.name, phy_name(phy)); /* @@ -3100,7 +3209,7 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder, * 7. Program DDI_CLK_VALFREQ to match intended DDI * clock frequency. */ - intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), + intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), crtc_state->port_clock); } @@ -3117,12 +3226,13 @@ void intel_mtl_pll_enable(struct intel_encoder *encoder, static u8 cx0_power_control_disable_val(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); if (intel_encoder_is_c10phy(encoder)) return CX0_P2PG_STATE_DISABLE; - if (IS_BATTLEMAGE(i915) && encoder->port == PORT_A) + if ((display->platform.battlemage && encoder->port == PORT_A) || + (DISPLAY_VER(display) >= 30 && encoder->type == INTEL_OUTPUT_EDP)) return CX0_P2PG_STATE_DISABLE; return CX0_P4PG_STATE_DISABLE; @@ -3130,7 +3240,7 @@ static u8 cx0_power_control_disable_val(struct intel_encoder *encoder) static void intel_cx0pll_disable(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder); @@ -3147,21 +3257,22 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder) * 3. Set PORT_CLOCK_CTL register PCLK PLL Request LN<Lane for maxPCLK> * to "0" to disable PLL. 
*/ - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES) | intel_cx0_get_pclk_refclk_request(INTEL_CX0_BOTH_LANES), 0); /* 4. Program DDI_CLK_VALFREQ to 0. */ - intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), 0); + intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), 0); /* * 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**> == "0". */ - if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) | intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0, XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL)) - drm_warn(&i915->drm, "Port %c PLL not unlocked after %dus.\n", + drm_warn(display->drm, + "Port %c PLL not unlocked after %dus.\n", phy_name(phy), XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US); /* @@ -3170,17 +3281,27 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder) */ /* 7. Program PORT_CLOCK_CTL register to disable and gate clocks. */ - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), - XELPDP_DDI_CLOCK_SELECT_MASK, 0); - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), + XELPDP_DDI_CLOCK_SELECT_MASK(display), 0); + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), XELPDP_FORWARD_CLOCK_UNGATE, 0); intel_cx0_phy_transaction_end(encoder, wakeref); } +static bool intel_cx0_pll_is_enabled(struct intel_encoder *encoder) +{ + struct intel_display *display = to_intel_display(encoder); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + u8 lane = dig_port->lane_reversal ? INTEL_CX0_LANE1 : INTEL_CX0_LANE0; + + return intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port)) & + intel_cx0_get_pclk_pll_request(lane); +} + static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); /* @@ -3191,13 +3312,14 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder) /* * 2. Set PORT_CLOCK_CTL register TBT CLOCK Request to "0" to disable PLL. */ - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), XELPDP_TBT_CLOCK_REQUEST, 0); /* 3. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "0". */ - if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL)) - drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n", + drm_warn(display->drm, + "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n", encoder->base.base.id, encoder->base.name, phy_name(phy)); /* @@ -3208,12 +3330,12 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder) /* * 5. Program PORT CLOCK CTRL register to disable and gate clocks */ - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), - XELPDP_DDI_CLOCK_SELECT_MASK | + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), + XELPDP_DDI_CLOCK_SELECT_MASK(display) | XELPDP_FORWARD_CLOCK_UNGATE, 0); /* 6. Program DDI_CLK_VALFREQ to 0. 
 	 */
-	intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), 0);
+	intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), 0);
 }
 
 void intel_mtl_pll_disable(struct intel_encoder *encoder)
@@ -3230,13 +3352,15 @@ enum icl_port_dpll_id
 intel_mtl_port_pll_type(struct intel_encoder *encoder,
 			const struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	struct intel_display *display = to_intel_display(encoder);
+	u32 val, clock;
+
 	/*
 	 * TODO: Determine the PLL type from the SW state, once MTL PLL
 	 * handling is done via the standard shared DPLL framework.
 	 */
-	u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port));
-	u32 clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
+	val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port));
+	clock = XELPDP_DDI_CLOCK_SELECT_GET(display, val);
 
 	if (clock == XELPDP_DDI_CLOCK_SELECT_MAXPCLK ||
 	    clock == XELPDP_DDI_CLOCK_SELECT_DIV18CLK)
@@ -3250,28 +3374,28 @@ static void intel_c10pll_state_verify(const struct intel_crtc_state *state,
 				      struct intel_encoder *encoder,
 				      struct intel_c10pll_state *mpllb_hw_state)
 {
-	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	struct intel_display *display = to_intel_display(state);
 	const struct intel_c10pll_state *mpllb_sw_state = &state->dpll_hw_state.cx0pll.c10;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) {
 		u8 expected = mpllb_sw_state->pll[i];
 
-		I915_STATE_WARN(i915, mpllb_hw_state->pll[i] != expected,
-				"[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)",
-				crtc->base.base.id, crtc->base.name, i,
-				expected, mpllb_hw_state->pll[i]);
+		INTEL_DISPLAY_STATE_WARN(display, mpllb_hw_state->pll[i] != expected,
+					 "[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)",
+					 crtc->base.base.id, crtc->base.name, i,
+					 expected, mpllb_hw_state->pll[i]);
 	}
 
-	I915_STATE_WARN(i915, mpllb_hw_state->tx != mpllb_sw_state->tx,
-			"[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)",
-			crtc->base.base.id, crtc->base.name,
-			mpllb_sw_state->tx, mpllb_hw_state->tx);
+	INTEL_DISPLAY_STATE_WARN(display, mpllb_hw_state->tx != mpllb_sw_state->tx,
+				 "[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)",
+				 crtc->base.base.id, crtc->base.name,
+				 mpllb_sw_state->tx, mpllb_hw_state->tx);
 
-	I915_STATE_WARN(i915, mpllb_hw_state->cmn != mpllb_sw_state->cmn,
-			"[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)",
-			crtc->base.base.id, crtc->base.name,
-			mpllb_sw_state->cmn, mpllb_hw_state->cmn);
+	INTEL_DISPLAY_STATE_WARN(display, mpllb_hw_state->cmn != mpllb_sw_state->cmn,
+				 "[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)",
+				 crtc->base.base.id, crtc->base.name,
+				 mpllb_sw_state->cmn, mpllb_hw_state->cmn);
 }
 
 void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
@@ -3357,64 +3481,64 @@ static void intel_c20pll_state_verify(const struct intel_crtc_state *state,
 				      struct intel_encoder *encoder,
 				      struct intel_c20pll_state *mpll_hw_state)
 {
-	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	struct intel_display *display = to_intel_display(state);
 	const struct intel_c20pll_state *mpll_sw_state = &state->dpll_hw_state.cx0pll.c20;
 	bool sw_use_mpllb = intel_c20phy_use_mpllb(mpll_sw_state);
 	bool hw_use_mpllb = intel_c20phy_use_mpllb(mpll_hw_state);
 	int clock = intel_c20pll_calc_port_clock(encoder, mpll_sw_state);
 	int i;
 
-	I915_STATE_WARN(i915, mpll_hw_state->clock != clock,
-			"[CRTC:%d:%s] mismatch in C20: Register CLOCK (expected %d, found %d)",
-			crtc->base.base.id, crtc->base.name,
-			mpll_sw_state->clock, mpll_hw_state->clock);
+	INTEL_DISPLAY_STATE_WARN(display, mpll_hw_state->clock != clock,
+				 "[CRTC:%d:%s] mismatch in C20: Register CLOCK (expected %d, found %d)",
+				 crtc->base.base.id, crtc->base.name,
+				 mpll_sw_state->clock, mpll_hw_state->clock);
 
-	I915_STATE_WARN(i915, sw_use_mpllb != hw_use_mpllb,
-			"[CRTC:%d:%s] mismatch in C20: Register MPLLB selection (expected %d, found %d)",
-			crtc->base.base.id, crtc->base.name,
-			sw_use_mpllb, hw_use_mpllb);
+	INTEL_DISPLAY_STATE_WARN(display, sw_use_mpllb != hw_use_mpllb,
+				 "[CRTC:%d:%s] mismatch in C20: Register MPLLB selection (expected %d, found %d)",
+				 crtc->base.base.id, crtc->base.name,
+				 sw_use_mpllb, hw_use_mpllb);
 
 	if (hw_use_mpllb) {
 		for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mpllb); i++) {
-			I915_STATE_WARN(i915, mpll_hw_state->mpllb[i] != mpll_sw_state->mpllb[i],
-					"[CRTC:%d:%s] mismatch in C20MPLLB: Register[%d] (expected 0x%04x, found 0x%04x)",
-					crtc->base.base.id, crtc->base.name, i,
-					mpll_sw_state->mpllb[i], mpll_hw_state->mpllb[i]);
+			INTEL_DISPLAY_STATE_WARN(display, mpll_hw_state->mpllb[i] != mpll_sw_state->mpllb[i],
+						 "[CRTC:%d:%s] mismatch in C20MPLLB: Register[%d] (expected 0x%04x, found 0x%04x)",
+						 crtc->base.base.id, crtc->base.name, i,
+						 mpll_sw_state->mpllb[i], mpll_hw_state->mpllb[i]);
 		}
 	} else {
 		for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mplla); i++) {
-			I915_STATE_WARN(i915, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i],
-					"[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)",
-					crtc->base.base.id, crtc->base.name, i,
-					mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]);
+			INTEL_DISPLAY_STATE_WARN(display, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i],
+						 "[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)",
+						 crtc->base.base.id, crtc->base.name, i,
+						 mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]);
 		}
 	}
 
 	for (i = 0; i < ARRAY_SIZE(mpll_sw_state->tx); i++) {
-		I915_STATE_WARN(i915, mpll_hw_state->tx[i] != mpll_sw_state->tx[i],
-				"[CRTC:%d:%s] mismatch in C20: Register TX[%i] (expected 0x%04x, found 0x%04x)",
-				crtc->base.base.id, crtc->base.name, i,
-				mpll_sw_state->tx[i], mpll_hw_state->tx[i]);
+		INTEL_DISPLAY_STATE_WARN(display, mpll_hw_state->tx[i] != mpll_sw_state->tx[i],
+					 "[CRTC:%d:%s] mismatch in C20: Register TX[%i] (expected 0x%04x, found 0x%04x)",
+					 crtc->base.base.id, crtc->base.name, i,
+					 mpll_sw_state->tx[i], mpll_hw_state->tx[i]);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(mpll_sw_state->cmn); i++) {
-		I915_STATE_WARN(i915, mpll_hw_state->cmn[i] != mpll_sw_state->cmn[i],
-				"[CRTC:%d:%s] mismatch in C20: Register CMN[%i] (expected 0x%04x, found 0x%04x)",
-				crtc->base.base.id, crtc->base.name, i,
-				mpll_sw_state->cmn[i], mpll_hw_state->cmn[i]);
+		INTEL_DISPLAY_STATE_WARN(display, mpll_hw_state->cmn[i] != mpll_sw_state->cmn[i],
+					 "[CRTC:%d:%s] mismatch in C20: Register CMN[%i] (expected 0x%04x, found 0x%04x)",
+					 crtc->base.base.id, crtc->base.name, i,
+					 mpll_sw_state->cmn[i], mpll_hw_state->cmn[i]);
 	}
 }
 
 void intel_cx0pll_state_verify(struct intel_atomic_state *state,
 			       struct intel_crtc *crtc)
 {
-	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	struct intel_display *display = to_intel_display(state);
 	const struct intel_crtc_state *new_crtc_state =
 		intel_atomic_get_new_crtc_state(state, crtc);
 	struct intel_encoder *encoder;
 	struct intel_cx0pll_state mpll_hw_state = {};
 
-	if (DISPLAY_VER(i915) < 14)
+	if (DISPLAY_VER(display) < 14)
 		return;
 
 	if (!new_crtc_state->hw.active)
@@ -3436,3 +3560,54 @@ void intel_cx0pll_state_verify(struct intel_atomic_state *state,
 	else
 		intel_c20pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c20);
 }
+
+/*
+ * WA 14022081154
+ * The dedicated display PHYs reset to a power state that blocks S0ix, increasing idle
+ * system power. After a system reset (cold boot, S3/4/5, warm reset) if a dedicated
+ * PHY is not being brought up shortly, use these steps to move the PHY to the lowest
+ * power state to save power. For PTL the workaround is needed only for port A. Port B
+ * is not connected.
+ *
+ * 1. Follow the PLL Enable Sequence, using any valid frequency such as DP 1.62 GHz.
+ *    This brings lanes out of reset and enables the PLL to allow powerdown to be moved
+ *    to the Disable state.
+ * 2. Follow PLL Disable Sequence. This moves powerdown to the Disable state and disables the PLL.
+ */
+void intel_cx0_pll_power_save_wa(struct intel_display *display)
+{
+	struct intel_encoder *encoder;
+
+	if (DISPLAY_VER(display) != 30)
+		return;
+
+	for_each_intel_encoder(display->drm, encoder) {
+		struct intel_cx0pll_state pll_state = {};
+		int port_clock = 162000;
+
+		if (!intel_encoder_is_dig_port(encoder))
+			continue;
+
+		if (!intel_encoder_is_c10phy(encoder))
+			continue;
+
+		if (intel_cx0_pll_is_enabled(encoder))
+			continue;
+
+		if (intel_c10pll_calc_state_from_table(encoder,
+						       mtl_c10_edp_tables,
+						       true, port_clock,
+						       &pll_state) < 0) {
+			drm_warn(display->drm,
+				 "Unable to calc C10 state from the tables\n");
+			continue;
+		}
+
+		drm_dbg_kms(display->drm,
+			    "[ENCODER:%d:%s] Applying power saving workaround on disabled PLL\n",
+			    encoder->base.base.id, encoder->base.name);
+
+		__intel_cx0pll_enable(encoder, &pll_state, true, port_clock, 4);
+		intel_cx0pll_disable(encoder);
+	}
+}
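The new workaround helper only touches C10 PHYs whose PLL is still disabled, so it is meant to be called once, early after a reset or resume, before any modeset would have enabled those PLLs anyway. A minimal caller sketch follows; the wrapper name example_display_hw_init() and its exact position in the bring-up path are assumptions for illustration and are not part of the diff above, which only introduces intel_cx0_pll_power_save_wa() itself.

/*
 * Hypothetical call site for WA 14022081154 (sketch only). The wrapper
 * name and where it sits in the init/resume path are assumptions; the
 * diff above only defines intel_cx0_pll_power_save_wa().
 */
#include "intel_cx0_phy.h"

static void example_display_hw_init(struct intel_display *display)
{
	/* ... regular display hardware bring-up happens first ... */

	/*
	 * Park any dedicated C10 PHY left with its PLL disabled by the
	 * reset so it can reach its lowest power state and stop blocking
	 * S0ix. The helper returns early unless DISPLAY_VER() == 30 and
	 * skips non digital-port encoders and already-enabled PLLs, so
	 * calling it unconditionally here is safe.
	 */
	intel_cx0_pll_power_save_wa(display);
}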